Merge branch 'for-5.20/io_uring' into for-5.20/io_uring-zerocopy-send
* for-5.20/io_uring: (716 commits)
  io_uring: ensure REQ_F_ISREG is set async offload
  net: fix compat pointer in get_compat_msghdr()
  io_uring: Don't require reinitable percpu_ref
  io_uring: fix types in io_recvmsg_multishot_overflow
  io_uring: Use atomic_long_try_cmpxchg in __io_account_mem
  io_uring: support multishot in recvmsg
  net: copy from user before calling __get_compat_msghdr
  net: copy from user before calling __copy_msghdr
  io_uring: support 0 length iov in buffer select in compat
  io_uring: fix multishot ending when not polled
  io_uring: add netmsg cache
  io_uring: impose max limit on apoll cache
  io_uring: add abstraction around apoll cache
  io_uring: move apoll cache to poll.c
  io_uring: consolidate hash_locked io-wq handling
  io_uring: clear REQ_F_HASH_LOCKED on hash removal
  io_uring: don't race double poll setting REQ_F_ASYNC_DATA
  io_uring: don't miss setting REQ_F_DOUBLE_POLL
  io_uring: disable multishot recvmsg
  io_uring: only trace one of complete or overflow
  ...

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 4effe18fc0
633 changed files with 21989 additions and 16796 deletions
@@ -264,7 +264,8 @@ struct css_set {
 	 * List of csets participating in the on-going migration either as
 	 * source or destination. Protected by cgroup_mutex.
 	 */
-	struct list_head mg_preload_node;
+	struct list_head mg_src_preload_node;
+	struct list_head mg_dst_preload_node;
 	struct list_head mg_node;
 
 	/*
@@ -68,6 +68,8 @@ extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr,
 extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
 					struct device_attribute *attr,
 					char *buf);
+extern ssize_t cpu_show_retbleed(struct device *dev,
+				 struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -149,19 +149,19 @@ static inline void *kmap_local_folio(struct folio *folio, size_t offset);
  * It is used in atomic context when code wants to access the contents of a
  * page that might be allocated from high memory (see __GFP_HIGHMEM), for
  * example a page in the pagecache. The API has two functions, and they
- * can be used in a manner similar to the following:
+ * can be used in a manner similar to the following::
  *
- * -- Find the page of interest. --
- * struct page *page = find_get_page(mapping, offset);
+ *   // Find the page of interest.
+ *   struct page *page = find_get_page(mapping, offset);
  *
- * -- Gain access to the contents of that page. --
- * void *vaddr = kmap_atomic(page);
+ *   // Gain access to the contents of that page.
+ *   void *vaddr = kmap_atomic(page);
  *
- * -- Do something to the contents of that page. --
- * memset(vaddr, 0, PAGE_SIZE);
+ *   // Do something to the contents of that page.
+ *   memset(vaddr, 0, PAGE_SIZE);
  *
- * -- Unmap that page. --
- * kunmap_atomic(vaddr);
+ *   // Unmap that page.
+ *   kunmap_atomic(vaddr);
  *
  * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
  * call, not the argument.
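[Editor's note: this hunk appears to come from include/linux/highmem.h, judging by kmap_local_folio(). The pattern the comment describes is short enough to show end to end. A minimal sketch, assuming kernel context and a caller-supplied mapping; zero_pagecache_page() is an illustrative name, not an existing kernel function:]

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/string.h>

/* Illustrative only: zero one pagecache page using the kmap_atomic() pattern. */
static int zero_pagecache_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);
	void *vaddr;

	if (!page)
		return -ENOENT;

	vaddr = kmap_atomic(page);	/* enters atomic context */
	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);		/* takes the returned address, not the page */

	put_page(page);			/* drop the find_get_page() reference */
	return 0;
}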
include/linux/io_uring_types.h (new file, 544 lines)

@@ -0,0 +1,544 @@
#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <uapi/linux/io_uring.h>

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

struct io_wq_work {
	struct io_wq_work_node list;
	unsigned flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int cancel_seq;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_file_table {
	struct io_fixed_file *files;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};

struct io_hash_bucket {
	spinlock_t lock;
	struct hlist_head list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket *hbs;
	unsigned hash_bits;
};
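[Editor's note: a quick sketch of how these two structs compose. The per-bucket spinlock lets unrelated lookups and cancellations proceed in parallel; hash_long() is the generic kernel hash, while io_hash_bucket_for() is an illustrative helper name, not something defined in this file:]

#include <linux/hash.h>

static struct io_hash_bucket *io_hash_bucket_for(struct io_hash_table *table,
						 u64 user_data)
{
	/* hash_long() folds the value down to hash_bits bits, i.e. a valid index */
	u32 index = hash_long(user_data, table->hash_bits);

	return &table->hbs[index];
}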
struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32 sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32 sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32 sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32 cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32 cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
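[Editor's note: the comments on struct io_rings are the contract for the userspace side of the ring. A minimal sketch of an application draining the CQ ring under that contract, using raw C11 atomics rather than liburing; struct cq_view and drain_cq() are illustrative, with the pointers resolved from the IORING_OFF_CQ_RING mmap via the offsets io_uring_setup() publishes:]

#include <stdatomic.h>
#include <stdint.h>
#include <linux/io_uring.h>

struct cq_view {
	_Atomic uint32_t *khead;	/* written by the application */
	_Atomic uint32_t *ktail;	/* written by the kernel */
	uint32_t ring_mask;		/* cq_ring_entries - 1 */
	struct io_uring_cqe *cqes;
};

/* Drain all currently visible completions; returns the number consumed. */
static unsigned drain_cq(struct cq_view *cq,
			 void (*handle)(const struct io_uring_cqe *))
{
	uint32_t head = atomic_load_explicit(cq->khead, memory_order_relaxed);
	/* acquire pairs with the kernel's release store of the tail */
	uint32_t tail = atomic_load_explicit(cq->ktail, memory_order_acquire);
	unsigned seen = 0;

	while (head != tail) {
		handle(&cq->cqes[head & cq->ring_mask]);
		head++;
		seen++;
	}
	/* publish the new head so the kernel can reuse those CQE slots */
	atomic_store_explicit(cq->khead, head, memory_order_release);
	return seen;
}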
struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_submit_link {
	struct io_kiocb *head;
	struct io_kiocb *last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node free_list;
	/* batch completion logic */
	struct io_wq_work_list compl_reqs;
	struct io_submit_link link;

	bool plug_started;
	bool need_plug;
	unsigned short submit_nr;
	struct blk_plug plug;
};

struct io_ev_fd {
	struct eventfd_ctx *cq_ev_fd;
	unsigned int eventfd_async: 1;
	struct rcu_head rcu;
};

struct io_alloc_cache {
	struct hlist_head list;
	unsigned int nr_cached;
};
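[Editor's note: struct io_alloc_cache is just an hlist plus a count; the "impose max limit on apoll cache" commit in the shortlog is the nr_cached bound. A generic sketch of the intended get/put shape; the entry layout and the IO_ALLOC_CACHE_MAX value are assumptions for illustration, not lifted from this header:]

#define IO_ALLOC_CACHE_MAX	512	/* assumed bound, per the shortlog */

struct io_cache_entry {
	struct hlist_node node;
};

/* Return the entry to the cache, or tell the caller to free it instead. */
static bool io_alloc_cache_put(struct io_alloc_cache *cache,
			       struct io_cache_entry *entry)
{
	if (cache->nr_cached >= IO_ALLOC_CACHE_MAX)
		return false;
	cache->nr_cached++;
	hlist_add_head(&entry->node, &cache->list);
	return true;
}

/* Pop a cached entry; NULL means the caller must allocate fresh. */
static struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	struct io_cache_entry *entry;

	if (hlist_empty(&cache->list))
		return NULL;
	entry = hlist_entry(cache->list.first, struct io_cache_entry, node);
	hlist_del(&entry->node);
	cache->nr_cached--;
	return entry;
}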
struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref refs;

		struct io_rings *rings;
		unsigned int flags;
		enum task_work_notify_mode notify_method;
		unsigned int compat: 1;
		unsigned int drain_next: 1;
		unsigned int restricted: 1;
		unsigned int off_timeout_used: 1;
		unsigned int drain_active: 1;
		unsigned int drain_disabled: 1;
		unsigned int has_evfd: 1;
		unsigned int syscall_iopoll: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32 *sq_array;
		struct io_uring_sqe *sq_sqes;
		unsigned cached_sq_head;
		unsigned sq_entries;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node *rsrc_node;
		int rsrc_cached_refs;
		atomic_t cancel_seq;
		struct io_file_table file_table;
		unsigned nr_user_files;
		unsigned nr_user_bufs;
		struct io_mapped_ubuf **user_bufs;

		struct io_submit_state submit_state;

		struct io_buffer_list *io_bl;
		struct xarray io_bl_xa;
		struct list_head io_buffers_cache;

		struct io_hash_table cancel_table_locked;
		struct list_head cq_overflow_list;
		struct io_alloc_cache apoll_cache;
		struct io_alloc_cache netmsg_cache;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct io_wq_work_list locked_free_list;
	unsigned int locked_free_nr;

	const struct cred *sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data *sq_data;	/* if using sq thread polling */

	struct wait_queue_head sqo_sq_wait;
	struct list_head sqd_list;

	unsigned long check_cq;

	unsigned int file_alloc_start;
	unsigned int file_alloc_end;

	struct xarray personalities;
	u32 pers_next;

	struct {
		/*
		 * We cache a range of free CQEs we can use, once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe *cqe_cached;
		struct io_uring_cqe *cqe_sentinel;

		unsigned cached_cq_tail;
		unsigned cq_entries;
		struct io_ev_fd __rcu *io_ev_fd;
		struct wait_queue_head cq_wait;
		unsigned cq_extra;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct io_wq_work_list iopoll_list;
		struct io_hash_table cancel_table;
		bool poll_multi_queue;

		struct list_head io_buffers_comp;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		spinlock_t timeout_lock;
		atomic_t cq_timeouts;
		struct list_head timeout_list;
		struct list_head ltimeout_list;
		unsigned cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	/* Keep this last, we don't need it for the fast path */

	struct io_restriction restrictions;
	struct task_struct *submitter_task;

	/* slow path rsrc auxilary data, used by update/register */
	struct io_rsrc_node *rsrc_backup_node;
	struct io_mapped_ubuf *dummy_ubuf;
	struct io_rsrc_data *file_data;
	struct io_rsrc_data *buf_data;

	struct delayed_work rsrc_put_work;
	struct llist_head rsrc_put_llist;
	struct list_head rsrc_ref_list;
	spinlock_t rsrc_ref_lock;

	struct list_head io_buffers_pages;

#if defined(CONFIG_UNIX)
	struct socket *ring_sock;
#endif
	/* hashed buffered write serialization */
	struct io_wq_hash *hash_map;

	/* Only used for accounting purposes */
	struct user_struct *user;
	struct mm_struct *mm_account;

	/* ctx exit and cancelation */
	struct llist_head fallback_llist;
	struct delayed_work fallback_work;
	struct work_struct exit_work;
	struct list_head tctx_list;
	struct completion ref_comp;

	/* io-wq management, e.g. thread count */
	u32 iowq_limits[2];
	bool iowq_limits_set;

	struct list_head defer_list;
	unsigned sq_thread_idle;
	/* protected by ->completion_lock */
	unsigned evfd_last_cq_tail;
};
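[Editor's note: on the submission side, the sq_array comment above describes an extra level of indirection between the index ring and the SQE array. A sketch of the common 1:1 use of it from userspace, with the same caveats as the CQ example; struct sq_view and push_sqe() are illustrative names over the mmap'ed offsets:]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <linux/io_uring.h>

struct sq_view {
	_Atomic uint32_t *khead;	/* written by the kernel */
	_Atomic uint32_t *ktail;	/* written by the application */
	uint32_t ring_mask;		/* sq_ring_entries - 1 */
	uint32_t ring_entries;
	uint32_t *array;		/* the indices array described above */
	struct io_uring_sqe *sqes;	/* mmap'ed at IORING_OFF_SQES */
};

/* Queue one SQE; returns false if the ring is full. */
static bool push_sqe(struct sq_view *sq, const struct io_uring_sqe *src)
{
	uint32_t head = atomic_load_explicit(sq->khead, memory_order_acquire);
	uint32_t tail = atomic_load_explicit(sq->ktail, memory_order_relaxed);

	if (tail - head == sq->ring_entries)
		return false;			/* full */

	uint32_t idx = tail & sq->ring_mask;
	sq->sqes[idx] = *src;
	sq->array[idx] = idx;			/* trivial 1:1 use of the indirection */
	/* make the SQE visible before publishing the new tail */
	atomic_store_explicit(sq->ktail, tail + 1, memory_order_release);
	return true;
}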
enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT	= IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_PARTIAL_IO_BIT,
	REQ_F_CQE32_INIT_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	REQ_F_HASH_LOCKED_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP		= BIT(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING	= BIT(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT	= BIT(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= BIT(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA	= BIT(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES	= BIT(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL	= BIT(REQ_F_SINGLE_POLL_BIT),
	/* double poll may active */
	REQ_F_DOUBLE_POLL	= BIT(REQ_F_DOUBLE_POLL_BIT),
	/* request has already done partial IO */
	REQ_F_PARTIAL_IO	= BIT(REQ_F_PARTIAL_IO_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT	= BIT(REQ_F_APOLL_MULTISHOT_BIT),
	/* ->extra1 and ->extra2 are initialised */
	REQ_F_CQE32_INIT	= BIT(REQ_F_CQE32_INIT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN	= BIT(REQ_F_CLEAR_POLLIN_BIT),
	/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
	REQ_F_HASH_LOCKED	= BIT(REQ_F_HASH_LOCKED_BIT),
};
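[Editor's note: the detail worth spelling out here is that the low byte of these flags is bit-for-bit the user-visible IOSQE_* flags, so the SQE flags byte can be copied in without translation; kernel-internal bits start at 8. A hedged sketch of what that enables; the handling shown is illustrative, not the exact io_uring init code:]

static void io_init_req_flags(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	/* the whole flag set must still fit in req->flags (unsigned int) */
	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(unsigned int));

	/* first byte mirrors IOSQE_*, so a straight copy is valid */
	req->flags = (unsigned int)READ_ONCE(sqe->flags);

	if (req->flags & REQ_F_BUFFER_SELECT) {
		/* buffer group ID arrives via the SQE; see buf_index below */
	}
}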
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);

struct io_task_work {
	struct llist_node node;
	io_req_tw_func_t func;
};

struct io_cqe {
	__u64 user_data;
	__s32 res;
	/* fd initially, then cflags for completion */
	union {
		__u32 flags;
		int fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file *file;
	/* each command gets 56 bytes of data */
	__u8 data[56];
};

#define io_kiocb_to_cmd(req)	((void *) &(req)->cmd)
#define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)
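[Editor's note: the io_kiocb_to_cmd()/cmd_to_io_kiocb() pair is how per-opcode handlers see their request. A sketch of the intended use; struct io_madeup_op and io_madeup_prep() are invented for illustration, while the real per-opcode structs live in the io_uring/ source files:]

/*
 * Per-opcode data overlays io_cmd_data, so 'struct file *file' must stay
 * the first member and the whole struct must fit within io_cmd_data.
 */
struct io_madeup_op {
	struct file *file;	/* must be first, matches io_cmd_data */
	u64 user_arg;
	u32 len;
};

static int io_madeup_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_madeup_op *op = io_kiocb_to_cmd(req);

	BUILD_BUG_ON(sizeof(struct io_madeup_op) > sizeof(struct io_cmd_data));

	op->user_arg = READ_ONCE(sqe->addr);
	op->len = READ_ONCE(sqe->len);
	return 0;
}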
struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file *file;
		struct io_cmd_data cmd;
	};

	u8 opcode;
	/* polled IO has completed */
	u8 iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16 buf_index;
	unsigned int flags;

	struct io_cqe cqe;

	struct io_ring_ctx *ctx;
	struct task_struct *task;

	struct io_rsrc_node *rsrc_node;

	union {
		/* store used ubuf, so we can prevent reloading */
		struct io_mapped_ubuf *imu;

		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer *kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list *buf_list;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node comp_list;
		/* cache ->apoll->events */
		__poll_t apoll_events;
	};
	atomic_t refs;
	atomic_t poll_refs;
	struct io_task_work io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	union {
		struct hlist_node hash_node;
		struct {
			u64 extra1;
			u64 extra2;
		};
	};
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll *apoll;
	/* opcode allocated if it needs to store data for async defer */
	void *async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	struct io_kiocb *link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred *creds;
	struct io_wq_work work;
};

struct io_overflow_cqe {
	struct list_head list;
	struct io_uring_cqe cqe;
};

#endif
@@ -452,6 +452,12 @@ static inline int kexec_crash_loaded(void) { return 0; }
 #define kexec_in_progress false
 #endif /* CONFIG_KEXEC_CORE */
 
+#ifdef CONFIG_KEXEC_SIG
+void set_kexec_sig_enforced(void);
+#else
+static inline void set_kexec_sig_enforced(void) {}
+#endif
+
 #endif /* !defined(__ASSEBMLY__) */
 
 #endif /* LINUX_KEXEC_H */
@@ -1513,7 +1513,7 @@ static inline void kvm_arch_end_assignment(struct kvm *kvm)
 {
 }
 
-static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
 {
 	return false;
 }
@@ -1822,6 +1822,15 @@ struct _kvm_stats_desc {
 	STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
 		KVM_STATS_BASE_POW10, 0)
 
+/* Instantaneous boolean value, read only */
+#define STATS_DESC_IBOOLEAN(SCOPE, name) \
+	STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
+		KVM_STATS_BASE_POW10, 0)
+/* Peak (sticky) boolean value, read/write */
+#define STATS_DESC_PBOOLEAN(SCOPE, name) \
+	STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
+		KVM_STATS_BASE_POW10, 0)
+
 /* Cumulative time in nanosecond */
 #define STATS_DESC_TIME_NSEC(SCOPE, name) \
 	STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
@@ -1853,7 +1862,7 @@ struct _kvm_stats_desc {
 			HALT_POLL_HIST_COUNT), \
 	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \
 			HALT_POLL_HIST_COUNT), \
-	STATS_DESC_ICOUNTER(VCPU_GENERIC, blocking)
+	STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
 
 extern struct dentry *kvm_debugfs_dir;
 
@@ -214,7 +214,7 @@ struct netfs_request_ops {
 	void (*issue_read)(struct netfs_io_subrequest *subreq);
 	bool (*is_still_valid)(struct netfs_io_request *rreq);
 	int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
-				 struct folio *folio, void **_fsdata);
+				 struct folio **foliop, void **_fsdata);
 	void (*done)(struct netfs_io_request *rreq);
 };
 
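[Editor's note: the check_write_begin change from `struct folio *` to `struct folio **` is an API break for netfs filesystems. A sketch of an implementation under the new prototype, on the assumption, inferred from the double pointer, that the callback may now consume the folio and clear *foliop to make the caller retry; the myfs_* names are invented:]

static int myfs_check_write_begin(struct file *file, loff_t pos, unsigned len,
				  struct folio **foliop, void **_fsdata)
{
	if (myfs_cache_still_valid(file))
		return 0;	/* keep the folio, proceed with the write */

	/* Assumed retry protocol: drop the folio and clear *foliop. */
	folio_unlock(*foliop);
	folio_put(*foliop);
	*foliop = NULL;
	return 0;
}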
@@ -32,11 +32,16 @@ struct unwind_hint {
  *
  * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function.
  * Useful for code which doesn't have an ELF function annotation.
+ *
+ * UNWIND_HINT_ENTRY: machine entry without stack, SYSCALL/SYSENTER etc.
  */
 #define UNWIND_HINT_TYPE_CALL		0
 #define UNWIND_HINT_TYPE_REGS		1
 #define UNWIND_HINT_TYPE_REGS_PARTIAL	2
 #define UNWIND_HINT_TYPE_FUNC		3
+#define UNWIND_HINT_TYPE_ENTRY		4
+#define UNWIND_HINT_TYPE_SAVE		5
+#define UNWIND_HINT_TYPE_RESTORE	6
 
 #ifdef CONFIG_OBJTOOL
 
@@ -124,7 +129,7 @@ struct unwind_hint {
  * the debuginfo as necessary. It will also warn if it sees any
  * inconsistencies.
  */
-.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
 .Lunwind_hint_ip_\@:
 	.pushsection .discard.unwind_hints
 		/* struct unwind_hint */
@@ -177,7 +182,7 @@ struct unwind_hint {
 #define ASM_REACHABLE
 #else
 #define ANNOTATE_INTRA_FUNCTION_CALL
-.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
 .endm
 .macro STACK_FRAME_NON_STANDARD func:req
 .endm
@@ -731,7 +731,7 @@ static inline int __must_check
 devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
 					       struct reset_control_bulk_data *rstcs)
 {
-	return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, true);
+	return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);
 }
 
 /**
@@ -85,7 +85,7 @@ static inline void exit_thread(struct task_struct *tsk)
 extern __noreturn void do_group_exit(int);
 
 extern void exit_files(struct task_struct *);
-extern void exit_itimers(struct signal_struct *);
+extern void exit_itimers(struct task_struct *);
 
 extern pid_t kernel_clone(struct kernel_clone_args *kargs);
 struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
@@ -390,6 +390,11 @@ static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED;
 static inline int setup_earlycon(char *buf) { return 0; }
 #endif
 
+static inline bool uart_console_enabled(struct uart_port *port)
+{
+	return uart_console(port) && (port->cons->flags & CON_ENABLED);
+}
+
 struct uart_port *uart_get_console(struct uart_port *ports, int nr,
 				   struct console *c);
 int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
@@ -421,10 +421,9 @@ extern int recvmsg_copy_msghdr(struct msghdr *msg,
 			       struct user_msghdr __user *umsg, unsigned flags,
 			       struct sockaddr __user **uaddr,
 			       struct iovec **iov);
-extern int __copy_msghdr_from_user(struct msghdr *kmsg,
-				   struct user_msghdr __user *umsg,
-				   struct sockaddr __user **save_addr,
-				   struct iovec __user **uiov, size_t *nsegs);
+extern int __copy_msghdr(struct msghdr *kmsg,
+			 struct user_msghdr *umsg,
+			 struct sockaddr __user **save_addr);
 
 /* helpers which do the actual work for syscalls */
 extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
@@ -260,6 +260,7 @@ struct plat_stmmacenet_data {
 	bool has_crossts;
 	int int_snapshot_num;
 	int ext_snapshot_num;
+	bool int_snapshot_en;
 	bool ext_snapshot_en;
 	bool multi_msi_en;
 	int msi_mac_vec;