Merge branch 'linus' into smp/urgent

Ensure that all usage sites of get/put_online_cpus() except for the
straggler in drivers/thermal are gone, so that the last user and the
deprecated inlines can be removed.
Thomas Gleixner 2021-09-11 00:38:47 +02:00
commit c2f4954c2d
8966 changed files with 550375 additions and 246658 deletions

View file

@ -249,7 +249,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
/* the following numa functions are architecture-dependent */
void acpi_numa_slit_init (struct acpi_table_slit *slit);
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH)
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
#else
static inline void
@ -1380,13 +1380,11 @@ static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
#endif
#ifdef CONFIG_ACPI
extern int acpi_platform_notify(struct device *dev, enum kobject_action action);
extern void acpi_device_notify(struct device *dev);
extern void acpi_device_notify_remove(struct device *dev);
#else
static inline int
acpi_platform_notify(struct device *dev, enum kobject_action action)
{
return 0;
}
static inline void acpi_device_notify(struct device *dev) { }
static inline void acpi_device_notify_remove(struct device *dev) { }
#endif
#endif /*_LINUX_ACPI_H*/
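The hunk above replaces the single acpi_platform_notify(dev, action) entry point with dedicated add/remove helpers. A minimal, hypothetical sketch of a call site (the real caller lives in the driver core and is not part of this diff):

static void example_device_notify(struct device *dev, bool adding)
{
	if (adding)
		acpi_device_notify(dev);	/* was acpi_platform_notify(dev, KOBJ_ADD) */
	else
		acpi_device_notify_remove(dev);	/* was acpi_platform_notify(dev, KOBJ_REMOVE) */
}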

View file

@ -116,6 +116,7 @@ struct bdi_writeback {
struct list_head b_dirty_time; /* time stamps are dirty */
spinlock_t list_lock; /* protects the b_* lists */
atomic_t writeback_inodes; /* number of inodes under writeback */
struct percpu_counter stat[NR_WB_STAT_ITEMS];
unsigned long congested; /* WB_[a]sync_congested flags */
@ -142,6 +143,7 @@ struct bdi_writeback {
spinlock_t work_lock; /* protects work_list & dwork scheduling */
struct list_head work_list;
struct delayed_work dwork; /* work item used for writeback */
struct delayed_work bw_dwork; /* work item used for bandwidth estimate */
unsigned long dirty_sleep; /* last wait */

View file

@ -143,7 +143,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
sb = inode->i_sb;
#ifdef CONFIG_BLOCK
if (sb_is_blkdev_sb(sb))
return I_BDEV(inode)->bd_bdi;
return I_BDEV(inode)->bd_disk->bdi;
#endif
return sb->s_bdi;
}
@ -288,6 +288,17 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
return inode->i_wb;
}
static inline struct bdi_writeback *inode_to_wb_wbc(
struct inode *inode,
struct writeback_control *wbc)
{
/*
* If wbc does not have inode attached, it means cgroup writeback was
* disabled when wbc started. Just use the default wb in that case.
*/
return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}
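As the comment notes, wbc->wb is only set while cgroup writeback is in effect. A hypothetical accounting sketch for a caller that wants the wb a writeback_control actually operated on (the helper name and stat item are illustrative):

static void example_account_written(struct inode *inode,
				    struct writeback_control *wbc)
{
	struct bdi_writeback *wb = inode_to_wb_wbc(inode, wbc);

	/* assumption: one page just finished writeback */
	inc_wb_stat(wb, WB_WRITTEN);
}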
/**
* unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
* @inode: target inode
@ -366,6 +377,14 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
return &inode_to_bdi(inode)->wb;
}
static inline struct bdi_writeback *inode_to_wb_wbc(
struct inode *inode,
struct writeback_control *wbc)
{
return inode_to_wb(inode);
}
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{

View file

@ -5,7 +5,6 @@
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
@ -375,7 +374,7 @@ static inline void bip_set_seed(struct bio_integrity_payload *bip,
#endif /* CONFIG_BLK_DEV_INTEGRITY */
extern void bio_trim(struct bio *bio, int offset, int size);
void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs);
@ -401,6 +400,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
enum {
BIOSET_NEED_BVECS = BIT(0),
BIOSET_NEED_RESCUER = BIT(1),
BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
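BIOSET_PERCPU_CACHE opts a bioset into the per-cpu allocation cache added further down in this file. A hypothetical driver-init sketch:

static int example_init_bioset(struct bio_set *bs)
{
	/* 64 mempool entries, no front padding, per-cpu cache enabled */
	return bioset_init(bs, 64, 0, BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}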
@ -409,6 +409,8 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs,
struct bio_set *bs);
struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
struct bio_set *bs);
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
extern void bio_put(struct bio *);
@ -519,47 +521,6 @@ static inline void bio_clone_blkg_association(struct bio *dst,
struct bio *src) { }
#endif /* CONFIG_BLK_CGROUP */
#ifdef CONFIG_HIGHMEM
/*
* remember never ever reenable interrupts between a bvec_kmap_irq and
* bvec_kunmap_irq!
*/
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
unsigned long addr;
/*
* might not be a highmem page, but the preempt/irq count
* balancing is a lot nicer this way
*/
local_irq_save(*flags);
addr = (unsigned long) kmap_atomic(bvec->bv_page);
BUG_ON(addr & ~PAGE_MASK);
return (char *) addr + bvec->bv_offset;
}
static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
kunmap_atomic((void *) ptr);
local_irq_restore(*flags);
}
#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
return page_address(bvec->bv_page) + bvec->bv_offset;
}
static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
*flags = 0;
}
#endif
/*
* BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
*
@ -699,6 +660,11 @@ struct bio_set {
struct kmem_cache *bio_slab;
unsigned int front_pad;
/*
* per-cpu bio alloc cache
*/
struct bio_alloc_cache __percpu *cache;
mempool_t bio_pool;
mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@ -715,6 +681,11 @@ struct bio_set {
struct bio_list rescue_list;
struct work_struct rescue_work;
struct workqueue_struct *rescue_workqueue;
/*
* Hot un-plug notifier for the per-cpu cache, if used
*/
struct hlist_node cpuhp_dead;
};
static inline bool bioset_initialized(struct bio_set *bs)

View file

@ -227,6 +227,12 @@ unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, un
int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
int nmaskbits, loff_t off, size_t count);
extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
int nmaskbits, loff_t off, size_t count);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
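The two new print helpers exist so that very large masks can be streamed to user space in chunks via a sysfs binary attribute, honouring the read offset and count. A hypothetical read-callback sketch (names are illustrative):

static ssize_t example_mask_read(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	return bitmap_print_bitmask_to_buf(buf, cpumask_bits(cpu_online_mask),
					   nr_cpu_ids, off, count);
}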

View file

@ -4,6 +4,7 @@
#include <asm/types.h>
#include <linux/bits.h>
#include <linux/typecheck.h>
#include <uapi/linux/kernel.h>
@ -253,6 +254,55 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
__clear_bit(nr, addr);
}
/**
* __ptr_set_bit - Set bit in a pointer's value
* @nr: the bit to set
* @addr: the address of the pointer variable
*
* Example:
* void *p = foo();
* __ptr_set_bit(bit, &p);
*/
#define __ptr_set_bit(nr, addr) \
({ \
typecheck_pointer(*(addr)); \
__set_bit(nr, (unsigned long *)(addr)); \
})
/**
* __ptr_clear_bit - Clear bit in a pointer's value
* @nr: the bit to clear
* @addr: the address of the pointer variable
*
* Example:
* void *p = foo();
* __ptr_clear_bit(bit, &p);
*/
#define __ptr_clear_bit(nr, addr) \
({ \
typecheck_pointer(*(addr)); \
__clear_bit(nr, (unsigned long *)(addr)); \
})
/**
* __ptr_test_bit - Test bit in a pointer's value
* @nr: the bit to test
* @addr: the address of the pointer variable
*
* Example:
* void *p = foo();
* if (__ptr_test_bit(bit, &p)) {
* ...
* } else {
* ...
* }
*/
#define __ptr_test_bit(nr, addr) \
({ \
typecheck_pointer(*(addr)); \
test_bit(nr, (unsigned long *)(addr)); \
})
#ifdef __KERNEL__
#ifndef set_mask_bits

View file

@ -152,8 +152,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
size_t size);
typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
struct seq_file *s);
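The stat callback now prints directly into a seq_file and reports whether it emitted anything, instead of filling a caller-supplied buffer. A hypothetical policy implementation sketch:

static bool example_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
	seq_printf(s, " example_queued=%u", 0);	/* illustrative value */
	return true;	/* something was printed */
}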
struct blkcg_policy {
int plid;

View file

@ -404,7 +404,13 @@ enum {
BLK_MQ_F_STACKING = 1 << 2,
BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
BLK_MQ_F_BLOCKING = 1 << 5,
/* Do not allow an I/O scheduler to be configured. */
BLK_MQ_F_NO_SCHED = 1 << 6,
/*
* Select 'none' during queue registration in case of a single hwq
* or shared hwqs instead of 'mq-deadline'.
*/
BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
BLK_MQ_F_ALLOC_POLICY_BITS = 1,
@ -426,18 +432,14 @@ enum {
((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
struct lock_class_key *lkclass);
#define blk_mq_alloc_disk(set, queuedata) \
({ \
static struct lock_class_key __key; \
struct gendisk *__disk = __blk_mq_alloc_disk(set, queuedata); \
\
if (!IS_ERR(__disk)) \
lockdep_init_map(&__disk->lockdep_map, \
"(bio completion)", &__key, 0); \
__disk; \
__blk_mq_alloc_disk(set, queuedata, &__key); \
})
struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
void *queuedata);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q);
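With the lockdep key now handed to __blk_mq_alloc_disk() inside the macro above, a driver only has to check the returned gendisk. A hypothetical probe sketch (struct example_dev is an assumed driver type):

static int example_probe(struct example_dev *edev)
{
	struct gendisk *disk;

	disk = blk_mq_alloc_disk(&edev->tag_set, edev);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	edev->disk = disk;
	return 0;
}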

View file

@ -34,14 +34,10 @@ struct block_device {
void * bd_holder;
int bd_holders;
bool bd_write_holder;
#ifdef CONFIG_SYSFS
struct list_head bd_holder_disks;
#endif
struct kobject *bd_holder_dir;
u8 bd_partno;
spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
struct gendisk * bd_disk;
struct backing_dev_info *bd_bdi;
/* The counter of freeze processes */
int bd_fsfreeze_count;
@ -281,6 +277,7 @@ struct bio {
};
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT)
/*
* bio flags
@ -301,6 +298,7 @@ enum {
BIO_TRACKED, /* set if bio goes through the rq_qos path */
BIO_REMAPPED,
BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */
BIO_PERCPU_CACHE, /* can participate in per-cpu alloc cache */
BIO_FLAG_LAST
};

View file

@ -11,14 +11,12 @@
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
@ -28,14 +26,11 @@
#include <linux/sbitmap.h>
struct module;
struct scsi_ioctl_command;
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
@ -275,9 +270,6 @@ enum blk_queue_state {
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
/*
* Zoned block device models (zoned limit).
*
@ -398,8 +390,6 @@ struct request_queue {
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
struct backing_dev_info *backing_dev_info;
/*
* The queue owner gets to use this for whatever they like.
* ll_rw_blk doesn't touch it.
@ -424,6 +414,8 @@ struct request_queue {
spinlock_t queue_lock;
struct gendisk *disk;
/*
* queue kobject
*/
@ -506,11 +498,6 @@ struct request_queue {
unsigned int max_active_zones;
#endif /* CONFIG_BLK_DEV_ZONED */
/*
* sg stuff
*/
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
struct mutex debugfs_mutex;
#ifdef CONFIG_BLK_DEV_IO_TRACE
@ -537,10 +524,6 @@ struct request_queue {
int mq_freeze_depth;
#if defined(CONFIG_BLK_DEV_BSG)
struct bsg_class_device bsg_dev;
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
struct throtl_data *td;
@ -664,8 +647,6 @@ extern void blk_clear_pm_only(struct request_queue *q);
dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
(dir), (attrs))
#define queue_to_disk(q) (dev_to_disk(kobj_to_dev((q)->kobj.parent)))
static inline bool queue_is_mq(struct request_queue *q)
{
return q->mq_ops;
@ -888,16 +869,6 @@ extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_queue_split(struct bio **);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
@ -941,6 +912,10 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif
#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK (PAGE_SECTORS - 1)
/*
* blk_rq_pos() : the current sector
* blk_rq_bytes() : bytes left in the entire request
@ -1139,7 +1114,7 @@ void blk_queue_zone_write_granularity(struct request_queue *q,
unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
void blk_queue_update_readahead(struct request_queue *q);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
@ -1346,8 +1321,6 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
gfp_mask, 0);
}
extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
static inline bool bdev_is_partition(struct block_device *bdev)
{
return bdev->bd_partno;
@ -1376,6 +1349,11 @@ static inline unsigned int queue_max_sectors(const struct request_queue *q)
return q->limits.max_sectors;
}
static inline unsigned int queue_max_bytes(struct request_queue *q)
{
return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}
static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
return q->limits.max_hw_sectors;
@ -1521,6 +1499,22 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
return offset << SECTOR_SHIFT;
}
/*
* Two cases of handling DISCARD merge:
* If max_discard_segments > 1, the driver takes every bio
* as a range and send them to controller together. The ranges
* needn't to be contiguous.
* Otherwise, the bios/requests will be handled as same as
* others which should be contiguous.
*/
static inline bool blk_discard_mergable(struct request *req)
{
if (req_op(req) == REQ_OP_DISCARD &&
queue_max_discard_segments(req->q) > 1)
return true;
return false;
}
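A hypothetical merge-path sketch of the distinction described in the comment above: multi-range discards merge as separate ranges, anything else must stay contiguous. The segment check mirrors, but does not reproduce, the real merge code.

static bool example_bio_can_merge(struct request *req, struct bio *bio)
{
	if (blk_discard_mergable(req))
		return req->nr_phys_segments < queue_max_discard_segments(req->q);
	return blk_rq_pos(req) + blk_rq_sectors(req) == bio->bi_iter.bi_sector;
}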
static inline int bdev_discard_alignment(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
@ -1855,6 +1849,13 @@ struct block_device_operations {
char *(*devnode)(struct gendisk *disk, umode_t *mode);
struct module *owner;
const struct pr_ops *pr_ops;
/*
* Special callback for probing GPT entry at a given sector.
* Needed by Android devices, used by GPT scanner and MMC blk
* driver.
*/
int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};
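A hypothetical driver-side sketch of the new callback; where the alternative GPT actually lives is entirely an assumption here:

static int example_alternative_gpt_sector(struct gendisk *disk, sector_t *sector)
{
	/* assumption: the backup GPT sits 33 sectors before the end */
	if (get_capacity(disk) < 33)
		return -ENOENT;
	*sector = get_capacity(disk) - 33;
	return 0;
}

static const struct block_device_operations example_fops = {
	.owner			= THIS_MODULE,
	.alternative_gpt_sector	= example_alternative_gpt_sector,
};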
#ifdef CONFIG_COMPAT
@ -1984,8 +1985,6 @@ void blkdev_put_no_open(struct block_device *bdev);
struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
struct block_device *I_BDEV(struct inode *inode);
struct block_device *bdgrab(struct block_device *bdev);
void bdput(struct block_device *);
int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
loff_t lend);

View file

@ -23,22 +23,73 @@ struct ctl_table_header;
struct task_struct;
#ifdef CONFIG_CGROUP_BPF
extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type])
#define BPF_CGROUP_STORAGE_NEST_MAX 8
struct bpf_cgroup_storage_info {
struct task_struct *task;
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
enum cgroup_bpf_attach_type {
CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
CGROUP_INET_INGRESS = 0,
CGROUP_INET_EGRESS,
CGROUP_INET_SOCK_CREATE,
CGROUP_SOCK_OPS,
CGROUP_DEVICE,
CGROUP_INET4_BIND,
CGROUP_INET6_BIND,
CGROUP_INET4_CONNECT,
CGROUP_INET6_CONNECT,
CGROUP_INET4_POST_BIND,
CGROUP_INET6_POST_BIND,
CGROUP_UDP4_SENDMSG,
CGROUP_UDP6_SENDMSG,
CGROUP_SYSCTL,
CGROUP_UDP4_RECVMSG,
CGROUP_UDP6_RECVMSG,
CGROUP_GETSOCKOPT,
CGROUP_SETSOCKOPT,
CGROUP_INET4_GETPEERNAME,
CGROUP_INET6_GETPEERNAME,
CGROUP_INET4_GETSOCKNAME,
CGROUP_INET6_GETSOCKNAME,
CGROUP_INET_SOCK_RELEASE,
MAX_CGROUP_BPF_ATTACH_TYPE
};
/* For each cpu, permit maximum BPF_CGROUP_STORAGE_NEST_MAX number of tasks
* to use bpf cgroup storage simultaneously.
*/
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
#define CGROUP_ATYPE(type) \
case BPF_##type: return type
static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
switch (attach_type) {
CGROUP_ATYPE(CGROUP_INET_INGRESS);
CGROUP_ATYPE(CGROUP_INET_EGRESS);
CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
CGROUP_ATYPE(CGROUP_SOCK_OPS);
CGROUP_ATYPE(CGROUP_DEVICE);
CGROUP_ATYPE(CGROUP_INET4_BIND);
CGROUP_ATYPE(CGROUP_INET6_BIND);
CGROUP_ATYPE(CGROUP_INET4_CONNECT);
CGROUP_ATYPE(CGROUP_INET6_CONNECT);
CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
CGROUP_ATYPE(CGROUP_SYSCTL);
CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
CGROUP_ATYPE(CGROUP_GETSOCKOPT);
CGROUP_ATYPE(CGROUP_SETSOCKOPT);
CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
default:
return CGROUP_BPF_ATTACH_TYPE_INVALID;
}
}
#undef CGROUP_ATYPE
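A hypothetical attach-path sketch: the UAPI attach type is translated once, and anything that is not cgroup-scoped is rejected before the per-cgroup arrays below are indexed.

static int example_check_attach(enum bpf_attach_type attach_type)
{
	enum cgroup_bpf_attach_type atype;

	atype = to_cgroup_bpf_attach_type(attach_type);
	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
		return -EINVAL;
	/* ... index cgrp->bpf.progs[atype] / cgrp->bpf.flags[atype] ... */
	return 0;
}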
extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
#define for_each_cgroup_storage_type(stype) \
for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
@ -80,15 +131,15 @@ struct bpf_prog_array;
struct cgroup_bpf {
/* array of effective progs in this cgroup */
struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];
struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE];
/* attached progs to this cgroup and attach flags
* when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
* have either zero or one element
* when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
*/
struct list_head progs[MAX_BPF_ATTACH_TYPE];
u32 flags[MAX_BPF_ATTACH_TYPE];
struct list_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
u32 flags[MAX_CGROUP_BPF_ATTACH_TYPE];
/* list of cgroup shared storages */
struct list_head storages;
@ -128,28 +179,28 @@ int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
enum bpf_attach_type type);
enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sk(struct sock *sk,
enum bpf_attach_type type);
enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
struct sockaddr *uaddr,
enum bpf_attach_type type,
enum cgroup_bpf_attach_type atype,
void *t_ctx,
u32 *flags);
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
enum bpf_attach_type type);
enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
short access, enum bpf_attach_type type);
short access, enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
struct ctl_table *table, int write,
char **buf, size_t *pcount, loff_t *ppos,
enum bpf_attach_type type);
enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
int *optname, char __user *optval,
@ -172,44 +223,6 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type(
return BPF_CGROUP_STORAGE_SHARED;
}
static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
*storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
enum bpf_cgroup_storage_type stype;
int i, err = 0;
preempt_disable();
for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
continue;
this_cpu_write(bpf_cgroup_storage_info[i].task, current);
for_each_cgroup_storage_type(stype)
this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
storage[stype]);
goto out;
}
err = -EBUSY;
WARN_ON_ONCE(1);
out:
preempt_enable();
return err;
}
static inline void bpf_cgroup_storage_unset(void)
{
int i;
for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
continue;
this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
return;
}
}
struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
void *key, bool locked);
@ -230,9 +243,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS)) \
if (cgroup_bpf_enabled(CGROUP_INET_INGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
BPF_CGROUP_INET_INGRESS); \
CGROUP_INET_INGRESS); \
\
__ret; \
})
@ -240,54 +253,54 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
if (sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
BPF_CGROUP_INET_EGRESS); \
CGROUP_INET_EGRESS); \
} \
__ret; \
})
#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(type)) { \
__ret = __cgroup_bpf_run_filter_sk(sk, type); \
if (cgroup_bpf_enabled(atype)) { \
__ret = __cgroup_bpf_run_filter_sk(sk, atype); \
} \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)
BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
({ \
u32 __unused_flags; \
int __ret = 0; \
if (cgroup_bpf_enabled(type)) \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
if (cgroup_bpf_enabled(atype)) \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
NULL, \
&__unused_flags); \
__ret; \
})
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
({ \
u32 __unused_flags; \
int __ret = 0; \
if (cgroup_bpf_enabled(type)) { \
if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
t_ctx, \
&__unused_flags); \
release_sock(sk); \
@ -300,13 +313,13 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
* (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
* should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
*/
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, bind_flags) \
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \
({ \
u32 __flags = 0; \
int __ret = 0; \
if (cgroup_bpf_enabled(type)) { \
if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
NULL, &__flags); \
release_sock(sk); \
if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
@ -316,33 +329,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
})
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) || \
cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) && \
((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \
cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
(sk)->sk_prot->pre_connect)
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)
/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
* fullsock and its parent fullsock cannot be traced by
@ -362,33 +375,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS)) \
if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \
__ret = __cgroup_bpf_run_filter_sock_ops(sk, \
sock_ops, \
BPF_CGROUP_SOCK_OPS); \
CGROUP_SOCK_OPS); \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
sock_ops, \
BPF_CGROUP_SOCK_OPS); \
CGROUP_SOCK_OPS); \
} \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(BPF_CGROUP_DEVICE)) \
__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
if (cgroup_bpf_enabled(CGROUP_DEVICE)) \
__ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
access, \
BPF_CGROUP_DEVICE); \
CGROUP_DEVICE); \
\
__ret; \
})
@ -397,10 +410,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL)) \
if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \
__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
buf, count, pos, \
BPF_CGROUP_SYSCTL); \
CGROUP_SYSCTL); \
__ret; \
})
@ -408,7 +421,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
kernel_optval) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT)) \
if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT)) \
__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
optname, optval, \
optlen, \
@ -419,7 +432,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
get_user(__ret, optlen); \
__ret; \
})
@ -428,7 +441,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
max_optlen, retval) \
({ \
int __ret = retval; \
if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
!INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
tcp_bpf_bypass_getsockopt, \
@ -443,7 +456,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
optlen, retval) \
({ \
int __ret = retval; \
if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
__ret = __cgroup_bpf_run_filter_getsockopt_kern( \
sock, level, optname, optval, optlen, retval); \
__ret; \
@ -487,9 +500,6 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
return -EINVAL;
}
static inline int bpf_cgroup_storage_set(
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; }
static inline void bpf_cgroup_storage_unset(void) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
@ -505,14 +515,14 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
return 0;
}
#define cgroup_bpf_enabled(type) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
@ -524,7 +534,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \

View file

@ -168,6 +168,7 @@ struct bpf_map {
u32 max_entries;
u32 map_flags;
int spin_lock_off; /* >=0 valid offset, <0 error */
int timer_off; /* >=0 valid offset, <0 error */
u32 id;
int numa_node;
u32 btf_key_type_id;
@ -197,30 +198,53 @@ static inline bool map_value_has_spin_lock(const struct bpf_map *map)
return map->spin_lock_off >= 0;
}
static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
static inline bool map_value_has_timer(const struct bpf_map *map)
{
if (likely(!map_value_has_spin_lock(map)))
return;
*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
(struct bpf_spin_lock){};
return map->timer_off >= 0;
}
/* copy everything but bpf_spin_lock */
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
if (unlikely(map_value_has_spin_lock(map)))
*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
(struct bpf_spin_lock){};
if (unlikely(map_value_has_timer(map)))
*(struct bpf_timer *)(dst + map->timer_off) =
(struct bpf_timer){};
}
/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
if (unlikely(map_value_has_spin_lock(map))) {
u32 off = map->spin_lock_off;
u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;
memcpy(dst, src, off);
memcpy(dst + off + sizeof(struct bpf_spin_lock),
src + off + sizeof(struct bpf_spin_lock),
map->value_size - off - sizeof(struct bpf_spin_lock));
if (unlikely(map_value_has_spin_lock(map))) {
s_off = map->spin_lock_off;
s_sz = sizeof(struct bpf_spin_lock);
} else if (unlikely(map_value_has_timer(map))) {
t_off = map->timer_off;
t_sz = sizeof(struct bpf_timer);
}
if (unlikely(s_sz || t_sz)) {
if (s_off < t_off || !s_sz) {
swap(s_off, t_off);
swap(s_sz, t_sz);
}
memcpy(dst, src, t_off);
memcpy(dst + t_off + t_sz,
src + t_off + t_sz,
s_off - t_off - t_sz);
memcpy(dst + s_off + s_sz,
src + s_off + s_sz,
map->value_size - s_off - s_sz);
} else {
memcpy(dst, src, map->value_size);
}
}
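A worked example, under an assumed layout, of what the three memcpy()s do when the value carries a bpf_timer and no spin lock:

/* Assumed layout: value_size = 64, timer_off = 16, no spin lock.
 * After the swap normalisation s_off = 16, s_sz = 16 (the timer) and
 * t_off = t_sz = 0, so the copies become:
 *
 *	memcpy(dst,      src,      0);	// empty leading chunk
 *	memcpy(dst + 0,  src + 0,  16);	// bytes  0..15
 *	memcpy(dst + 32, src + 32, 32);	// bytes 32..63, bpf_timer skipped
 */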
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
struct bpf_offload_dev;
@ -314,6 +338,7 @@ enum bpf_arg_type {
ARG_PTR_TO_FUNC, /* pointer to a bpf program function */
ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */
ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
__BPF_ARG_TYPE_MAX,
};
@ -554,6 +579,11 @@ struct btf_func_model {
*/
#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
/* Store IP address of the caller on the trampoline stack,
* so it's available for trampoline's programs.
*/
#define BPF_TRAMP_F_IP_ARG BIT(3)
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
*/
@ -1073,7 +1103,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
/* an array of programs to be executed under rcu_lock.
*
* Typical usage:
* ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
* ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run);
*
* the structure returned by bpf_prog_array_alloc() should be populated
* with program pointers and the last pointer must be NULL.
@ -1084,7 +1114,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
*/
struct bpf_prog_array_item {
struct bpf_prog *prog;
struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
union {
struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
u64 bpf_cookie;
};
};
struct bpf_prog_array {
@ -1110,73 +1143,133 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array,
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
u64 bpf_cookie,
struct bpf_prog_array **new_array);
struct bpf_run_ctx {};
struct bpf_cg_run_ctx {
struct bpf_run_ctx run_ctx;
const struct bpf_prog_array_item *prog_item;
};
struct bpf_trace_run_ctx {
struct bpf_run_ctx run_ctx;
u64 bpf_cookie;
};
static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
struct bpf_run_ctx *old_ctx = NULL;
#ifdef CONFIG_BPF_SYSCALL
old_ctx = current->bpf_ctx;
current->bpf_ctx = new_ctx;
#endif
return old_ctx;
}
static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
#ifdef CONFIG_BPF_SYSCALL
current->bpf_ctx = old_ctx;
#endif
}
/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN (1 << 0)
/* For BPF_PROG_RUN_ARRAY_FLAGS and __BPF_PROG_RUN_ARRAY,
* if bpf_cgroup_storage_set() failed, the rest of programs
* will not execute. This should be a really rare scenario
* as it requires BPF_CGROUP_STORAGE_NEST_MAX number of
* preemptions all between bpf_cgroup_storage_set() and
* bpf_cgroup_storage_unset() on the same cpu.
*/
#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags) \
({ \
struct bpf_prog_array_item *_item; \
struct bpf_prog *_prog; \
struct bpf_prog_array *_array; \
u32 _ret = 1; \
u32 func_ret; \
migrate_disable(); \
rcu_read_lock(); \
_array = rcu_dereference(array); \
_item = &_array->items[0]; \
while ((_prog = READ_ONCE(_item->prog))) { \
if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \
break; \
func_ret = func(_prog, ctx); \
_ret &= (func_ret & 1); \
*(ret_flags) |= (func_ret >> 1); \
bpf_cgroup_storage_unset(); \
_item++; \
} \
rcu_read_unlock(); \
migrate_enable(); \
_ret; \
})
typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \
({ \
struct bpf_prog_array_item *_item; \
struct bpf_prog *_prog; \
struct bpf_prog_array *_array; \
u32 _ret = 1; \
migrate_disable(); \
rcu_read_lock(); \
_array = rcu_dereference(array); \
if (unlikely(check_non_null && !_array))\
goto _out; \
_item = &_array->items[0]; \
while ((_prog = READ_ONCE(_item->prog))) { \
if (!set_cg_storage) { \
_ret &= func(_prog, ctx); \
} else { \
if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \
break; \
_ret &= func(_prog, ctx); \
bpf_cgroup_storage_unset(); \
} \
_item++; \
} \
_out: \
rcu_read_unlock(); \
migrate_enable(); \
_ret; \
})
static __always_inline u32
BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
const void *ctx, bpf_prog_run_fn run_prog,
u32 *ret_flags)
{
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
struct bpf_cg_run_ctx run_ctx;
u32 ret = 1;
u32 func_ret;
migrate_disable();
rcu_read_lock();
array = rcu_dereference(array_rcu);
item = &array->items[0];
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
while ((prog = READ_ONCE(item->prog))) {
run_ctx.prog_item = item;
func_ret = run_prog(prog, ctx);
ret &= (func_ret & 1);
*(ret_flags) |= (func_ret >> 1);
item++;
}
bpf_reset_run_ctx(old_run_ctx);
rcu_read_unlock();
migrate_enable();
return ret;
}
static __always_inline u32
BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
const void *ctx, bpf_prog_run_fn run_prog)
{
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
struct bpf_cg_run_ctx run_ctx;
u32 ret = 1;
migrate_disable();
rcu_read_lock();
array = rcu_dereference(array_rcu);
item = &array->items[0];
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
while ((prog = READ_ONCE(item->prog))) {
run_ctx.prog_item = item;
ret &= run_prog(prog, ctx);
item++;
}
bpf_reset_run_ctx(old_run_ctx);
rcu_read_unlock();
migrate_enable();
return ret;
}
static __always_inline u32
BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
const void *ctx, bpf_prog_run_fn run_prog)
{
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
const struct bpf_prog_array *array;
struct bpf_run_ctx *old_run_ctx;
struct bpf_trace_run_ctx run_ctx;
u32 ret = 1;
migrate_disable();
rcu_read_lock();
array = rcu_dereference(array_rcu);
if (unlikely(!array))
goto out;
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
item = &array->items[0];
while ((prog = READ_ONCE(item->prog))) {
run_ctx.bpf_cookie = item->bpf_cookie;
ret &= run_prog(prog, ctx);
item++;
}
bpf_reset_run_ctx(old_run_ctx);
out:
rcu_read_unlock();
migrate_enable();
return ret;
}
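A hypothetical tracing-side sketch of the usage pattern the comment at the top of this hunk describes: all attached programs run under a single RCU/migrate-disable section and their return values are ANDed.

static unsigned int example_run_array(struct bpf_prog_array __rcu *array, void *ctx)
{
	return BPF_PROG_RUN_ARRAY(array, ctx, bpf_prog_run);
}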
/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
* so BPF programs can request cwr for TCP packets.
@ -1205,7 +1298,7 @@ _out: \
u32 _flags = 0; \
bool _cn; \
u32 _ret; \
_ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags); \
_ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \
_cn = _flags & BPF_RET_SET_CN; \
if (_ret) \
_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
@ -1214,12 +1307,6 @@ _out: \
_ret; \
})
#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
__BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)
#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \
__BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;
@ -1398,6 +1485,9 @@ typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
struct bpf_link_info *info);
typedef const struct bpf_func_proto *
(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
const struct bpf_prog *prog);
enum bpf_iter_feature {
BPF_ITER_RESCHED = BIT(0),
@ -1410,6 +1500,7 @@ struct bpf_iter_reg {
bpf_iter_detach_target_t detach_target;
bpf_iter_show_fdinfo_t show_fdinfo;
bpf_iter_fill_link_info_t fill_link_info;
bpf_iter_get_func_proto_t get_func_proto;
u32 ctx_arg_info_size;
u32 feature;
struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
@ -1432,6 +1523,8 @@ struct bpf_iter__bpf_map_elem {
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
@ -1509,12 +1602,12 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
struct bpf_prog *xdp_prog, struct bpf_map *map,
bool exclude_ingress);
bool dev_map_can_have_prog(struct bpf_map *map);
void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
struct net_device *dev_rx);
bool cpu_map_prog_allowed(struct bpf_map *map);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
struct sk_buff *skb);
/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
@ -1711,6 +1804,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
return 0;
}
static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
struct sk_buff *skb)
{
return -EOPNOTSUPP;
}
static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
return false;
@ -1852,6 +1951,12 @@ void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
union bpf_attr *attr)
@ -1884,24 +1989,6 @@ static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
{
return -ENOTSUPP;
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}
#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
@ -1921,7 +2008,21 @@ static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}
#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
void *key, void *value)
{
@ -1998,9 +2099,8 @@ extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
const struct bpf_func_proto *bpf_tracing_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);

View file

@ -136,3 +136,6 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
#endif
#ifdef CONFIG_PERF_EVENTS
BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
#endif

View file

@ -53,7 +53,14 @@ struct bpf_reg_state {
/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
* PTR_TO_MAP_VALUE_OR_NULL
*/
struct bpf_map *map_ptr;
struct {
struct bpf_map *map_ptr;
/* To distinguish map lookups from outer map
* the map_uid is non-zero for registers
* pointing to inner maps.
*/
u32 map_uid;
};
/* for PTR_TO_BTF_ID */
struct {
@ -201,12 +208,19 @@ struct bpf_func_state {
* zero == main subprog
*/
u32 subprogno;
/* Every bpf_timer_start will increment async_entry_cnt.
* It's used to distinguish:
* void foo(void) { for(;;); }
* void foo(void) { bpf_timer_set_callback(,foo); }
*/
u32 async_entry_cnt;
bool in_callback_fn;
bool in_async_callback_fn;
/* The following fields should be last. See copy_func_state() */
int acquired_refs;
struct bpf_reference_state *refs;
int allocated_stack;
bool in_callback_fn;
struct bpf_stack_state *stack;
};
@ -392,6 +406,7 @@ struct bpf_subprog_info {
bool has_tail_call;
bool tail_call_reachable;
bool has_ld_abs;
bool is_async_cb;
};
/* single container for all structs

View file

@ -62,9 +62,17 @@ static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size);
}
static inline void *memdup_bpfptr(bpfptr_t src, size_t len)
static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len)
{
return memdup_sockptr((sockptr_t) src, len);
void *p = kvmalloc(len, GFP_USER | __GFP_NOWARN);
if (!p)
return ERR_PTR(-ENOMEM);
if (copy_from_bpfptr(p, src, len)) {
kvfree(p);
return ERR_PTR(-EFAULT);
}
return p;
}
static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)

View file

@ -12,6 +12,7 @@
#include <linux/blkdev.h>
#include <scsi/scsi_request.h>
struct bsg_job;
struct request;
struct device;
struct scatterlist;

View file

@ -4,36 +4,16 @@
#include <uapi/linux/bsg.h>
struct request;
struct bsg_device;
struct device;
struct request_queue;
#ifdef CONFIG_BLK_DEV_BSG
struct bsg_ops {
int (*check_proto)(struct sg_io_v4 *hdr);
int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
fmode_t mode);
int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
void (*free_rq)(struct request *rq);
};
typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
fmode_t mode, unsigned int timeout);
struct bsg_class_device {
struct device *class_dev;
int minor;
struct request_queue *queue;
const struct bsg_ops *ops;
};
struct bsg_device *bsg_register_queue(struct request_queue *q,
struct device *parent, const char *name,
bsg_sg_io_fn *sg_io_fn);
void bsg_unregister_queue(struct bsg_device *bcd);
int bsg_register_queue(struct request_queue *q, struct device *parent,
const char *name, const struct bsg_ops *ops);
int bsg_scsi_register_queue(struct request_queue *q, struct device *parent);
void bsg_unregister_queue(struct request_queue *q);
#else
static inline int bsg_scsi_register_queue(struct request_queue *q,
struct device *parent)
{
return 0;
}
static inline void bsg_unregister_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_DEV_BSG */
#endif /* _LINUX_BSG_H */

View file

@ -99,6 +99,7 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
int btf_find_timer(const struct btf *btf, const struct btf_type *t);
bool btf_type_is_void(const struct btf_type *t);
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,

View file

@ -82,6 +82,9 @@ __BTF_ID_LIST(name, globl)
#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
BTF_ID_LIST(name) \
BTF_ID(prefix, typename)
#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
BTF_ID_LIST_GLOBAL(name) \
BTF_ID(prefix, typename)
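The new convenience macro expands to a one-entry global list. A hypothetical use resolving the BTF ID of struct file:

/* in a .c file; the ID is filled in by resolve_btfids at build time */
BTF_ID_LIST_GLOBAL_SINGLE(example_btf_file_ids, struct, file)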
/*
* The BTF_ID_UNUSED macro defines 4 zero bytes.
@ -148,6 +151,7 @@ extern struct btf_id_set name;
#define BTF_ID_UNUSED
#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 name[1];
#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
#define BTF_SET_END(name)
@ -172,7 +176,8 @@ extern struct btf_id_set name;
BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock)
enum {
#define BTF_SOCK_TYPE(name, str) name,
@ -184,4 +189,6 @@ MAX_BTF_SOCK_TYPE,
extern u32 btf_sock_ids[];
#endif
extern u32 btf_task_struct_ids[];
#endif

View file

@ -409,7 +409,7 @@ static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(int cpu) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; }
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0
#endif /* CONFIG_BLOCK */

View file

@ -4,9 +4,10 @@
*
* Copyright (C) 2001 Ming Lei <ming.lei@canonical.com>
*/
#ifndef __LINUX_BVEC_ITER_H
#define __LINUX_BVEC_ITER_H
#ifndef __LINUX_BVEC_H
#define __LINUX_BVEC_H
#include <linux/highmem.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/limits.h>
@ -183,4 +184,61 @@ static inline void bvec_advance(const struct bio_vec *bvec,
}
}
#endif /* __LINUX_BVEC_ITER_H */
/**
* bvec_kmap_local - map a bvec into the kernel virtual address space
* @bvec: bvec to map
*
* Must be called on single-page bvecs only. Call kunmap_local on the returned
* address to unmap.
*/
static inline void *bvec_kmap_local(struct bio_vec *bvec)
{
return kmap_local_page(bvec->bv_page) + bvec->bv_offset;
}
/**
* memcpy_from_bvec - copy data from a bvec
* @bvec: bvec to copy from
*
* Must be called on single-page bvecs only.
*/
static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec)
{
memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
}
/**
* memcpy_to_bvec - copy data to a bvec
* @bvec: bvec to copy to
*
* Must be called on single-page bvecs only.
*/
static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from)
{
memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len);
}
/**
* memzero_bvec - zero all data in a bvec
* @bvec: bvec to zero
*
* Must be called on single-page bvecs only.
*/
static inline void memzero_bvec(struct bio_vec *bvec)
{
memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
}
/**
* bvec_virt - return the virtual address for a bvec
* @bvec: bvec to return the virtual address for
*
* Note: the caller must ensure that @bvec->bv_page is not a highmem page.
*/
static inline void *bvec_virt(struct bio_vec *bvec)
{
WARN_ON_ONCE(PageHighMem(bvec->bv_page));
return page_address(bvec->bv_page) + bvec->bv_offset;
}
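These helpers replace the bvec_kmap_irq()/bvec_kunmap_irq() pair removed from <linux/bio.h> earlier in this diff. A hypothetical single-page-bvec sketch:

static void example_peek_bvec(struct bio_vec *bv, char *out)
{
	void *p = bvec_kmap_local(bv);

	memcpy(out, p, bv->bv_len);	/* assumption: caller sized @out for bv_len */
	kunmap_local(p);
}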
#endif /* __LINUX_BVEC_H */

View file

@ -37,7 +37,7 @@
* quanta, from when the bit is sent on the TX pin to when it is
* received on the RX pin of the transmitter. Possible options:
*
* O: automatic mode. The controller dynamically measure @tdcv
* 0: automatic mode. The controller dynamically measures @tdcv
* for each transmitted CAN FD frame.
*
* Other values: manual mode. Use the fixed provided value.
@ -45,7 +45,7 @@
* @tdco: Transmitter Delay Compensation Offset. Offset value, in time
* quanta, defining the distance between the start of the bit
* reception on the RX pin of the transceiver and the SSP
* position such as SSP = @tdcv + @tdco.
* position such that SSP = @tdcv + @tdco.
*
* If @tdco is zero, then TDC is disabled and both @tdcv and
* @tdcf should be ignored.

View file

@ -32,6 +32,12 @@ enum can_mode {
CAN_MODE_SLEEP
};
enum can_termination_gpio {
CAN_TERMINATION_GPIO_DISABLED = 0,
CAN_TERMINATION_GPIO_ENABLED,
CAN_TERMINATION_GPIO_MAX,
};
/*
* CAN common private data
*/
@ -55,6 +61,8 @@ struct can_priv {
unsigned int termination_const_cnt;
const u16 *termination_const;
u16 termination;
struct gpio_desc *termination_gpio;
u16 termination_gpio_ohms[CAN_TERMINATION_GPIO_MAX];
enum can_state state;

View file

@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2021 Angelo Dureghello <angelo@kernel-space.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _CAN_PLATFORM_FLEXCAN_H
#define _CAN_PLATFORM_FLEXCAN_H
struct flexcan_platform_data {
u32 clock_frequency;
u8 clk_src;
};
#endif /* _CAN_PLATFORM_FLEXCAN_H */
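A hypothetical (non-DT, ColdFire-style) board-file sketch handing the FlexCAN cell its clock parameters through the new platform data; the values are illustrative only:

static struct flexcan_platform_data example_flexcan_pdata = {
	.clock_frequency = 120000000,	/* assumption: 120 MHz IP clock */
	.clk_src = 1,			/* assumption: select the bus clock */
};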

View file

@ -20,6 +20,7 @@ struct can_rx_offload {
bool drop);
struct sk_buff_head skb_queue;
struct sk_buff_head skb_irq_queue;
u32 skb_queue_len_max;
unsigned int mb_first;
@ -48,14 +49,11 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int *frame_len_ptr);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
void can_rx_offload_irq_finish(struct can_rx_offload *offload);
void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
{
napi_schedule(&offload->napi);
}
static inline void can_rx_offload_disable(struct can_rx_offload *offload)
{
napi_disable(&offload->napi);

View file

@ -86,11 +86,13 @@ struct cdrom_device_ops {
/* play stuff */
int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
/* driver specifications */
const int capability; /* capability flags */
/* handle uniform packets for scsi type devices (scsi,atapi) */
int (*generic_packet) (struct cdrom_device_info *,
struct packet_command *);
int (*read_cdda_bpc)(struct cdrom_device_info *cdi, void __user *ubuf,
u32 lba, u32 nframes, u8 *last_sense);
/* driver specifications */
const int capability; /* capability flags */
};
int cdrom_multisession(struct cdrom_device_info *cdi,

View file

@ -299,6 +299,7 @@ enum {
CEPH_SESSION_FLUSHMSG_ACK,
CEPH_SESSION_FORCE_RO,
CEPH_SESSION_REJECT,
CEPH_SESSION_REQUEST_FLUSH_MDLOG,
};
extern const char *ceph_session_op_name(int op);

View file

@ -342,7 +342,7 @@ struct clk_fixed_rate {
unsigned long flags;
};
#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0)
#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0)
extern const struct clk_ops clk_fixed_rate_ops;
struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
@ -1001,6 +1001,12 @@ struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
* CLK_FRAC_DIVIDER_BIG_ENDIAN - By default little endian register accesses are
* used for the divider register. Setting this flag makes the register
* accesses big endian.
 * CLK_FRAC_DIVIDER_POWER_OF_TWO_PS - By default the resulting fraction might
 * be saturated, leaving the caller quite far from a good enough
 * approximation. By setting this flag the caller may instead request that the
 * value be shifted left by a few bits when the requested rate is too small to
 * satisfy the desired range of the denominator. It assumes that a
 * power-of-two capable prescaler exists on the caller's side.
*/
struct clk_fractional_divider {
struct clk_hw hw;
@ -1022,8 +1028,8 @@ struct clk_fractional_divider {
#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0)
#define CLK_FRAC_DIVIDER_BIG_ENDIAN BIT(1)
#define CLK_FRAC_DIVIDER_POWER_OF_TWO_PS BIT(2)
extern const struct clk_ops clk_fractional_divider_ops;
struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
@ -1069,9 +1075,9 @@ struct clk_multiplier {
#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)
#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)
#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)
extern const struct clk_ops clk_multiplier_ops;

View file

@ -137,6 +137,32 @@
#define AT91_PMC_PLLADIV2_ON (1 << 12)
#define AT91_PMC_H32MXDIV BIT(24)
#define AT91_PMC_MCR_V2 0x30 /* Master Clock Register [SAMA7G5 only] */
#define AT91_PMC_MCR_V2_ID_MSK (0xF)
#define AT91_PMC_MCR_V2_ID(_id) ((_id) & AT91_PMC_MCR_V2_ID_MSK)
#define AT91_PMC_MCR_V2_CMD (1 << 7)
#define AT91_PMC_MCR_V2_DIV (7 << 8)
#define AT91_PMC_MCR_V2_DIV1 (0 << 8)
#define AT91_PMC_MCR_V2_DIV2 (1 << 8)
#define AT91_PMC_MCR_V2_DIV4 (2 << 8)
#define AT91_PMC_MCR_V2_DIV8 (3 << 8)
#define AT91_PMC_MCR_V2_DIV16 (4 << 8)
#define AT91_PMC_MCR_V2_DIV32 (5 << 8)
#define AT91_PMC_MCR_V2_DIV64 (6 << 8)
#define AT91_PMC_MCR_V2_DIV3 (7 << 8)
#define AT91_PMC_MCR_V2_CSS (0x1F << 16)
#define AT91_PMC_MCR_V2_CSS_MD_SLCK (0 << 16)
#define AT91_PMC_MCR_V2_CSS_TD_SLCK (1 << 16)
#define AT91_PMC_MCR_V2_CSS_MAINCK (2 << 16)
#define AT91_PMC_MCR_V2_CSS_MCK0 (3 << 16)
#define AT91_PMC_MCR_V2_CSS_SYSPLL (5 << 16)
#define AT91_PMC_MCR_V2_CSS_DDRPLL (6 << 16)
#define AT91_PMC_MCR_V2_CSS_IMGPLL (7 << 16)
#define AT91_PMC_MCR_V2_CSS_BAUDPLL (8 << 16)
#define AT91_PMC_MCR_V2_CSS_AUDIOPLL (9 << 16)
#define AT91_PMC_MCR_V2_CSS_ETHPLL (10 << 16)
#define AT91_PMC_MCR_V2_EN (1 << 28)
#define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */
#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */
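Purely as a sketch of how the new MCR_V2 field macros compose (the actual programming sequence is driver-specific, and the chosen master clock ID, divider and base pointer are assumptions), a SAMA7G5 master clock selection might be assembled like this:

#include <linux/clk/at91_pmc.h>
#include <linux/io.h>

/* Illustrative only: pmc_base and the MCK ID are placeholders. */
static void demo_mcr_v2_write(void __iomem *pmc_base)
{
	u32 mcr = AT91_PMC_MCR_V2_ID(3) |	/* select master clock 3 */
		  AT91_PMC_MCR_V2_CMD |		/* write command */
		  AT91_PMC_MCR_V2_DIV2 |	/* divide the source by 2 */
		  AT91_PMC_MCR_V2_CSS_SYSPLL |	/* clock source: SYSPLL */
		  AT91_PMC_MCR_V2_EN;		/* enable the output */

	writel(mcr, pmc_base + AT91_PMC_MCR_V2);
}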

View file

@ -1,46 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Parsing command line, get the partitions information.
*
* Written by Cai Zhiyong <caizhiyong@huawei.com>
*
*/
#ifndef CMDLINEPARSEH
#define CMDLINEPARSEH
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/slab.h>
/* partition flags */
#define PF_RDONLY 0x01 /* Device is read only */
#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */
struct cmdline_subpart {
char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
sector_t from;
sector_t size;
int flags;
struct cmdline_subpart *next_subpart;
};
struct cmdline_parts {
char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
unsigned int nr_subparts;
struct cmdline_subpart *subpart;
struct cmdline_parts *next_parts;
};
void cmdline_parts_free(struct cmdline_parts **parts);
int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
const char *bdev);
int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
int slot,
int (*add_part)(int, struct cmdline_subpart *, void *),
void *param);
#endif /* CMDLINEPARSEH */

View file

@ -84,6 +84,8 @@ static inline unsigned long compact_gap(unsigned int order)
extern unsigned int sysctl_compaction_proactiveness;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos);
extern int compaction_proactiveness_sysctl_handler(struct ctl_table *table,
int write, void *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_compact_unevictable_allowed;

View file

@ -20,11 +20,8 @@
#include <linux/unistd.h>
#include <asm/compat.h>
#ifdef CONFIG_COMPAT
#include <asm/siginfo.h>
#include <asm/signal.h>
#endif
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
/*
@ -95,8 +92,6 @@ struct compat_iovec {
compat_size_t iov_len;
};
#ifdef CONFIG_COMPAT
#ifndef compat_user_stack_pointer
#define compat_user_stack_pointer() current_user_stack_pointer()
#endif
@ -131,9 +126,11 @@ struct compat_tms {
#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
#ifndef compat_sigset_t
typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
#endif
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
size_t sigsetsize);
@ -384,6 +381,7 @@ struct compat_keyctl_kdf_params {
__u32 __spare[8];
};
struct compat_stat;
struct compat_statfs;
struct compat_statfs64;
struct compat_old_linux_dirent;
@ -397,14 +395,6 @@ struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;
#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size);
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);
void copy_siginfo_to_external32(struct compat_siginfo *to,
const struct kernel_siginfo *from);
int copy_siginfo_from_user32(kernel_siginfo_t *to,
@ -428,7 +418,7 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
unsigned int size)
{
/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
#ifdef __BIG_ENDIAN
#if defined(__BIG_ENDIAN) && defined(CONFIG_64BIT)
compat_sigset_t v;
switch (_NSIG_WORDS) {
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
@ -521,8 +511,6 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
struct epoll_event; /* fortunately, this one is fixed-layout */
extern void __user *compat_alloc_user_space(unsigned long len);
int compat_restore_altstack(const compat_stack_t __user *uss);
int __compat_save_altstack(compat_stack_t __user *, unsigned long);
#define unsafe_compat_save_altstack(uss, sp, label) do { \
@ -809,26 +797,6 @@ asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr
/* mm/fadvise.c: No generic prototype for fadvise64_64 */
/* mm/, CONFIG_MMU only */
asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
compat_ulong_t mode,
compat_ulong_t __user *nmask,
compat_ulong_t maxnode, compat_ulong_t flags);
asmlinkage long compat_sys_get_mempolicy(int __user *policy,
compat_ulong_t __user *nmask,
compat_ulong_t maxnode,
compat_ulong_t addr,
compat_ulong_t flags);
asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
compat_ulong_t maxnode);
asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
const compat_ulong_t __user *new_nodes);
asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
__u32 __user *pages,
const int __user *nodes,
int __user *status,
int flags);
asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
@ -929,17 +897,6 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
/*
* For most but not all architectures, "am I in a compat syscall?" and
* "am I a compat task?" are the same question. For architectures on which
* they aren't the same question, arch code can override in_compat_syscall.
*/
#ifndef in_compat_syscall
static inline bool in_compat_syscall(void) { return is_compat_task(); }
#endif
/**
* ns_to_old_timeval32 - Compat version of ns_to_timeval
* @nsec: the nanoseconds value to be converted
@ -969,6 +926,17 @@ int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user * buf);
#ifdef CONFIG_COMPAT
/*
* For most but not all architectures, "am I in a compat syscall?" and
* "am I a compat task?" are the same question. For architectures on which
* they aren't the same question, arch code can override in_compat_syscall.
*/
#ifndef in_compat_syscall
static inline bool in_compat_syscall(void) { return is_compat_task(); }
#endif
#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
@ -978,6 +946,15 @@ static inline bool in_compat_syscall(void) { return false; }
#endif /* CONFIG_COMPAT */
#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size);
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);
/*
* Some legacy ABIs like the i386 one use less than natural alignment for 64-bit
* types, and will need special compat treatment for that. Most architectures

View file

@ -220,6 +220,10 @@ struct coresight_sysfs_link {
* @nr_links: number of sysfs links created to other components from this
* device. These will appear in the "connections" group.
* @has_conns_grp: Have added a "connections" group for sysfs links.
* @feature_csdev_list: List of complex feature programming added to the device.
* @config_csdev_list: List of system configurations added to the device.
* @cscfg_csdev_lock: Protect the lists of configurations and features.
* @active_cscfg_ctxt: Context information for current active system configuration.
*/
struct coresight_device {
struct coresight_platform_data *pdata;
@ -241,6 +245,11 @@ struct coresight_device {
int nr_links;
bool has_conns_grp;
bool ect_enabled; /* true only if associated ect device is enabled */
/* system configuration and feature lists */
struct list_head feature_csdev_list;
struct list_head config_csdev_list;
spinlock_t cscfg_csdev_lock;
void *active_cscfg_ctxt;
};
/*

View file

@ -162,15 +162,15 @@ struct counter_count_ext {
void *priv;
};
enum counter_count_function {
COUNTER_COUNT_FUNCTION_INCREASE = 0,
COUNTER_COUNT_FUNCTION_DECREASE,
COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A,
COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B,
COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
COUNTER_COUNT_FUNCTION_QUADRATURE_X4
enum counter_function {
COUNTER_FUNCTION_INCREASE = 0,
COUNTER_FUNCTION_DECREASE,
COUNTER_FUNCTION_PULSE_DIRECTION,
COUNTER_FUNCTION_QUADRATURE_X1_A,
COUNTER_FUNCTION_QUADRATURE_X1_B,
COUNTER_FUNCTION_QUADRATURE_X2_A,
COUNTER_FUNCTION_QUADRATURE_X2_B,
COUNTER_FUNCTION_QUADRATURE_X4
};
/**
@ -192,7 +192,7 @@ struct counter_count {
const char *name;
size_t function;
const enum counter_count_function *functions_list;
const enum counter_function *functions_list;
size_t num_functions;
struct counter_synapse *synapses;
@ -290,16 +290,16 @@ struct counter_device_state {
const struct attribute_group **groups;
};
enum counter_signal_value {
COUNTER_SIGNAL_LOW = 0,
COUNTER_SIGNAL_HIGH
enum counter_signal_level {
COUNTER_SIGNAL_LEVEL_LOW,
COUNTER_SIGNAL_LEVEL_HIGH,
};
/**
* struct counter_ops - Callbacks from driver
* @signal_read: optional read callback for Signal attribute. The read
* value of the respective Signal should be passed back via
* the val parameter.
* level of the respective Signal should be passed back via
* the level parameter.
* @count_read: optional read callback for Count attribute. The read
* value of the respective Count should be passed back via
* the val parameter.
@ -324,7 +324,7 @@ enum counter_signal_value {
struct counter_ops {
int (*signal_read)(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_value *val);
enum counter_signal_level *level);
int (*count_read)(struct counter_device *counter,
struct counter_count *count, unsigned long *val);
int (*count_write)(struct counter_device *counter,

View file

@ -9,10 +9,14 @@
#define _LINUX_CPUFREQ_H
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
@ -365,14 +369,17 @@ struct cpufreq_driver {
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
/* Will be called after the driver is fully initialized */
void (*ready)(struct cpufreq_policy *policy);
struct freq_attr **attr;
/* platform specific boost support code */
bool boost_enabled;
int (*set_boost)(struct cpufreq_policy *policy, int state);
/*
* Set by drivers that want to register with the energy model after the
* policy is properly initialized, but before the governor is started.
*/
void (*register_em)(struct cpufreq_policy *policy);
};
/* flags */
@ -995,6 +1002,55 @@ static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy
return count;
}
static inline int parse_perf_domain(int cpu, const char *list_name,
const char *cell_name)
{
struct device_node *cpu_np;
struct of_phandle_args args;
int ret;
cpu_np = of_cpu_device_node_get(cpu);
if (!cpu_np)
return -ENODEV;
ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
&args);
if (ret < 0)
return ret;
of_node_put(cpu_np);
return args.args[0];
}
static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
const char *cell_name, struct cpumask *cpumask)
{
int target_idx;
int cpu, ret;
ret = parse_perf_domain(pcpu, list_name, cell_name);
if (ret < 0)
return ret;
target_idx = ret;
cpumask_set_cpu(pcpu, cpumask);
for_each_possible_cpu(cpu) {
if (cpu == pcpu)
continue;
ret = parse_perf_domain(cpu, list_name, cell_name);
if (ret < 0)
continue;
if (target_idx == ret)
cpumask_set_cpu(cpu, cpumask);
}
return target_idx;
}
#else
static inline int cpufreq_boost_trigger_state(int state)
{
@ -1014,6 +1070,12 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
{
return false;
}
static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
const char *cell_name, struct cpumask *cpumask)
{
return -EOPNOTSUPP;
}
#endif
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
@ -1035,7 +1097,6 @@ void arch_set_freq_scale(const struct cpumask *cpus,
{
}
#endif
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
@ -1046,4 +1107,10 @@ unsigned int cpufreq_generic_get(unsigned int cpu);
void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
{
dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
policy->related_cpus);
}
#endif /* _LINUX_CPUFREQ_H */
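A minimal sketch of how a platform driver might hook into the new @register_em callback together with the helper above; the driver name and the stubbed init/target callbacks are assumptions, not part of this change.

#include <linux/cpufreq.h>

static int demo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* A real driver would set up policy->freq_table etc. here. */
	return 0;
}

static int demo_cpufreq_target_index(struct cpufreq_policy *policy,
				     unsigned int index)
{
	return 0;
}

static struct cpufreq_driver demo_cpufreq_driver = {
	.name		= "demo-cpufreq",
	.init		= demo_cpufreq_init,
	.target_index	= demo_cpufreq_target_index,
	/* Register with the energy model once the policy is initialized. */
	.register_em	= cpufreq_register_em_with_opp,
};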

View file

@ -46,12 +46,14 @@ enum cpuhp_state {
CPUHP_ARM_OMAP_WAKE_DEAD,
CPUHP_IRQ_POLL_DEAD,
CPUHP_BLOCK_SOFTIRQ_DEAD,
CPUHP_BIO_DEAD,
CPUHP_ACPI_CPUDRV_DEAD,
CPUHP_S390_PFAULT_DEAD,
CPUHP_BLK_MQ_DEAD,
CPUHP_FS_BUFF_DEAD,
CPUHP_PRINTK_DEAD,
CPUHP_MM_MEMCQ_DEAD,
CPUHP_XFS_DEAD,
CPUHP_PERCPU_CNT_DEAD,
CPUHP_RADIX_DEAD,
CPUHP_PAGE_ALLOC,

View file

@ -983,6 +983,44 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
nr_cpu_ids);
}
/**
* cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as
* hex values of cpumask
*
* @buf: the buffer to copy into
* @mask: the cpumask to copy
* @off: in the string from which we are copying, we copy to @buf
* @count: the maximum number of bytes to print
*
* The function prints the cpumask into the buffer as hex values of
* cpumask; Typically used by bin_attribute to export cpumask bitmask
* ABI.
*
* Returns the length of how many bytes have been copied.
*/
static inline ssize_t
cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
loff_t off, size_t count)
{
return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count);
}
/**
* cpumap_print_list_to_buf - copies the cpumask into the buffer as
* comma-separated list of cpus
*
* Everything is the same as the above cpumap_print_bitmask_to_buf()
* except the print format.
*/
static inline ssize_t
cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
loff_t off, size_t count)
{
return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
nr_cpu_ids, off, count);
}
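For context, a hedged sketch of the bin_attribute read callback these helpers are aimed at; the attribute name and the exported mask are assumptions:

#include <linux/cpumask.h>
#include <linux/sysfs.h>

/* Hypothetical sysfs bin_attribute exporting a cpumask as a hex bitmask. */
static ssize_t demo_cpus_read(struct file *file, struct kobject *kobj,
			      struct bin_attribute *attr, char *buf,
			      loff_t off, size_t count)
{
	const struct cpumask *mask = cpu_online_mask;	/* assumed source */

	/* Large masks may take several reads; off/count handle the paging. */
	return cpumap_print_bitmask_to_buf(buf, mask, off, count);
}

static BIN_ATTR_RO(demo_cpus, 0);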
#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL \
(cpumask_t) { { \

View file

@ -10,13 +10,14 @@
#include <linux/pgtable.h> /* for pgprot_t */
#ifdef CONFIG_CRASH_DUMP
/* For IS_ENABLED(CONFIG_CRASH_DUMP) */
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)
extern unsigned long long elfcorehdr_addr;
extern unsigned long long elfcorehdr_size;
#ifdef CONFIG_CRASH_DUMP
extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
extern void elfcorehdr_free(unsigned long long addr);
extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);

include/linux/damon.h (new file, 268 lines)
View file

@ -0,0 +1,268 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* DAMON api
*
* Author: SeongJae Park <sjpark@amazon.de>
*/
#ifndef _DAMON_H_
#define _DAMON_H_
#include <linux/mutex.h>
#include <linux/time64.h>
#include <linux/types.h>
/* Minimal region size. Every damon_region is aligned by this. */
#define DAMON_MIN_REGION PAGE_SIZE
/**
* struct damon_addr_range - Represents an address region of [@start, @end).
* @start: Start address of the region (inclusive).
* @end: End address of the region (exclusive).
*/
struct damon_addr_range {
unsigned long start;
unsigned long end;
};
/**
* struct damon_region - Represents a monitoring target region.
* @ar: The address range of the region.
* @sampling_addr: Address of the sample for the next access check.
* @nr_accesses: Access frequency of this region.
* @list: List head for siblings.
*/
struct damon_region {
struct damon_addr_range ar;
unsigned long sampling_addr;
unsigned int nr_accesses;
struct list_head list;
};
/**
* struct damon_target - Represents a monitoring target.
* @id: Unique identifier for this target.
* @nr_regions: Number of monitoring target regions of this target.
* @regions_list: Head of the monitoring target regions of this target.
* @list: List head for siblings.
*
* Each monitoring context could have multiple targets. For example, a context
* for virtual memory address spaces could have multiple target processes. The
* @id of each target should be unique among the targets of the context. For
* example, in the virtual address monitoring context, it could be a pidfd or
* an address of an mm_struct.
*/
struct damon_target {
unsigned long id;
unsigned int nr_regions;
struct list_head regions_list;
struct list_head list;
};
struct damon_ctx;
/**
* struct damon_primitive - Monitoring primitives for given use cases.
*
* @init: Initialize primitive-internal data structures.
* @update: Update primitive-internal data structures.
* @prepare_access_checks: Prepare next access check of target regions.
* @check_accesses: Check the accesses to target regions.
* @reset_aggregated: Reset aggregated accesses monitoring results.
* @target_valid: Determine if the target is valid.
* @cleanup: Clean up the context.
*
* DAMON can be extended for various address spaces and usages. For this,
* users should register the low level primitives for their target address
* space and usecase via the &damon_ctx.primitive. Then, the monitoring thread
* (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
* the monitoring, @update after each &damon_ctx.primitive_update_interval, and
* @check_accesses, @target_valid and @prepare_access_checks after each
* &damon_ctx.sample_interval. Finally, @reset_aggregated is called after each
* &damon_ctx.aggr_interval.
*
* @init should initialize primitive-internal data structures. For example,
* this could be used to construct proper monitoring target regions and link
* those to @damon_ctx.adaptive_targets.
* @update should update the primitive-internal data structures. For example,
* this could be used to update monitoring target regions for current status.
* @prepare_access_checks should manipulate the monitoring regions to be
* prepared for the next access check.
* @check_accesses should check the accesses to each region made after the
* last preparation and update the number of observed accesses of each region.
* It should also return the max number of observed accesses made as a result
* of its update. The value will be used as the regions adjustment threshold.
* @reset_aggregated should reset the access monitoring results that aggregated
* by @check_accesses.
* @target_valid should check whether the target is still valid for the
* monitoring.
* @cleanup is called from @kdamond just before its termination.
*/
struct damon_primitive {
void (*init)(struct damon_ctx *context);
void (*update)(struct damon_ctx *context);
void (*prepare_access_checks)(struct damon_ctx *context);
unsigned int (*check_accesses)(struct damon_ctx *context);
void (*reset_aggregated)(struct damon_ctx *context);
bool (*target_valid)(void *target);
void (*cleanup)(struct damon_ctx *context);
};
/*
* struct damon_callback - Monitoring events notification callbacks.
*
* @before_start: Called before starting the monitoring.
* @after_sampling: Called after each sampling.
* @after_aggregation: Called after each aggregation.
* @before_terminate: Called before terminating the monitoring.
* @private: User private data.
*
* The monitoring thread (&damon_ctx.kdamond) calls @before_start and
* @before_terminate just before starting and finishing the monitoring,
* respectively. Therefore, those are good places for installing and cleaning
* @private.
*
* The monitoring thread calls @after_sampling and @after_aggregation for each
* of the sampling intervals and aggregation intervals, respectively.
* Therefore, users can safely access the monitoring results without additional
* protection. For this reason, users are recommended to use these callbacks
* for accessing the results.
*
* If any callback returns non-zero, monitoring stops.
*/
struct damon_callback {
void *private;
int (*before_start)(struct damon_ctx *context);
int (*after_sampling)(struct damon_ctx *context);
int (*after_aggregation)(struct damon_ctx *context);
int (*before_terminate)(struct damon_ctx *context);
};
/**
* struct damon_ctx - Represents a context for each monitoring. This is the
* main interface that allows users to set the attributes and get the results
* of the monitoring.
*
* @sample_interval: The time between access samplings.
* @aggr_interval: The time between monitor results aggregations.
* @primitive_update_interval: The time between monitoring primitive updates.
*
* For each @sample_interval, DAMON checks whether each region is accessed or
* not. It aggregates and keeps the access information (number of accesses to
* each region) for @aggr_interval time. DAMON also checks whether the target
* memory regions need update (e.g., by ``mmap()`` calls from the application,
* in case of virtual memory monitoring) and applies the changes for each
* @primitive_update_interval. All time intervals are in micro-seconds.
* Please refer to &struct damon_primitive and &struct damon_callback for more
* detail.
*
* @kdamond: Kernel thread who does the monitoring.
* @kdamond_stop: Notifies whether kdamond should stop.
* @kdamond_lock: Mutex for the synchronizations with @kdamond.
*
* For each monitoring context, one kernel thread for the monitoring is
* created. The pointer to the thread is stored in @kdamond.
*
* Once started, the monitoring thread runs until explicitly required to be
* terminated or every monitoring target is invalid. The validity of the
* targets is checked via the &damon_primitive.target_valid of @primitive. The
* termination can also be explicitly requested by writing non-zero to
* @kdamond_stop. The thread sets @kdamond to NULL when it terminates.
* Therefore, users can know whether the monitoring is ongoing or terminated by
* reading @kdamond. Reads and writes to @kdamond and @kdamond_stop from
* outside of the monitoring thread must be protected by @kdamond_lock.
*
* Note that the monitoring thread protects only @kdamond and @kdamond_stop via
* @kdamond_lock. Accesses to other fields must be protected by themselves.
*
* @primitive: Set of monitoring primitives for given use cases.
* @callback: Set of callbacks for monitoring events notifications.
*
* @min_nr_regions: The minimum number of adaptive monitoring regions.
* @max_nr_regions: The maximum number of adaptive monitoring regions.
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
*/
struct damon_ctx {
unsigned long sample_interval;
unsigned long aggr_interval;
unsigned long primitive_update_interval;
/* private: internal use only */
struct timespec64 last_aggregation;
struct timespec64 last_primitive_update;
/* public: */
struct task_struct *kdamond;
bool kdamond_stop;
struct mutex kdamond_lock;
struct damon_primitive primitive;
struct damon_callback callback;
unsigned long min_nr_regions;
unsigned long max_nr_regions;
struct list_head adaptive_targets;
};
#define damon_next_region(r) \
(container_of(r->list.next, struct damon_region, list))
#define damon_prev_region(r) \
(container_of(r->list.prev, struct damon_region, list))
#define damon_for_each_region(r, t) \
list_for_each_entry(r, &t->regions_list, list)
#define damon_for_each_region_safe(r, next, t) \
list_for_each_entry_safe(r, next, &t->regions_list, list)
#define damon_for_each_target(t, ctx) \
list_for_each_entry(t, &(ctx)->adaptive_targets, list)
#define damon_for_each_target_safe(t, next, ctx) \
list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list)
#ifdef CONFIG_DAMON
struct damon_region *damon_new_region(unsigned long start, unsigned long end);
inline void damon_insert_region(struct damon_region *r,
struct damon_region *prev, struct damon_region *next,
struct damon_target *t);
void damon_add_region(struct damon_region *r, struct damon_target *t);
void damon_destroy_region(struct damon_region *r, struct damon_target *t);
struct damon_target *damon_new_target(unsigned long id);
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
void damon_free_target(struct damon_target *t);
void damon_destroy_target(struct damon_target *t);
unsigned int damon_nr_regions(struct damon_target *t);
struct damon_ctx *damon_new_ctx(void);
void damon_destroy_ctx(struct damon_ctx *ctx);
int damon_set_targets(struct damon_ctx *ctx,
unsigned long *ids, ssize_t nr_ids);
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
unsigned long aggr_int, unsigned long primitive_upd_int,
unsigned long min_nr_reg, unsigned long max_nr_reg);
int damon_nr_running_ctxs(void);
int damon_start(struct damon_ctx **ctxs, int nr_ctxs);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
#endif /* CONFIG_DAMON */
#ifdef CONFIG_DAMON_VADDR
/* Monitoring primitives for virtual memory address spaces */
void damon_va_init(struct damon_ctx *ctx);
void damon_va_update(struct damon_ctx *ctx);
void damon_va_prepare_access_checks(struct damon_ctx *ctx);
unsigned int damon_va_check_accesses(struct damon_ctx *ctx);
bool damon_va_target_valid(void *t);
void damon_va_cleanup(struct damon_ctx *ctx);
void damon_va_set_primitives(struct damon_ctx *ctx);
#endif /* CONFIG_DAMON_VADDR */
#endif /* _DAMON_H */
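To tie the new interface together, here is a rough sketch of how a kernel-side user could wire up virtual address monitoring with the calls declared above; the interval and region bounds are illustrative values only and error handling is trimmed.

#include <linux/damon.h>
#include <linux/slab.h>

static int demo_damon_start(unsigned long target_id)
{
	struct damon_ctx *ctx;
	unsigned long ids[1] = { target_id };
	int err;

	ctx = damon_new_ctx();
	if (!ctx)
		return -ENOMEM;

	/* Sample 5ms, aggregate 100ms, update primitives every 1s (usecs). */
	err = damon_set_attrs(ctx, 5000, 100000, 1000000, 10, 1000);
	if (err)
		goto out;

	damon_va_set_primitives(ctx);	/* monitor virtual address spaces */

	err = damon_set_targets(ctx, ids, 1);
	if (err)
		goto out;

	return damon_start(&ctx, 1);
out:
	damon_destroy_ctx(ctx);
	return err;
}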

View file

@ -41,7 +41,6 @@ struct dax_operations {
extern struct attribute_group dax_attribute_group;
#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
const struct dax_operations *ops, unsigned long flags);
void put_dax(struct dax_device *dax_dev);
@ -58,8 +57,6 @@ static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
__set_dax_synchronous(dax_dev);
}
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
int blocksize, sector_t start, sector_t len);
/*
* Check if given mapping is supported by the file / underlying device.
*/
@ -73,10 +70,6 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
return dax_synchronous(dax_dev);
}
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
return NULL;
}
static inline struct dax_device *alloc_dax(void *private, const char *host,
const struct dax_operations *ops, unsigned long flags)
{
@ -106,12 +99,6 @@ static inline bool dax_synchronous(struct dax_device *dax_dev)
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
static inline bool dax_supported(struct dax_device *dax_dev,
struct block_device *bdev, int blocksize, sector_t start,
sector_t len)
{
return false;
}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
struct dax_device *dax_dev)
{
@ -122,22 +109,12 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
struct writeback_control;
int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
{
return __bdev_dax_supported(bdev, blocksize);
}
bool __generic_fsdax_supported(struct dax_device *dax_dev,
bool generic_fsdax_supported(struct dax_device *dax_dev,
struct block_device *bdev, int blocksize, sector_t start,
sector_t sectors);
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
struct block_device *bdev, int blocksize, sector_t start,
sector_t sectors)
{
return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
sectors);
}
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
int blocksize, sector_t start, sector_t len);
static inline void fs_put_dax(struct dax_device *dax_dev)
{
@ -153,15 +130,11 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t st
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
int blocksize)
{
return false;
}
#define generic_fsdax_supported NULL
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
static inline bool dax_supported(struct dax_device *dax_dev,
struct block_device *bdev, int blocksize, sector_t start,
sector_t sectors)
sector_t len)
{
return false;
}

View file

@ -38,8 +38,8 @@ __printf(3, 4) __cold
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
__printf(3, 4) __cold
void dev_printk(const char *level, const struct device *dev,
const char *fmt, ...);
void _dev_printk(const char *level, const struct device *dev,
const char *fmt, ...);
__printf(2, 3) __cold
void _dev_emerg(const struct device *dev, const char *fmt, ...);
__printf(2, 3) __cold
@ -69,7 +69,7 @@ static inline void __dev_printk(const char *level, const struct device *dev,
struct va_format *vaf)
{}
static inline __printf(3, 4)
void dev_printk(const char *level, const struct device *dev,
void _dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
{}
@ -97,25 +97,57 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
#endif
/*
* Need to take variadic arguments even though we don't use them, as dev_fmt()
* may only just have been expanded and may result in multiple arguments.
*/
#define dev_printk_index_emit(level, fmt, ...) \
printk_index_subsys_emit("%s %s: ", level, fmt)
#define dev_printk_index_wrap(_p_func, level, dev, fmt, ...) \
({ \
dev_printk_index_emit(level, fmt); \
_p_func(dev, fmt, ##__VA_ARGS__); \
})
/*
* Some callsites directly call dev_printk rather than going through the
* dev_<level> infrastructure, so we need to emit here as well as inside those
* level-specific macros. Only one index entry will be produced, either way,
* since dev_printk's `fmt` isn't known at compile time if going through the
* dev_<level> macros.
*
* dev_fmt() isn't called for dev_printk when used directly, as it's used by
* the dev_<level> macros internally which already have dev_fmt() processed.
*
* We also can't use dev_printk_index_wrap directly, because we have a separate
* level to process.
*/
#define dev_printk(level, dev, fmt, ...) \
({ \
dev_printk_index_emit(level, fmt); \
_dev_printk(level, dev, fmt, ##__VA_ARGS__); \
})
/*
* #defines for all the dev_<level> macros to prefix with whatever
* possible use of #define dev_fmt(fmt) ...
*/
#define dev_emerg(dev, fmt, ...) \
_dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_crit(dev, fmt, ...) \
_dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_alert(dev, fmt, ...) \
_dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_err(dev, fmt, ...) \
_dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_warn(dev, fmt, ...) \
_dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_notice(dev, fmt, ...) \
_dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_info(dev, fmt, ...) \
_dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_emerg(dev, fmt, ...) \
dev_printk_index_wrap(_dev_emerg, KERN_EMERG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_crit(dev, fmt, ...) \
dev_printk_index_wrap(_dev_crit, KERN_CRIT, dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_alert(dev, fmt, ...) \
dev_printk_index_wrap(_dev_alert, KERN_ALERT, dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_err(dev, fmt, ...) \
dev_printk_index_wrap(_dev_err, KERN_ERR, dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_warn(dev, fmt, ...) \
dev_printk_index_wrap(_dev_warn, KERN_WARNING, dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_notice(dev, fmt, ...) \
dev_printk_index_wrap(_dev_notice, KERN_NOTICE, dev, dev_fmt(fmt), ##__VA_ARGS__)
#define dev_info(dev, fmt, ...) \
dev_printk_index_wrap(_dev_info, KERN_INFO, dev, dev_fmt(fmt), ##__VA_ARGS__)
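A small sketch of the call pattern these wrappers target; the driver prefix is an assumption, and the point is simply that dev_fmt() is folded in by the dev_<level> macros while dev_printk() takes its level at run time.

#define dev_fmt(fmt) "demo: " fmt	/* driver-chosen prefix, assumed */

#include <linux/device.h>

static void demo_report(struct device *dev, int err, bool verbose)
{
	/* Goes through dev_printk_index_wrap(_dev_err, KERN_ERR, ...). */
	dev_err(dev, "transfer failed: %d\n", err);

	/* Level chosen at run time; only one index entry is emitted. */
	dev_printk(verbose ? KERN_INFO : KERN_DEBUG, dev, "retrying transfer\n");
}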
#if defined(CONFIG_DYNAMIC_DEBUG) || \
(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))

View file

@ -31,7 +31,7 @@ enum dm_queue_mode {
DM_TYPE_DAX_BIO_BASED = 3,
};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;
union map_info {
void *ptr;
@ -151,7 +151,6 @@ typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *i);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages);
#define PAGE_SECTORS (PAGE_SIZE / 512)
void dm_error(const char *message);
@ -603,6 +602,10 @@ void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm);
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
0 : scnprintf(result + sz, maxlen - sz, x))
#define DMEMIT_TARGET_NAME_VERSION(y) \
DMEMIT("target_name=%s,target_version=%u.%u.%u", \
(y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
/*
* Definitions of return values from target end_io function.
*/

View file

@ -424,6 +424,7 @@ struct dev_links_info {
* @dma_pools: Dma pools (if dma'ble device).
* @dma_mem: Internal for coherent mem override.
* @cma_area: Contiguous memory area for dma allocations
* @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
@ -533,6 +534,9 @@ struct device {
#ifdef CONFIG_DMA_CMA
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
#ifdef CONFIG_SWIOTLB
struct io_tlb_mem *dma_io_tlb_mem;
#endif
/* arch specific additions */
struct dev_archdata archdata;

View file

@ -91,7 +91,7 @@ struct bus_type {
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
int (*probe)(struct device *dev);
void (*sync_state)(struct device *dev);
int (*remove)(struct device *dev);
void (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);
int (*online)(struct device *dev);

View file

@ -38,6 +38,7 @@ struct dfl_device {
int id;
u16 type;
u16 feature_id;
u8 revision;
struct resource mmio_res;
int *irqs;
unsigned int num_irqs;

View file

@ -54,7 +54,7 @@ struct dma_buf_ops {
* device), and otherwise need to fail the attach operation.
*
* The exporter should also in general check whether the current
* allocation fullfills the DMA constraints of the new device. If this
* allocation fulfills the DMA constraints of the new device. If this
* is not the case, and the allocation cannot be moved, it should also
* fail the attach operation.
*
@ -96,6 +96,12 @@ struct dma_buf_ops {
* This is called automatically for non-dynamic importers from
* dma_buf_attach().
*
* Note that similar to non-dynamic exporters in their @map_dma_buf
* callback the driver must guarantee that the memory is available for
* use and cleared of any old data by the time this function returns.
* Drivers which pipeline their buffer moves internally must wait for
* all moves and clears to complete.
*
* Returns:
*
* 0 on success, negative error code on failure.
@ -144,9 +150,18 @@ struct dma_buf_ops {
* This is always called with the dmabuf->resv object locked when
* the dynamic_mapping flag is true.
*
* Note that for non-dynamic exporters the driver must guarantee that
* that the memory is available for use and cleared of any old data by
* the time this function returns. Drivers which pipeline their buffer
* moves internally must wait for all moves and clears to complete.
* Dynamic exporters do not need to follow this rule: For non-dynamic
* importers the buffer is already pinned through @pin, which has the
* same requirements. Dynamic importers otoh are required to obey the
* dma_resv fences.
*
* Returns:
*
* A &sg_table scatter list of or the backing storage of the DMA buffer,
* A &sg_table scatter list of the backing storage of the DMA buffer,
* already mapped into the device address space of the &device attached
* with the provided &dma_buf_attachment. The addresses and lengths in
* the scatter list are PAGE_SIZE aligned.
@ -168,7 +183,7 @@ struct dma_buf_ops {
*
* This is called by dma_buf_unmap_attachment() and should unmap and
* release the &sg_table allocated in @map_dma_buf, and it is mandatory.
* For static dma_buf handling this might also unpins the backing
* For static dma_buf handling this might also unpin the backing
* storage if this is the last mapping of the DMA buffer.
*/
void (*unmap_dma_buf)(struct dma_buf_attachment *,
@ -237,7 +252,7 @@ struct dma_buf_ops {
* This callback is used by the dma_buf_mmap() function
*
* Note that the mapping needs to be incoherent, userspace is expected
* to braket CPU access using the DMA_BUF_IOCTL_SYNC interface.
* to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
*
* Because dma-buf buffers have invariant size over their lifetime, the
* dma-buf core checks whether a vma is too large and rejects such
@ -274,27 +289,6 @@ struct dma_buf_ops {
/**
* struct dma_buf - shared buffer object
* @size: size of the buffer; invariant over the lifetime of the buffer.
* @file: file pointer used for sharing buffers across, and for refcounting.
* @attachments: list of dma_buf_attachment that denotes all devices attached,
* protected by dma_resv lock.
* @ops: dma_buf_ops associated with this buffer object.
* @lock: used internally to serialize list manipulation, attach/detach and
* vmap/unmap
* @vmapping_counter: used internally to refcnt the vmaps
* @vmap_ptr: the current vmap ptr if vmapping_counter > 0
* @exp_name: name of the exporter; useful for debugging.
* @name: userspace-provided name; useful for accounting and debugging,
* protected by @resv.
* @name_lock: spinlock to protect name access
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object.
* @resv: reservation object linked to this dma-buf
* @poll: for userspace poll support
* @cb_excl: for userspace poll support
* @cb_shared: for userspace poll support
*
* This represents a shared buffer, created by calling dma_buf_export(). The
* userspace representation is a normal file descriptor, which can be created by
@ -306,30 +300,152 @@ struct dma_buf_ops {
* Device DMA access is handled by the separate &struct dma_buf_attachment.
*/
struct dma_buf {
/**
* @size:
*
* Size of the buffer; invariant over the lifetime of the buffer.
*/
size_t size;
/**
* @file:
*
* File pointer used for sharing buffers across, and for refcounting.
* See dma_buf_get() and dma_buf_put().
*/
struct file *file;
/**
* @attachments:
*
* List of dma_buf_attachment that denotes all devices attached,
* protected by &dma_resv lock @resv.
*/
struct list_head attachments;
/** @ops: dma_buf_ops associated with this buffer object. */
const struct dma_buf_ops *ops;
/**
* @lock:
*
* Used internally to serialize list manipulation, attach/detach and
* vmap/unmap. Note that in many cases this is superseded by
* dma_resv_lock() on @resv.
*/
struct mutex lock;
/**
* @vmapping_counter:
*
* Used internally to refcnt the vmaps returned by dma_buf_vmap().
* Protected by @lock.
*/
unsigned vmapping_counter;
/**
* @vmap_ptr:
* The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
*/
struct dma_buf_map vmap_ptr;
/**
* @exp_name:
*
* Name of the exporter; useful for debugging. See the
* DMA_BUF_SET_NAME IOCTL.
*/
const char *exp_name;
/**
* @name:
*
* Userspace-provided name; useful for accounting and debugging,
* protected by dma_resv_lock() on @resv and @name_lock for read access.
*/
const char *name;
/** @name_lock: Spinlock to protect read access to @name. */
spinlock_t name_lock;
/**
* @owner:
*
* Pointer to exporter module; used for refcounting when exporter is a
* kernel module.
*/
struct module *owner;
/** @list_node: node for dma_buf accounting and debugging. */
struct list_head list_node;
/** @priv: exporter specific private data for this buffer object. */
void *priv;
/**
* @resv:
*
* Reservation object linked to this dma-buf.
*
* IMPLICIT SYNCHRONIZATION RULES:
*
* Drivers which support implicit synchronization of buffer access as
* e.g. exposed in `Implicit Fence Poll Support`_ must follow the
* below rules.
*
* - Drivers must add a shared fence through dma_resv_add_shared_fence()
* for anything the userspace API considers a read access. This highly
* depends upon the API and window system.
*
* - Similarly drivers must set the exclusive fence through
* dma_resv_add_excl_fence() for anything the userspace API considers
* write access.
*
* - Drivers may just always set the exclusive fence, since that only
* causes unnecessary synchronization, but no correctness issues.
*
* - Some drivers only expose a synchronous userspace API with no
* pipelining across drivers. These do not set any fences for their
* access. An example here is v4l.
*
* DYNAMIC IMPORTER RULES:
*
* Dynamic importers, see dma_buf_attachment_is_dynamic(), have
* additional constraints on how they set up fences:
*
* - Dynamic importers must obey the exclusive fence and wait for it to
* signal before allowing access to the buffer's underlying storage
* through the device.
*
* - Dynamic importers should set fences for any access that they can't
* disable immediately from their &dma_buf_attach_ops.move_notify
* callback.
*/
struct dma_resv *resv;
/* poll support */
/** @poll: for userspace poll support */
wait_queue_head_t poll;
/** @cb_excl: for userspace poll support */
/** @cb_shared: for userspace poll support */
struct dma_buf_poll_cb_t {
struct dma_fence_cb cb;
wait_queue_head_t *poll;
__poll_t active;
} cb_excl, cb_shared;
#ifdef CONFIG_DMABUF_SYSFS_STATS
/**
* @sysfs_entry:
*
* For exposing information about this buffer in sysfs. See also
* `DMA-BUF statistics`_ for the uapi this enables.
*/
struct dma_buf_sysfs_entry {
struct kobject kobj;
struct dma_buf *dmabuf;
} *sysfs_entry;
#endif
};
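As a hedged illustration of the implicit synchronization rules documented for @resv above, a driver that has just queued a read on the device might publish its fence roughly as follows; the fence object, the shared-slot reservation and the locking context are assumed to come from elsewhere in the driver.

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

/* Sketch only: 'fence' is the fence for a read the device just queued. */
static int demo_publish_read_fence(struct dma_buf *dmabuf,
				   struct dma_fence *fence)
{
	int ret;

	ret = dma_resv_lock(dmabuf->resv, NULL);
	if (ret)
		return ret;

	/* Reads are published as shared fences... */
	ret = dma_resv_reserve_shared(dmabuf->resv, 1);
	if (!ret)
		dma_resv_add_shared_fence(dmabuf->resv, fence);

	/* ...a write would use dma_resv_add_excl_fence() instead. */
	dma_resv_unlock(dmabuf->resv);
	return ret;
}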
/**
@ -464,7 +580,7 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
/**
* dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
* mappinsg
* mappings
* @attach: the DMA-buf attachment to check
*
* Returns true if a DMA-buf importer wants to call the map/unmap functions with

View file

@ -12,25 +12,41 @@
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
#include <linux/slab.h>
/**
* struct dma_fence_chain - fence to represent an node of a fence chain
* @base: fence base class
* @lock: spinlock for fence handling
* @prev: previous fence of the chain
* @prev_seqno: original previous seqno before garbage collection
* @fence: encapsulated fence
* @cb: callback structure for signaling
* @work: irq work item for signaling
* @lock: spinlock for fence handling
*/
struct dma_fence_chain {
struct dma_fence base;
spinlock_t lock;
struct dma_fence __rcu *prev;
u64 prev_seqno;
struct dma_fence *fence;
struct dma_fence_cb cb;
struct irq_work work;
union {
/**
* @cb: callback for signaling
*
* This is used to add the callback for signaling the
* completion of the fence chain. Never used at the same time
* as the irq work.
*/
struct dma_fence_cb cb;
/**
* @work: irq work item for signaling
*
* Irq work structure to allow us to add the callback without
* running into lock inversion. Never used at the same time as
* the callback.
*/
struct irq_work work;
};
spinlock_t lock;
};
extern const struct dma_fence_ops dma_fence_chain_ops;
@ -51,6 +67,30 @@ to_dma_fence_chain(struct dma_fence *fence)
return container_of(fence, struct dma_fence_chain, base);
}
/**
* dma_fence_chain_alloc
*
* Returns a new struct dma_fence_chain object or NULL on failure.
*/
static inline struct dma_fence_chain *dma_fence_chain_alloc(void)
{
return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
};
/**
* dma_fence_chain_free
* @chain: chain node to free
*
* Frees up an allocated but not used struct dma_fence_chain object. This
* doesn't need an RCU grace period since the fence was never initialized nor
* published. After dma_fence_chain_init() has been called the fence must be
* released by calling dma_fence_put(), and not through this function.
*/
static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
{
kfree(chain);
};
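A brief sketch of the alloc/init/free discipline described above; dma_fence_chain_init() and its argument order are assumed from the existing chain API rather than shown in this hunk.

#include <linux/dma-fence-chain.h>

/* Sketch: append 'fence' to the chain ending at 'prev' (prev may be NULL). */
static struct dma_fence *demo_chain_append(struct dma_fence *prev,
					   struct dma_fence *fence, u64 seqno)
{
	struct dma_fence_chain *chain = dma_fence_chain_alloc();

	if (!chain)
		return NULL;

	if (!fence) {
		/* Never initialized, so a plain free is the right teardown. */
		dma_fence_chain_free(chain);
		return prev;
	}

	dma_fence_chain_init(chain, prev, fence, seqno);
	/* From here on the node is released with dma_fence_put(). */
	return &chain->base;
}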
/**
* dma_fence_chain_for_each - iterate over all fences in chain
* @iter: current fence

View file

@ -20,6 +20,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain);
/* Setup call for arch DMA mapping code */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
int iommu_dma_init_fq(struct iommu_domain *domain);
/* The DMA API isn't _quite_ the whole story, though... */
/*
@ -54,6 +55,11 @@ static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
{
}
static inline int iommu_dma_init_fq(struct iommu_domain *domain)
{
return -EINVAL;
}
static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
{
return -ENODEV;

View file

@ -41,8 +41,9 @@ struct dma_map_ops {
size_t size, enum dma_data_direction dir,
unsigned long attrs);
/*
* map_sg returns 0 on error and a value > 0 on success.
* It should never return a value < 0.
* map_sg should return a negative error code on error. See
* dma_map_sgtable() for a list of appropriate error codes
* and their meanings.
*/
int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
@ -170,13 +171,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
@ -186,7 +180,16 @@ static inline int dma_declare_coherent_memory(struct device *dev,
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_DMA_DECLARE_COHERENT */
#ifdef CONFIG_DMA_GLOBAL_POOL
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
size_t size, int *ret);
int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
ssize_t size, dma_addr_t *dma_handle)
{
@ -201,7 +204,7 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
{
return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
#endif /* CONFIG_DMA_GLOBAL_POOL */
/*
* This is the actual return value from the ->alloc_noncontiguous method.

View file

@ -105,11 +105,13 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
@ -164,8 +166,9 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
static inline unsigned int dma_map_sg_attrs(struct device *dev,
struct scatterlist *sg, int nents, enum dma_data_direction dir,
unsigned long attrs)
{
return 0;
}
@ -174,6 +177,11 @@ static inline void dma_unmap_sg_attrs(struct device *dev,
unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs)
{
return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
unsigned long attrs)
@ -343,34 +351,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
/**
* dma_map_sgtable - Map the given buffer for DMA
* @dev: The device for which to perform the DMA operation
* @sgt: The sg_table object describing the buffer
* @dir: DMA direction
* @attrs: Optional DMA attributes for the map operation
*
* Maps a buffer described by a scatterlist stored in the given sg_table
* object for the @dir DMA operation by the @dev device. After success the
* ownership for the buffer is transferred to the DMA domain. One has to
* call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
* ownership of the buffer back to the CPU domain before touching the
* buffer by the CPU.
*
* Returns 0 on success or -EINVAL on error during mapping the buffer.
*/
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
enum dma_data_direction dir, unsigned long attrs)
{
int nents;
nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
if (nents <= 0)
return -EINVAL;
sgt->nents = nents;
return 0;
}
/**
* dma_unmap_sgtable - Unmap the given buffer for DMA
* @dev: The device for which to perform the DMA operation

View file

@ -380,6 +380,7 @@ enum dma_slave_buswidth {
DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
};
/**
@ -398,7 +399,7 @@ enum dma_slave_buswidth {
* @src_addr_width: this is the width in bytes of the source (RX)
* register where DMA data shall be read. If the source
* is memory this may be ignored depending on architecture.
* Legal values: 1, 2, 3, 4, 8, 16, 32, 64.
* Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128.
* @dst_addr_width: same as src_addr_width but for destination
* target (TX) mutatis mutandis.
* @src_maxburst: the maximum number of words (note: words, as in

View file

@ -11,60 +11,48 @@
struct dsa_switch;
struct sk_buff;
struct net_device;
struct packet_type;
struct dsa_8021q_context;
struct dsa_8021q_crosschip_link {
struct dsa_tag_8021q_vlan {
struct list_head list;
int port;
struct dsa_8021q_context *other_ctx;
int other_port;
u16 vid;
refcount_t refcount;
};
struct dsa_8021q_ops {
int (*vlan_add)(struct dsa_switch *ds, int port, u16 vid, u16 flags);
int (*vlan_del)(struct dsa_switch *ds, int port, u16 vid);
};
struct dsa_8021q_context {
const struct dsa_8021q_ops *ops;
struct dsa_switch *ds;
struct list_head crosschip_links;
struct list_head vlans;
/* EtherType of RX VID, used for filtering on master interface */
__be16 proto;
};
#define DSA_8021Q_N_SUBVLAN 8
int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto);
int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled);
int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
struct dsa_8021q_context *other_ctx,
int other_port);
int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
struct dsa_8021q_context *other_ctx,
int other_port);
void dsa_tag_8021q_unregister(struct dsa_switch *ds);
struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
int *subvlan);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id);
int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
struct net_device *br,
int bridge_num);
void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
struct net_device *br,
int bridge_num);
u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num);
u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);
u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);
u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan);
int dsa_8021q_rx_switch_id(u16 vid);
int dsa_8021q_rx_source_port(u16 vid);
u16 dsa_8021q_rx_subvlan(u16 vid);
bool vid_is_dsa_8021q_rxvlan(u16 vid);
bool vid_is_dsa_8021q_txvlan(u16 vid);

View file

@ -16,6 +16,8 @@
#define ETH_P_SJA1105_META 0x0008
#define ETH_P_SJA1110 0xdadc
#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
/* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
#define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull
#define SJA1105_LINKLOCAL_FILTER_A_MASK 0xFFFFFF000000ull
@ -59,14 +61,12 @@ struct sja1105_skb_cb {
((struct sja1105_skb_cb *)((skb)->cb))
struct sja1105_port {
u16 subvlan_map[DSA_8021Q_N_SUBVLAN];
struct kthread_worker *xmit_worker;
struct kthread_work xmit_work;
struct sk_buff_head xmit_queue;
struct sja1105_tagger_data *data;
struct dsa_port *dp;
bool hwts_tx_en;
u16 xmit_tpid;
};
enum sja1110_meta_tstamp {
@ -89,4 +89,22 @@ static inline void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105)
extern const struct dsa_switch_ops sja1105_switch_ops;
static inline bool dsa_port_is_sja1105(struct dsa_port *dp)
{
return dp->ds->ops == &sja1105_switch_ops;
}
#else
static inline bool dsa_port_is_sja1105(struct dsa_port *dp)
{
return false;
}
#endif
#endif /* _NET_DSA_SJA1105_H */

View file

@ -11,7 +11,7 @@
#include <linux/types.h>
/**
* em_perf_state - Performance state of a performance domain
* struct em_perf_state - Performance state of a performance domain
* @frequency: The frequency in KHz, for consistency with CPUFreq
* @power: The power consumed at this level (by 1 CPU or by a registered
* device). It can be a total power: static and dynamic.
@ -25,7 +25,7 @@ struct em_perf_state {
};
/**
* em_perf_domain - Performance domain
* struct em_perf_domain - Performance domain
* @table: List of performance states, in ascending order
* @nr_perf_states: Number of performance states
* @milliwatts: Flag indicating the power values are in milli-Watts
@ -53,6 +53,22 @@ struct em_perf_domain {
#ifdef CONFIG_ENERGY_MODEL
#define EM_MAX_POWER 0xFFFF
/*
* Increase resolution of energy estimation calculations for 64-bit
* architectures. The extra resolution improves decisions made by EAS for
* task placement when two Performance Domains might provide similar energy
* estimation values (w/o better resolution the values could be equal).
*
* We increase resolution only if we have enough bits to allow this increased
* resolution (i.e. 64-bit). The costs of increasing resolution on 32-bit
* are pretty high and the returns do not justify the increased costs.
*/
#ifdef CONFIG_64BIT
#define em_scale_power(p) ((p) * 1000)
#else
#define em_scale_power(p) (p)
#endif
struct em_data_callback {
/**
* active_power() - Provide power at the next performance state of
@ -87,12 +103,12 @@ void em_dev_unregister_perf_domain(struct device *dev);
/**
* em_cpu_energy() - Estimates the energy consumed by the CPUs of a
performance domain
* performance domain
* @pd : performance domain for which energy has to be estimated
* @max_util : highest utilization among CPUs of the domain
* @sum_util : sum of the utilization of all CPUs in the domain
* @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
might reflect reduced frequency (due to thermal)
* might reflect reduced frequency (due to thermal)
*
* This function must be used only for CPU devices. There is no validation,
* i.e. if the EM is a CPU type and has cpumask allocated. It is called from

View file

@ -2,7 +2,11 @@
#ifndef __LINUX_ENTRYKVM_H
#define __LINUX_ENTRYKVM_H
#include <linux/entry-common.h>
#include <linux/static_call_types.h>
#include <linux/tracehook.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
#include <linux/tick.h>
/* Transfer to guest mode work */

View file

@ -31,5 +31,6 @@
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
#define ERECALLCONFLICT 530 /* conflict with recalled state */
#define ENOGRACE 531 /* NFS file lock reclaim refused */
#endif

View file

@ -299,6 +299,18 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src)
#endif
}
/**
* eth_hw_addr_set - Assign Ethernet address to a net_device
* @dev: pointer to net_device structure
* @addr: address to assign
*
* Assign given address to the net_device, addr_assign_type is not changed.
*/
static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
{
ether_addr_copy(dev->dev_addr, addr);
}
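A hedged sketch of a driver adopting the helper instead of writing dev->dev_addr directly; eth_random_addr() stands in for whatever register/OTP/firmware read a real driver would do:

	static void example_assign_mac(struct net_device *ndev)
	{
		u8 addr[ETH_ALEN];

		eth_random_addr(addr);		/* placeholder MAC source */
		eth_hw_addr_set(ndev, addr);	/* copies into ndev->dev_addr */
	}
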
/**
* eth_hw_addr_inherit - Copy dev_addr from another net_device
* @dst: pointer to net_device to copy dev_addr to

View file

@ -15,10 +15,9 @@
#include <linux/bitmap.h>
#include <linux/compat.h>
#include <linux/netlink.h>
#include <uapi/linux/ethtool.h>
#ifdef CONFIG_COMPAT
struct compat_ethtool_rx_flow_spec {
u32 flow_type;
union ethtool_flow_union h_u;
@ -38,8 +37,6 @@ struct compat_ethtool_rxnfc {
u32 rule_locs[];
};
#endif /* CONFIG_COMPAT */
#include <linux/rculist.h>
/**
@ -176,6 +173,11 @@ extern int
__ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings);
struct kernel_ethtool_coalesce {
u8 use_cqe_mode_tx;
u8 use_cqe_mode_rx;
};
/**
* ethtool_intersect_link_masks - Given two link masks, AND them together
* @dst: first mask and where result is stored
@ -215,7 +217,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
#define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19)
#define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20)
#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21)
#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(21, 0)
#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22)
#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23)
#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(23, 0)
#define ETHTOOL_COALESCE_USECS \
(ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
@ -241,6 +245,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \
ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \
ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
#define ETHTOOL_COALESCE_USE_CQE \
(ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX)
#define ETHTOOL_STAT_NOT_SET (~0ULL)
@ -606,8 +612,14 @@ struct ethtool_ops {
struct ethtool_eeprom *, u8 *);
int (*set_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
int (*get_coalesce)(struct net_device *,
struct ethtool_coalesce *,
struct kernel_ethtool_coalesce *,
struct netlink_ext_ack *);
int (*set_coalesce)(struct net_device *,
struct ethtool_coalesce *,
struct kernel_ethtool_coalesce *,
struct netlink_ext_ack *);
void (*get_ringparam)(struct net_device *,
struct ethtool_ringparam *);
int (*set_ringparam)(struct net_device *,

View file

@ -68,4 +68,22 @@ static inline void eventpoll_release(struct file *file) {}
#endif
#if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT)
/* ARM OABI has an incompatible struct layout and needs a special handler */
extern struct epoll_event __user *
epoll_put_uevent(__poll_t revents, __u64 data,
struct epoll_event __user *uevent);
#else
static inline struct epoll_event __user *
epoll_put_uevent(__poll_t revents, __u64 data,
struct epoll_event __user *uevent)
{
if (__put_user(revents, &uevent->events) ||
__put_user(data, &uevent->data))
return NULL;
return uevent+1;
}
#endif
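A sketch of the copy-out loop this helper is meant for; the revents/data arrays are hypothetical stand-ins for walking epoll's ready list:

	static int example_send_events(struct epoll_event __user *uevents,
				       const __poll_t *revents, const __u64 *data,
				       int nr)
	{
		struct epoll_event __user *uevent = uevents;
		int i;

		for (i = 0; i < nr; i++) {
			/* NULL means __put_user() faulted on one of the fields. */
			uevent = epoll_put_uevent(revents[i], data[i], uevent);
			if (!uevent)
				return i ? i : -EFAULT;
		}
		return nr;
	}
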
#endif /* #ifndef _LINUX_EVENTPOLL_H */

View file

@ -221,6 +221,8 @@ struct export_operations {
#define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply
atomic attribute updates
*/
#define EXPORT_OP_SYNC_LOCKS (0x20) /* Filesystem can't do
asynchronous blocking locks */
unsigned long flags;
};

View file

@ -2,6 +2,7 @@
#ifndef _LINUX_FB_H
#define _LINUX_FB_H
#include <linux/refcount.h>
#include <linux/kgdb.h>
#include <uapi/linux/fb.h>
@ -435,7 +436,7 @@ struct fb_tile_ops {
struct fb_info {
atomic_t count;
refcount_t count;
int node;
int flags;
/*

View file

@ -5,8 +5,6 @@
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__
#include <stdarg.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/compat.h>
@ -574,7 +572,8 @@ struct bpf_prog {
kprobe_override:1, /* Do we override a kprobe? */
has_callchain_buf:1, /* callchain buffer allocated? */
enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */
call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
call_get_func_ip:1; /* Do we call get_func_ip() */
enum bpf_prog_type type; /* Type of BPF program */
enum bpf_attach_type expected_attach_type; /* For some prog types */
u32 len; /* Number of filter blocks */
@ -599,25 +598,38 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
u32 __ret; \
cant_migrate(); \
if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
struct bpf_prog_stats *__stats; \
u64 __start = sched_clock(); \
__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
__stats = this_cpu_ptr(prog->stats); \
u64_stats_update_begin(&__stats->syncp); \
__stats->cnt++; \
__stats->nsecs += sched_clock() - __start; \
u64_stats_update_end(&__stats->syncp); \
} else { \
__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
} \
__ret; })
typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
const struct bpf_insn *insnsi,
unsigned int (*bpf_func)(const void *,
const struct bpf_insn *));
#define BPF_PROG_RUN(prog, ctx) \
__BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)
static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
const void *ctx,
bpf_dispatcher_fn dfunc)
{
u32 ret;
cant_migrate();
if (static_branch_unlikely(&bpf_stats_enabled_key)) {
struct bpf_prog_stats *stats;
u64 start = sched_clock();
ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
stats = this_cpu_ptr(prog->stats);
u64_stats_update_begin(&stats->syncp);
stats->cnt++;
stats->nsecs += sched_clock() - start;
u64_stats_update_end(&stats->syncp);
} else {
ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
}
return ret;
}
static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
{
return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
}
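Callers that used the old BPF_PROG_RUN() macro now call bpf_prog_run() directly; in a preemptible context the pattern is the sketch below, which is essentially what bpf_prog_run_pin_on_cpu() further down already wraps up:

	static u32 example_run_filter(const struct bpf_prog *prog, struct sk_buff *skb)
	{
		u32 ret;

		migrate_disable();		/* bpf_prog_run() must not migrate */
		ret = bpf_prog_run(prog, skb);
		migrate_enable();

		return ret;
	}
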
/*
* Use in preemptible and therefore migratable context to make sure that
@ -636,7 +648,7 @@ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
u32 ret;
migrate_disable();
ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func);
ret = bpf_prog_run(prog, ctx);
migrate_enable();
return ret;
}
@ -709,7 +721,7 @@ static inline void bpf_restore_data_end(
cb->data_end = saved_data_end;
}
static inline u8 *bpf_skb_cb(struct sk_buff *skb)
static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
* data between tail calls. Since this also needs to work with
@ -730,8 +742,9 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
/* Must be invoked with migration disabled */
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
const void *ctx)
{
const struct sk_buff *skb = ctx;
u8 *cb_data = bpf_skb_cb(skb);
u8 cb_saved[BPF_SKB_CB_LEN];
u32 res;
@ -741,7 +754,7 @@ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
memset(cb_data, 0, sizeof(cb_saved));
}
res = BPF_PROG_RUN(prog, skb);
res = bpf_prog_run(prog, skb);
if (unlikely(prog->cb_access))
memcpy(cb_data, cb_saved, sizeof(cb_saved));
@ -775,6 +788,10 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
DECLARE_BPF_DISPATCHER(xdp)
DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
u32 xdp_master_redirect(struct xdp_buff *xdp);
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
struct xdp_buff *xdp)
{
@ -782,7 +799,14 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
* under local_bh_disable(), which provides the needed RCU protection
* for accessing map entries.
*/
return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
act = xdp_master_redirect(xdp);
}
return act;
}
void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
@ -1428,7 +1452,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
};
u32 act;
act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
if (act == SK_PASS) {
selected_sk = ctx.selected_sk;
no_reuseport = ctx.no_reuseport;
@ -1466,7 +1490,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
};
u32 act;
act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
if (act == SK_PASS) {
selected_sk = ctx.selected_sk;
no_reuseport = ctx.no_reuseport;

View file

@ -52,6 +52,10 @@
#define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U
#define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U
/* Loader commands */
#define PM_LOAD_PDI 0x701
#define PDI_SRC_DDR 0xF
/*
* Firmware FPGA Manager flags
* XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration
@ -411,6 +415,7 @@ int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
u32 *value);
int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
u32 value);
int zynqmp_pm_load_pdi(const u32 src, const u64 address);
#else
static inline int zynqmp_pm_get_api_version(u32 *version)
{
@ -622,6 +627,11 @@ static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
{
return -ENODEV;
}
static inline int zynqmp_pm_load_pdi(const u32 src, const u64 address)
{
return -ENODEV;
}
#endif
#endif /* __FIRMWARE_ZYNQMP_H__ */

View file

@ -110,7 +110,7 @@ struct fpga_image_info {
* @initial_header_size: Maximum number of bytes that should be passed into write_init
* @state: returns an enum value of the FPGA's state
* @status: returns status of the FPGA, including reconfiguration error code
* @write_init: prepare the FPGA to receive confuration data
* @write_init: prepare the FPGA to receive configuration data
* @write: write count bytes of configuration data to the FPGA
* @write_sg: write the scatter list of configuration data to the FPGA
* @write_complete: set FPGA to operating state after writing is done

View file

@ -319,6 +319,8 @@ enum rw_hint {
/* iocb->ki_waitq is valid */
#define IOCB_WAITQ (1 << 19)
#define IOCB_NOIO (1 << 20)
/* can use bio alloc cache */
#define IOCB_ALLOC_CACHE (1 << 21)
struct kiocb {
struct file *ki_filp;
@ -586,6 +588,11 @@ static inline void mapping_allow_writable(struct address_space *mapping)
struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
/*
* ACL_DONT_CACHE is for stacked filesystems, that rely on underlying fs to
* cache the ACL. This also means that ->get_acl() can be called in RCU mode
* with the LOOKUP_RCU flag.
*/
#define ACL_DONT_CACHE ((void *)(-3))
static inline struct posix_acl *
@ -1035,6 +1042,7 @@ static inline struct file *get_file(struct file *f)
#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
#define FL_LAYOUT 2048 /* outstanding pNFS layout */
#define FL_RECLAIM 4096 /* reclaiming from a reboot server */
#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
@ -2106,7 +2114,7 @@ struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
int (*permission) (struct user_namespace *, struct inode *, int);
struct posix_acl * (*get_acl)(struct inode *, int);
struct posix_acl * (*get_acl)(struct inode *, int, bool);
int (*readlink) (struct dentry *, char __user *,int);
@ -2498,7 +2506,6 @@ static inline void file_accessed(struct file *file)
extern int file_modified(struct file *file);
int sync_inode(struct inode *inode, struct writeback_control *wbc);
int sync_inode_metadata(struct inode *inode, int wait);
struct file_system_type {
@ -2744,6 +2751,7 @@ static inline struct file *file_clone_open(struct file *file)
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname_flags(const char __user *, int, int *);
extern struct filename *getname_uflags(const char __user *, int);
extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
extern void putname(struct filename *name);
@ -2849,6 +2857,8 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end);
extern int filemap_check_errors(struct address_space *mapping);
extern void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
struct writeback_control *wbc);
static inline int filemap_write_and_wait(struct address_space *mapping)
{
@ -3013,15 +3023,20 @@ static inline void file_end_write(struct file *file)
}
/*
* This is used for regular files where some users -- especially the
* currently executed binary in a process, previously handled via
* VM_DENYWRITE -- cannot handle concurrent write (and maybe mmap
* read-write shared) accesses.
*
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
* This is used for regular files.
* We cannot support write (and maybe mmap read-write shared) accesses and
* MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode
* can have the following values:
* 0: no writers, no VM_DENYWRITE mappings
* < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist
* > 0: (i_writecount) users are writing to the file.
* deny_write_access() denies write access to a file.
* allow_write_access() re-enables write access to a file.
*
* The i_writecount field of an inode can have the following values:
* 0: no write access, no denied write access
* < 0: (-i_writecount) users that denied write access to the file.
* > 0: (i_writecount) users that have write access to the file.
*
* Normally we operate on that counter with atomic_{inc,dec} and it's safe
* except for the cases where we don't hold i_writecount yet. Then we need to
@ -3204,10 +3219,6 @@ ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
struct iov_iter *iter);
/* fs/block_dev.c */
extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
int datasync);
/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
@ -3313,6 +3324,7 @@ extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
void generic_fillattr(struct user_namespace *, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
@ -3427,6 +3439,8 @@ extern int buffer_migrate_page_norefs(struct address_space *,
#define buffer_migrate_page_norefs NULL
#endif
int may_setattr(struct user_namespace *mnt_userns, struct inode *inode,
unsigned int ia_valid);
int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
void setattr_copy(struct user_namespace *, struct inode *inode,
@ -3580,7 +3594,7 @@ int proc_nr_dentry(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int proc_nr_inodes(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int __init get_filesystem_list(char *buf);
int __init list_bdev_fs_names(char *buf, size_t size);
#define __FMODE_EXEC ((__force int) FMODE_EXEC)
#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)

View file

@ -147,7 +147,6 @@ struct fscache_retrieval {
fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
void *context; /* netfs read context (pinned) */
struct list_head to_do; /* list of things to be done by the backend */
unsigned long start_time; /* time at which retrieval started */
atomic_t n_pages; /* number of pages to be retrieved */
};
@ -385,9 +384,6 @@ struct fscache_object {
struct list_head dependents; /* FIFO of dependent objects */
struct list_head dep_link; /* link in parent's dependents list */
struct list_head pending_ops; /* unstarted operations on this object */
#ifdef CONFIG_FSCACHE_OBJECT_LIST
struct rb_node objlist_link; /* link in global object list */
#endif
pgoff_t store_limit; /* current storage limit */
loff_t store_limit_l; /* current storage limit */
};

View file

@ -123,15 +123,17 @@ struct fscache_netfs {
* - indices are created on disk just-in-time
*/
struct fscache_cookie {
atomic_t usage; /* number of users of this cookie */
refcount_t ref; /* number of users of this cookie */
atomic_t n_children; /* number of children of this cookie */
atomic_t n_active; /* number of active users of netfs ptrs */
unsigned int debug_id;
spinlock_t lock;
spinlock_t stores_lock; /* lock on page store tree */
struct hlist_head backing_objects; /* object(s) backing this file/index */
const struct fscache_cookie_def *def; /* definition */
struct fscache_cookie *parent; /* parent of this entry */
struct hlist_bl_node hash_link; /* Link in hash table */
struct list_head proc_link; /* Link in proc list */
void *netfs_data; /* back pointer to netfs */
struct radix_tree_root stores; /* pages to be stored on this cookie */
#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */

View file

@ -47,27 +47,128 @@ struct fscrypt_name {
#define FSCRYPT_SET_CONTEXT_MAX_SIZE 40
#ifdef CONFIG_FS_ENCRYPTION
/*
* fscrypt superblock flags
* If set, the fscrypt bounce page pool won't be allocated (unless another
* filesystem needs it). Set this if the filesystem always uses its own bounce
* pages for writes and therefore won't need the fscrypt bounce page pool.
*/
#define FS_CFLG_OWN_PAGES (1U << 1)
/*
* crypto operations for filesystems
*/
/* Crypto operations for filesystems */
struct fscrypt_operations {
/* Set of optional flags; see above for allowed flags */
unsigned int flags;
/*
* If set, this is a filesystem-specific key description prefix that
* will be accepted for "logon" keys for v1 fscrypt policies, in
* addition to the generic prefix "fscrypt:". This functionality is
* deprecated, so new filesystems shouldn't set this field.
*/
const char *key_prefix;
/*
* Get the fscrypt context of the given inode.
*
* @inode: the inode whose context to get
* @ctx: the buffer into which to get the context
* @len: length of the @ctx buffer in bytes
*
* Return: On success, returns the length of the context in bytes; this
* may be less than @len. On failure, returns -ENODATA if the
* inode doesn't have a context, -ERANGE if the context is
* longer than @len, or another -errno code.
*/
int (*get_context)(struct inode *inode, void *ctx, size_t len);
/*
* Set an fscrypt context on the given inode.
*
* @inode: the inode whose context to set. The inode won't already have
* an fscrypt context.
* @ctx: the context to set
* @len: length of @ctx in bytes (at most FSCRYPT_SET_CONTEXT_MAX_SIZE)
* @fs_data: If called from fscrypt_set_context(), this will be the
* value the filesystem passed to fscrypt_set_context().
* Otherwise (i.e. when called from
* FS_IOC_SET_ENCRYPTION_POLICY) this will be NULL.
*
* i_rwsem will be held for write.
*
* Return: 0 on success, -errno on failure.
*/
int (*set_context)(struct inode *inode, const void *ctx, size_t len,
void *fs_data);
/*
* Get the dummy fscrypt policy in use on the filesystem (if any).
*
* Filesystems only need to implement this function if they support the
* test_dummy_encryption mount option.
*
* Return: A pointer to the dummy fscrypt policy, if the filesystem is
* mounted with test_dummy_encryption; otherwise NULL.
*/
const union fscrypt_policy *(*get_dummy_policy)(struct super_block *sb);
/*
* Check whether a directory is empty. i_rwsem will be held for write.
*/
bool (*empty_dir)(struct inode *inode);
/* The filesystem's maximum ciphertext filename length, in bytes */
unsigned int max_namelen;
/*
* Check whether the filesystem's inode numbers and UUID are stable,
* meaning that they will never be changed even by offline operations
* such as filesystem shrinking and therefore can be used in the
* encryption without the possibility of files becoming unreadable.
*
* Filesystems only need to implement this function if they want to
* support the FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags. These
* flags are designed to work around the limitations of UFS and eMMC
* inline crypto hardware, and they shouldn't be used in scenarios where
* such hardware isn't being used.
*
* Leaving this NULL is equivalent to always returning false.
*/
bool (*has_stable_inodes)(struct super_block *sb);
/*
* Get the number of bits that the filesystem uses to represent inode
* numbers and file logical block numbers.
*
* By default, both of these are assumed to be 64-bit. This function
* can be implemented to declare that either or both of these numbers is
* shorter, which may allow the use of the
* FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of
* inline crypto hardware whose maximum DUN length is less than 64 bits
* (e.g., eMMC v5.2 spec compliant hardware). This function only needs
* to be implemented if support for one of these features is needed.
*/
void (*get_ino_and_lblk_bits)(struct super_block *sb,
int *ino_bits_ret, int *lblk_bits_ret);
/*
* Return the number of block devices to which the filesystem may write
* encrypted file contents.
*
* If the filesystem can use multiple block devices (other than block
* devices that aren't used for encrypted file contents, such as
* external journal devices), and wants to support inline encryption,
* then it must implement this function. Otherwise it's not needed.
*/
int (*get_num_devices)(struct super_block *sb);
/*
* If ->get_num_devices() returns a value greater than 1, then this
* function is called to get the array of request_queues that the
* filesystem is using -- one per block device. (There may be duplicate
* entries in this array, as block devices can share a request_queue.)
*/
void (*get_devices)(struct super_block *sb,
struct request_queue **devs);
};
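A hedged skeleton for a hypothetical filesystem ("examplefs") that keeps the context in an xattr; only the field names come from the structure above, every helper body is illustrative:

	static int examplefs_get_context(struct inode *inode, void *ctx, size_t len)
	{
		/* e.g. return examplefs_getxattr(inode, EXAMPLEFS_XATTR_CRYPT, ctx, len); */
		return -ENODATA;
	}

	static int examplefs_set_context(struct inode *inode, const void *ctx,
					 size_t len, void *fs_data)
	{
		/* e.g. return examplefs_setxattr(inode, EXAMPLEFS_XATTR_CRYPT, ctx, len); */
		return 0;
	}

	static bool examplefs_empty_dir(struct inode *inode)
	{
		return true;	/* a real filesystem walks the directory here */
	}

	static const struct fscrypt_operations examplefs_cryptops = {
		.get_context	= examplefs_get_context,
		.set_context	= examplefs_set_context,
		.empty_dir	= examplefs_empty_dir,
		.max_namelen	= 255,
	};

The superblock would then be wired up with fscrypt_set_ops(sb, &examplefs_cryptops) at mount time.
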
@ -253,6 +354,7 @@ int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
unsigned int max_size,
struct delayed_call *done);
int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat);
static inline void fscrypt_set_ops(struct super_block *sb,
const struct fscrypt_operations *s_cop)
{
@ -583,6 +685,12 @@ static inline const char *fscrypt_get_symlink(struct inode *inode,
return ERR_PTR(-EOPNOTSUPP);
}
static inline int fscrypt_symlink_getattr(const struct path *path,
struct kstat *stat)
{
return -EOPNOTSUPP;
}
static inline void fscrypt_set_ops(struct super_block *sb,
const struct fscrypt_operations *s_cop)
{

View file

@ -423,7 +423,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev);
struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
u16 if_id);
extern struct bus_type fsl_mc_bus_type;

View file

@ -8,34 +8,11 @@
/* All generic netlink requests are serialized by a global lock. */
extern void genl_lock(void);
extern void genl_unlock(void);
#ifdef CONFIG_LOCKDEP
extern bool lockdep_genl_is_held(void);
#endif
/* for synchronisation between af_netlink and genetlink */
extern atomic_t genl_sk_destructing_cnt;
extern wait_queue_head_t genl_sk_destructing_waitq;
/**
* rcu_dereference_genl - rcu_dereference with debug checking
* @p: The pointer to read, prior to dereferencing
*
* Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
* or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference()
*/
#define rcu_dereference_genl(p) \
rcu_dereference_check(p, lockdep_genl_is_held())
/**
* genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
* the READ_ONCE(), because caller holds genl mutex.
*/
#define genl_dereference(p) \
rcu_dereference_protected(p, lockdep_genl_is_held())
#define MODULE_ALIAS_GENL_FAMILY(family)\
MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)

View file

@ -60,9 +60,6 @@ struct partition_meta_info {
* device.
* Affects responses to the ``CDROM_GET_CAPABILITY`` ioctl.
*
* ``GENHD_FL_UP`` (0x0010): indicates that the block device is "up",
* with a similar meaning to network interfaces.
*
* ``GENHD_FL_SUPPRESS_PARTITION_INFO`` (0x0020): don't include
* partition information in ``/proc/partitions`` or in the output of
* printk_all_partitions().
@ -97,7 +94,6 @@ struct partition_meta_info {
/* 2 is unused (used to be GENHD_FL_DRIVERFS) */
/* 4 is unused (used to be GENHD_FL_MEDIA_CHANGE_NOTIFY) */
#define GENHD_FL_CD 0x0008
#define GENHD_FL_UP 0x0010
#define GENHD_FL_SUPPRESS_PARTITION_INFO 0x0020
#define GENHD_FL_EXT_DEVT 0x0040
#define GENHD_FL_NATIVE_CAPACITY 0x0080
@ -153,13 +149,15 @@ struct gendisk {
unsigned long state;
#define GD_NEED_PART_SCAN 0
#define GD_READ_ONLY 1
#define GD_QUEUE_REF 2
struct mutex open_mutex; /* open/close mutex */
unsigned open_partitions; /* number of open partitions */
struct backing_dev_info *bdi;
struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
struct list_head slave_bdevs;
#endif
struct timer_rand_state *random;
atomic_t sync_io; /* RAID */
struct disk_events *ev;
@ -172,8 +170,14 @@ struct gendisk {
int node_id;
struct badblocks *bb;
struct lockdep_map lockdep_map;
u64 diskseq;
};
static inline bool disk_live(struct gendisk *disk)
{
return !inode_unhashed(disk->part0->bd_inode);
}
/*
* The gendisk is refcounted by the part0 block_device, and the bd_device
* therein is also used for device model presentation in sysfs.
@ -210,18 +214,12 @@ static inline dev_t disk_devt(struct gendisk *disk)
void disk_uevent(struct gendisk *disk, enum kobject_action action);
/* block/genhd.c */
extern void device_add_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups);
static inline void add_disk(struct gendisk *disk)
int device_add_disk(struct device *parent, struct gendisk *disk,
const struct attribute_group **groups);
static inline int add_disk(struct gendisk *disk)
{
device_add_disk(NULL, disk, NULL);
return device_add_disk(NULL, disk, NULL);
}
extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
static inline void add_disk_no_queue_reg(struct gendisk *disk)
{
device_add_disk_no_queue_reg(NULL, disk);
}
extern void del_gendisk(struct gendisk *gp);
void set_disk_ro(struct gendisk *disk, bool read_only);
@ -236,6 +234,7 @@ extern void disk_block_events(struct gendisk *disk);
extern void disk_unblock_events(struct gendisk *disk);
extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
bool disk_force_media_change(struct gendisk *disk, unsigned int events);
/* drivers/char/random.c */
extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
@ -259,26 +258,10 @@ static inline sector_t get_capacity(struct gendisk *disk)
int bdev_disk_changed(struct gendisk *disk, bool invalidate);
void blk_drop_partitions(struct gendisk *disk);
extern struct gendisk *__alloc_disk_node(int minors, int node_id);
struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
struct lock_class_key *lkclass);
extern void put_disk(struct gendisk *disk);
#define alloc_disk_node(minors, node_id) \
({ \
static struct lock_class_key __key; \
const char *__name; \
struct gendisk *__disk; \
\
__name = "(gendisk_completion)"#minors"("#node_id")"; \
\
__disk = __alloc_disk_node(minors, node_id); \
\
if (__disk) \
lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \
\
__disk; \
})
#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
/**
* blk_alloc_disk - allocate a gendisk structure
@ -291,15 +274,10 @@ extern void put_disk(struct gendisk *disk);
*/
#define blk_alloc_disk(node_id) \
({ \
struct gendisk *__disk = __blk_alloc_disk(node_id); \
static struct lock_class_key __key; \
\
if (__disk) \
lockdep_init_map(&__disk->lockdep_map, \
"(bio completion)", &__key, 0); \
__disk; \
__blk_alloc_disk(node_id, &__key); \
})
struct gendisk *__blk_alloc_disk(int node);
void blk_cleanup_disk(struct gendisk *disk);
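With device_add_disk()/add_disk() now returning an error code, a bio-based driver's setup path might look roughly like this sketch (names hypothetical, error handling simplified):

	static struct gendisk *example_create_disk(const struct block_device_operations *fops)
	{
		struct gendisk *disk;
		int err;

		disk = blk_alloc_disk(NUMA_NO_NODE);
		if (!disk)
			return ERR_PTR(-ENOMEM);

		disk->fops = fops;
		snprintf(disk->disk_name, sizeof(disk->disk_name), "exampledisk0");
		set_capacity(disk, 2048);	/* 1 MiB in 512-byte sectors */

		err = add_disk(disk);		/* now returns 0 or a negative errno */
		if (err) {
			blk_cleanup_disk(disk);
			return ERR_PTR(err);
		}
		return disk;
	}
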
int __register_blkdev(unsigned int major, const char *name,
@ -316,9 +294,10 @@ void set_capacity(struct gendisk *disk, sector_t size);
int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
#ifdef CONFIG_SYSFS
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
int bd_register_pending_holders(struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
struct gendisk *disk)
@ -329,9 +308,14 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev,
struct gendisk *disk)
{
}
#endif /* CONFIG_SYSFS */
static inline int bd_register_pending_holders(struct gendisk *disk)
{
return 0;
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */
dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);
#ifdef CONFIG_BLOCK

View file

@ -609,7 +609,7 @@ struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev,
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO)
struct device_node;
struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label);
@ -619,7 +619,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
struct device_node;
static inline
struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label)
@ -633,7 +633,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
struct device_node;
struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
struct device_node *node,
const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label);
@ -644,7 +644,7 @@ struct device_node;
static inline
struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
struct device_node *node,
const struct device_node *node,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label)
@ -680,10 +680,10 @@ struct acpi_gpio_mapping {
unsigned int quirks;
};
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI)
struct acpi_device;
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI)
int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios);
void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
@ -696,8 +696,6 @@ struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label);
#else /* CONFIG_GPIOLIB && CONFIG_ACPI */
struct acpi_device;
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios)
{

View file

@ -312,6 +312,9 @@ struct gpio_irq_chip {
* get rid of the static GPIO number space in the long run.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
* handled is (base + ngpio - 1).
* @offset: when multiple gpio chips belong to the same device this
* can be used as offset within the device so friendly names can
* be properly assigned.
* @names: if set, must be an array of strings to use as alternative
* names for the GPIOs in this chip. Any entry in the array
* may be NULL if there is no alias for the GPIO, however the
@ -398,6 +401,7 @@ struct gpio_chip {
int base;
u16 ngpio;
u16 offset;
const char *const *names;
bool can_sleep;

View file

@ -116,7 +116,6 @@ extern void rcu_nmi_exit(void);
do { \
lockdep_off(); \
arch_nmi_enter(); \
printk_nmi_enter(); \
BUG_ON(in_nmi() == NMI_MASK); \
__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
} while (0)
@ -135,7 +134,6 @@ extern void rcu_nmi_exit(void);
do { \
BUG_ON(!in_nmi()); \
__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
printk_nmi_exit(); \
arch_nmi_exit(); \
lockdep_on(); \
} while (0)

View file

@ -22,7 +22,7 @@ struct hdlc_proto {
void (*start)(struct net_device *dev); /* if open & DCD */
void (*stop)(struct net_device *dev); /* if open & !DCD */
void (*detach)(struct net_device *dev);
int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
int (*ioctl)(struct net_device *dev, struct if_settings *ifs);
__be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
int (*netif_rx)(struct sk_buff *skb);
netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
@ -54,7 +54,7 @@ typedef struct hdlc_device {
/* Exported from hdlc module */
/* Called by hardware driver when a user requests HDLC service */
int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs);
/* Must be used by hardware driver on module startup/exit */
#define register_hdlc_device(dev) register_netdev(dev)

View file

@ -79,7 +79,7 @@ struct hdlcdrv_ops {
*/
int (*open)(struct net_device *);
int (*close)(struct net_device *);
int (*ioctl)(struct net_device *, struct ifreq *,
int (*ioctl)(struct net_device *, void __user *,
struct hdlcdrv_ioctl *, int);
};

View file

@ -90,7 +90,11 @@ static inline void __kunmap_local(void *vaddr)
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
preempt_disable();
if (IS_ENABLED(CONFIG_PREEMPT_RT))
migrate_disable();
else
preempt_disable();
pagefault_disable();
return __kmap_local_page_prot(page, prot);
}
@ -102,7 +106,11 @@ static inline void *kmap_atomic(struct page *page)
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
preempt_disable();
if (IS_ENABLED(CONFIG_PREEMPT_RT))
migrate_disable();
else
preempt_disable();
pagefault_disable();
return __kmap_local_pfn_prot(pfn, kmap_prot);
}
@ -111,7 +119,10 @@ static inline void __kunmap_atomic(void *addr)
{
kunmap_local_indexed(addr);
pagefault_enable();
preempt_enable();
if (IS_ENABLED(CONFIG_PREEMPT_RT))
migrate_enable();
else
preempt_enable();
}
unsigned int __nr_free_highpages(void);
@ -179,7 +190,10 @@ static inline void __kunmap_local(void *addr)
static inline void *kmap_atomic(struct page *page)
{
preempt_disable();
if (IS_ENABLED(CONFIG_PREEMPT_RT))
migrate_disable();
else
preempt_disable();
pagefault_disable();
return page_address(page);
}
@ -200,7 +214,10 @@ static inline void __kunmap_atomic(void *addr)
kunmap_flush_on_unmap(addr);
#endif
pagefault_enable();
preempt_enable();
if (IS_ENABLED(CONFIG_PREEMPT_RT))
migrate_enable();
else
preempt_enable();
}
static inline unsigned int nr_free_highpages(void) { return 0; }
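Callers are unchanged by the PREEMPT_RT switch from preempt_disable() to migrate_disable(); a typical short mapping section still looks like this sketch, it simply stays preemptible on RT:

	static void example_zero_highpage_range(struct page *page, size_t off, size_t len)
	{
		void *kaddr = kmap_atomic(page);

		memset(kaddr + off, 0, len);	/* caller ensures off + len <= PAGE_SIZE */
		kunmap_atomic(kaddr);
	}
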

View file

@ -130,10 +130,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page
}
#endif
#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}

View file

@ -170,6 +170,8 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
u32 syncpt_id);
struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold);
/*
* host1x channel
*/
@ -216,8 +218,8 @@ struct host1x_job {
struct host1x_client *client;
/* Gathers and their memory */
struct host1x_job_gather *gathers;
unsigned int num_gathers;
struct host1x_job_cmd *cmds;
unsigned int num_cmds;
/* Array of handles to be pinned & unpinned */
struct host1x_reloc *relocs;
@ -234,9 +236,15 @@ struct host1x_job {
u32 syncpt_incrs;
u32 syncpt_end;
/* Completion waiter ref */
void *waiter;
/* Maximum time to wait for this job */
unsigned int timeout;
/* Job has timed out and should be released */
bool cancelled;
/* Index and number of slots used in the push buffer */
unsigned int first_get;
unsigned int num_slots;
@ -257,12 +265,25 @@ struct host1x_job {
/* Add a channel wait for previous ops to complete */
bool serialize;
/* Fast-forward syncpoint increments on job timeout */
bool syncpt_recovery;
/* Callback called when job is freed */
void (*release)(struct host1x_job *job);
void *user_data;
/* Whether the host1x-side firewall should be run for this job or not */
bool enable_firewall;
};
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
u32 num_cmdbufs, u32 num_relocs);
u32 num_cmdbufs, u32 num_relocs,
bool skip_firewall);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
unsigned int words, unsigned int offset);
void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);

View file

@ -858,6 +858,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
static inline void hugetlb_count_init(struct mm_struct *mm)
{
atomic_long_set(&mm->hugetlb_usage, 0);
}
static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
atomic_long_add(l, &mm->hugetlb_usage);
@ -1042,6 +1047,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
return &mm->page_table_lock;
}
static inline void hugetlb_count_init(struct mm_struct *mm)
{
}
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

View file

@ -121,6 +121,13 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
css_put(&h_cg->css);
}
static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
struct resv_map *resv_map)
{
if (resv_map->css)
css_get(resv_map->css);
}
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
@ -199,6 +206,11 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
}
static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
struct resv_map *resv_map)
{
}
static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr)
{

View file

@ -538,12 +538,6 @@ struct vmbus_channel_rescind_offer {
u32 child_relid;
} __packed;
static inline u32
hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
{
return rbi->ring_buffer->pending_send_sz;
}
/*
* Request Offer -- no parameters, SynIC message contains the partition ID
* Set Snoop -- no parameters, SynIC message contains the partition ID
@ -1092,16 +1086,6 @@ static inline void set_channel_pending_send_size(struct vmbus_channel *c,
c->outbound.ring_buffer->pending_send_sz = size;
}
static inline void set_low_latency_mode(struct vmbus_channel *c)
{
c->low_latency = true;
}
static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
c->low_latency = false;
}
void vmbus_onmessage(struct vmbus_channel_message_header *hdr);
int vmbus_request_offers(void);

View file

@ -1010,6 +1010,7 @@ struct acpi_resource_i2c_serialbus;
#if IS_ENABLED(CONFIG_ACPI)
bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
struct acpi_resource_i2c_serialbus **i2c);
int i2c_acpi_client_count(struct acpi_device *adev);
u32 i2c_acpi_find_bus_speed(struct device *dev);
struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
struct i2c_board_info *info);
@ -1020,6 +1021,10 @@ static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
{
return false;
}
static inline int i2c_acpi_client_count(struct acpi_device *adev)
{
return 0;
}
static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
{
return 0;

View file

@ -1088,6 +1088,48 @@ struct ieee80211_ext {
} u;
} __packed __aligned(2);
#define IEEE80211_TWT_CONTROL_NDP BIT(0)
#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1)
#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3)
#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4)
#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5)
#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0)
#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1)
#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4)
#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5)
#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6)
#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7)
#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10)
#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15)
enum ieee80211_twt_setup_cmd {
TWT_SETUP_CMD_REQUEST,
TWT_SETUP_CMD_SUGGEST,
TWT_SETUP_CMD_DEMAND,
TWT_SETUP_CMD_GROUPING,
TWT_SETUP_CMD_ACCEPT,
TWT_SETUP_CMD_ALTERNATE,
TWT_SETUP_CMD_DICTATE,
TWT_SETUP_CMD_REJECT,
};
struct ieee80211_twt_params {
__le16 req_type;
__le64 twt;
u8 min_twt_dur;
__le16 mantissa;
u8 channel;
} __packed;
struct ieee80211_twt_setup {
u8 dialog_token;
u8 element_id;
u8 length;
u8 control;
u8 params[];
} __packed;
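A hedged sketch of filling the request-type word of a TWT Setup request with the masks above; the values are arbitrary and le16_encode_bits() comes from linux/bitfield.h:

	static void example_fill_twt_params(struct ieee80211_twt_params *twt)
	{
		__le16 req_type;

		req_type = le16_encode_bits(TWT_SETUP_CMD_REQUEST,
					    IEEE80211_TWT_REQTYPE_SETUP_CMD);
		req_type |= cpu_to_le16(IEEE80211_TWT_REQTYPE_REQUEST |
					IEEE80211_TWT_REQTYPE_IMPLICIT);

		twt->req_type = req_type;
		twt->min_twt_dur = 255;			/* nominal minimum wake duration */
		twt->mantissa = cpu_to_le16(512);	/* wake interval mantissa */
	}
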
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@ -1252,6 +1294,10 @@ struct ieee80211_mgmt {
__le16 toa_error;
u8 variable[0];
} __packed ftm;
struct {
u8 action_code;
u8 variable[];
} __packed s1g;
} u;
} __packed action;
} u;
@ -2266,6 +2312,9 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
/**
* ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
* @primary: primary channel
@ -2282,12 +2331,51 @@ struct ieee80211_he_6ghz_oper {
#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2
#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3
#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4
#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38
u8 control;
u8 ccfs0;
u8 ccfs1;
u8 minrate;
} __packed;
/*
* In "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021",
* it show four types in "Table 9-275a-Maximum Transmit Power Interpretation
* subfield encoding", and two category for each type in "Table E-12-Regulatory
* Info subfield encoding in the United States".
* So it it totally max 8 Transmit Power Envelope element.
*/
#define IEEE80211_TPE_MAX_IE_COUNT 8
/*
* In "Table 9-277—Meaning of Maximum Transmit Power Count subfield"
* of "IEEE Std 802.11ax™2021", the max power level is 8.
*/
#define IEEE80211_MAX_NUM_PWR_LEVEL 8
#define IEEE80211_TPE_MAX_POWER_COUNT 8
/* transmit power interpretation type of transmit power envelope element */
enum ieee80211_tx_power_intrpt_type {
IEEE80211_TPE_LOCAL_EIRP,
IEEE80211_TPE_LOCAL_EIRP_PSD,
IEEE80211_TPE_REG_CLIENT_EIRP,
IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
};
/**
* struct ieee80211_tx_pwr_env
*
* This structure represents the "Transmit Power Envelope element"
*/
struct ieee80211_tx_pwr_env {
u8 tx_power_info;
s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT];
} __packed;
#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7
#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38
#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0
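A sketch of decoding the info byte of a received Transmit Power Envelope element with the masks above; u8_get_bits() is from linux/bitfield.h and the helper name is hypothetical:

	static void example_parse_tx_pwr_env(const struct ieee80211_tx_pwr_env *env)
	{
		u8 count = u8_get_bits(env->tx_power_info, IEEE80211_TX_PWR_ENV_INFO_COUNT);
		u8 interpret = u8_get_bits(env->tx_power_info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
		u8 category = u8_get_bits(env->tx_power_info, IEEE80211_TX_PWR_ENV_INFO_CATEGORY);

		pr_debug("TPE: count %u interpretation %u category %u first power %d\n",
			 count, interpret, category, env->tx_power[0]);
	}
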
/*
* ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
* @he_oper_ie: byte data of the HE Operations IE, starting from the byte
@ -2869,7 +2957,7 @@ enum ieee80211_eid {
WLAN_EID_VHT_OPERATION = 192,
WLAN_EID_EXTENDED_BSS_LOAD = 193,
WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
WLAN_EID_TX_POWER_ENVELOPE = 195,
WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
WLAN_EID_AID = 197,
WLAN_EID_QUIET_CHANNEL = 198,
@ -2881,6 +2969,7 @@ enum ieee80211_eid {
WLAN_EID_AID_RESPONSE = 211,
WLAN_EID_S1G_BCN_COMPAT = 213,
WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214,
WLAN_EID_S1G_TWT = 216,
WLAN_EID_S1G_CAPABILITIES = 217,
WLAN_EID_VENDOR_SPECIFIC = 221,
WLAN_EID_QOS_PARAMETER = 222,
@ -2950,6 +3039,7 @@ enum ieee80211_category {
WLAN_CATEGORY_FST = 18,
WLAN_CATEGORY_UNPROT_DMG = 20,
WLAN_CATEGORY_VHT = 21,
WLAN_CATEGORY_S1G = 22,
WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
@ -3023,6 +3113,20 @@ enum ieee80211_key_len {
WLAN_KEY_LEN_BIP_GMAC_256 = 32,
};
enum ieee80211_s1g_actioncode {
WLAN_S1G_AID_SWITCH_REQUEST,
WLAN_S1G_AID_SWITCH_RESPONSE,
WLAN_S1G_SYNC_CONTROL,
WLAN_S1G_STA_INFO_ANNOUNCE,
WLAN_S1G_EDCA_PARAM_SET,
WLAN_S1G_EL_OPERATION,
WLAN_S1G_TWT_SETUP,
WLAN_S1G_TWT_TEARDOWN,
WLAN_S1G_SECT_GROUP_ID_LIST,
WLAN_S1G_SECT_ID_FEEDBACK,
WLAN_S1G_TWT_INFORMATION = 11,
};
#define IEEE80211_WEP_IV_LEN 4
#define IEEE80211_WEP_ICV_LEN 4
#define IEEE80211_CCMP_HDR_LEN 8

View file

@ -57,10 +57,16 @@ struct br_ip_list {
#define BR_MRP_AWARE BIT(17)
#define BR_MRP_LOST_CONT BIT(18)
#define BR_MRP_LOST_IN_CONT BIT(19)
#define BR_TX_FWD_OFFLOAD BIT(20)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
struct net_bridge;
void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br,
unsigned int cmd, struct ifreq *ifr,
void __user *uarg));
int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd,
struct ifreq *ifr, void __user *uarg);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
@ -70,9 +76,6 @@ bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto);
bool br_multicast_enabled(const struct net_device *dev);
bool br_multicast_router(const struct net_device *dev);
int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
const void *ctx, bool adding, struct notifier_block *nb,
struct netlink_ext_ack *extack);
#else
static inline int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list)
@ -104,13 +107,6 @@ static inline bool br_multicast_router(const struct net_device *dev)
{
return false;
}
static inline int br_mdb_replay(const struct net_device *br_dev,
const struct net_device *dev, const void *ctx,
bool adding, struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
#endif
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
@ -120,9 +116,8 @@ int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid);
int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto);
int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo);
int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
const void *ctx, bool adding, struct notifier_block *nb,
struct netlink_ext_ack *extack);
int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo);
#else
static inline bool br_vlan_enabled(const struct net_device *dev)
{
@ -150,12 +145,10 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
return -EINVAL;
}
static inline int br_vlan_replay(struct net_device *br_dev,
struct net_device *dev, const void *ctx,
bool adding, struct notifier_block *nb,
struct netlink_ext_ack *extack)
static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo)
{
return -EOPNOTSUPP;
return -EINVAL;
}
#endif
@ -167,8 +160,6 @@ void br_fdb_clear_offload(const struct net_device *dev, u16 vid);
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag);
u8 br_port_get_stp_state(const struct net_device *dev);
clock_t br_get_ageing_time(const struct net_device *br_dev);
int br_fdb_replay(const struct net_device *br_dev, const struct net_device *dev,
const void *ctx, bool adding, struct notifier_block *nb);
#else
static inline struct net_device *
br_fdb_find_port(const struct net_device *br_dev,
@ -197,13 +188,6 @@ static inline clock_t br_get_ageing_time(const struct net_device *br_dev)
{
return 0;
}
static inline int br_fdb_replay(const struct net_device *br_dev,
const struct net_device *dev, const void *ctx,
bool adding, struct notifier_block *nb)
{
return -EOPNOTSUPP;
}
#endif
#endif

View file

@ -41,9 +41,6 @@ struct ip_sf_socklist {
__be32 sl_addr[];
};
#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \
(count) * sizeof(__be32))
#define IP_SFBLOCK 10 /* allocate this many at once */
/* ip_mc_socklist is real list now. Speed is not argument;

View file

@ -11,9 +11,11 @@
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/kexec.h>
#include <crypto/hash_info.h>
struct linux_binprm;
#ifdef CONFIG_IMA
extern enum hash_algo ima_get_current_hash_algo(void);
extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_file_check(struct file *file, int mask);
extern void ima_post_create_tmpfile(struct user_namespace *mnt_userns,
@ -33,10 +35,10 @@ extern void ima_post_path_mknod(struct user_namespace *mnt_userns,
extern int ima_file_hash(struct file *file, char *buf, size_t buf_size);
extern int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size);
extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size);
extern void ima_measure_critical_data(const char *event_label,
const char *event_name,
const void *buf, size_t buf_len,
bool hash);
extern int ima_measure_critical_data(const char *event_label,
const char *event_name,
const void *buf, size_t buf_len,
bool hash, u8 *digest, size_t digest_len);
#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
extern void ima_appraise_parse_cmdline(void);
@ -64,6 +66,11 @@ static inline const char * const *arch_get_ima_policy(void)
#endif
#else
static inline enum hash_algo ima_get_current_hash_algo(void)
{
return HASH_ALGO__LAST;
}
static inline int ima_bprm_check(struct linux_binprm *bprm)
{
return 0;
@ -137,10 +144,14 @@ static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size
static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {}
static inline void ima_measure_critical_data(const char *event_label,
static inline int ima_measure_critical_data(const char *event_label,
const char *event_name,
const void *buf, size_t buf_len,
bool hash) {}
bool hash, u8 *digest,
size_t digest_len)
{
return -ENOENT;
}
#endif /* CONFIG_IMA */
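A sketch of a caller using the new signature, which records the measurement and optionally returns the digest; the label/name strings are hypothetical and SHA512_DIGEST_SIZE (crypto/sha2.h) merely bounds the output buffer:

	static void example_measure_state(const void *buf, size_t len)
	{
		u8 digest[SHA512_DIGEST_SIZE];
		int ret;

		ret = ima_measure_critical_data("example_subsystem", "example_state",
						buf, len, true, digest, sizeof(digest));
		if (ret)
			pr_debug("IMA measurement not recorded: %d\n", ret);
	}
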

View file

@ -178,6 +178,15 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
#ifdef CONFIG_INET
int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size);
#else
static inline int inet_gifconf(struct net_device *dev, char __user *buf,
int len, int size)
{
return 0;
}
#endif
void devinet_init(void);
struct in_device *inetdev_by_index(struct net *, int);
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);

View file

@ -124,9 +124,9 @@
#define DMAR_MTRR_PHYSMASK8_REG 0x208
#define DMAR_MTRR_PHYSBASE9_REG 0x210
#define DMAR_MTRR_PHYSMASK9_REG 0x218
#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */
#define DMAR_VCMD_REG 0xe10 /* Virtual command register */
#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */
#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */
#define DMAR_VCMD_REG 0xe00 /* Virtual command register */
#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */
#define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg)
#define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg)

View file

@ -14,6 +14,11 @@
#define SVM_REQ_EXEC (1<<1)
#define SVM_REQ_PRIV (1<<0)
/* Page Request Queue depth */
#define PRQ_ORDER 2
#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5)
/*
* The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only
* for access to kernel addresses. No IOTLB flushes are automatically done

View file

@ -16,6 +16,7 @@ enum io_pgtable_fmt {
ARM_V7S,
ARM_MALI_LPAE,
AMD_IOMMU_V1,
APPLE_DART,
IO_PGTABLE_NUM_FMTS,
};
@ -73,10 +74,6 @@ struct io_pgtable_cfg {
* to support up to 35 bits PA where the bit32, bit33 and bit34 are
* encoded in the bit9, bit4 and bit5 of the PTE respectively.
*
* IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
* on unmap, for DMA domains using the flush queue mechanism for
* delayed invalidation.
*
* IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
* for use in the upper half of a split address space.
*
@ -86,7 +83,6 @@ struct io_pgtable_cfg {
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
#define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
#define IO_PGTABLE_QUIRK_NON_STRICT BIT(4)
#define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
#define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
unsigned long quirks;
@ -136,6 +132,11 @@ struct io_pgtable_cfg {
u64 transtab;
u64 memattr;
} arm_mali_lpae_cfg;
struct {
u64 ttbr[4];
u32 n_ttbrs;
} apple_dart_cfg;
};
};
@ -143,7 +144,9 @@ struct io_pgtable_cfg {
* struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
*
* @map: Map a physically contiguous memory region.
* @map_pages: Map a physically contiguous range of pages of the same size.
* @unmap: Unmap a physically contiguous memory region.
* @unmap_pages: Unmap a range of virtually contiguous pages of the same size.
* @iova_to_phys: Translate iova to physical address.
*
* These functions map directly onto the iommu_ops member functions with
@ -152,8 +155,14 @@ struct io_pgtable_cfg {
struct io_pgtable_ops {
int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped);
size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
size_t size, struct iommu_iotlb_gather *gather);
size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
size_t pgsize, size_t pgcount,
struct iommu_iotlb_gather *gather);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
unsigned long iova);
};
@ -246,5 +255,6 @@ extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns;
#endif /* __IO_PGTABLE_H */
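As a rough illustration of the new multi-page callback, the sketch below shows how a driver might call ->map_pages() and report a partial mapping; the wrapper name, prot flags and warning are assumptions, not taken from this commit:

#include <linux/gfp.h>
#include <linux/io-pgtable.h>
#include <linux/printk.h>

/* Illustrative wrapper around the new ->map_pages() callback. */
static int example_map_region(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount)
{
	size_t mapped = 0;
	int ret;

	ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount,
			     IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
	if (ret)
		pr_warn("mapped only %zu of %zu bytes\n",
			mapped, pgsize * pgcount);
	return ret;
}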

View file

@ -7,17 +7,18 @@
#if defined(CONFIG_IO_URING)
struct sock *io_uring_get_socket(struct file *file);
void __io_uring_cancel(struct files_struct *files);
void __io_uring_cancel(bool cancel_all);
void __io_uring_free(struct task_struct *tsk);
static inline void io_uring_files_cancel(struct files_struct *files)
static inline void io_uring_files_cancel(void)
{
if (current->io_uring)
__io_uring_cancel(files);
__io_uring_cancel(false);
}
static inline void io_uring_task_cancel(void)
{
return io_uring_files_cancel(NULL);
if (current->io_uring)
__io_uring_cancel(true);
}
static inline void io_uring_free(struct task_struct *tsk)
{
@ -32,7 +33,7 @@ static inline struct sock *io_uring_get_socket(struct file *file)
static inline void io_uring_task_cancel(void)
{
}
static inline void io_uring_files_cancel(struct files_struct *files)
static inline void io_uring_files_cancel(void)
{
}
static inline void io_uring_free(struct task_struct *tsk)
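A hedged sketch of how the argument-less helpers are meant to be driven from a task-teardown path; example_task_teardown() is illustrative, assumes it runs on current, and is not part of this commit:

#include <linux/io_uring.h>
#include <linux/sched.h>

/* Illustrative teardown for a task (assumed to be current) owning io_uring requests. */
static void example_task_teardown(struct task_struct *tsk)
{
	/* cancel only the requests that pin this task's files (cancel_all == false) */
	io_uring_files_cancel();

	/* ... release files, mm and other resources here ... */

	/* finally drop the per-task io_uring context, if any */
	io_uring_free(tsk);
}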

13 include/linux/ioam6.h Normal file
View file

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* IPv6 IOAM
*
* Author:
* Justin Iurman <justin.iurman@uliege.be>
*/
#ifndef _LINUX_IOAM6_H
#define _LINUX_IOAM6_H
#include <uapi/linux/ioam6.h>
#endif /* _LINUX_IOAM6_H */

View file

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* IPv6 IOAM Generic Netlink API
*
* Author:
* Justin Iurman <justin.iurman@uliege.be>
*/
#ifndef _LINUX_IOAM6_GENL_H
#define _LINUX_IOAM6_GENL_H
#include <uapi/linux/ioam6_genl.h>
#endif /* _LINUX_IOAM6_GENL_H */

View file

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* IPv6 IOAM Lightweight Tunnel API
*
* Author:
* Justin Iurman <justin.iurman@uliege.be>
*/
#ifndef _LINUX_IOAM6_IPTUNNEL_H
#define _LINUX_IOAM6_IPTUNNEL_H
#include <uapi/linux/ioam6_iptunnel.h>
#endif /* _LINUX_IOAM6_IPTUNNEL_H */

View file

@ -91,12 +91,29 @@ struct iomap {
const struct iomap_page_ops *page_ops;
};
static inline sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
{
return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
/*
* Returns the inline data pointer for logical offset @pos.
*/
static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos)
{
return iomap->inline_data + pos - iomap->offset;
}
/*
* Check if the mapping's length is within the valid range for inline data.
* This is used to guard against accessing data beyond the page inline_data
* points at.
*/
static inline bool iomap_inline_data_valid(const struct iomap *iomap)
{
return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
}
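A minimal sketch of how the two inline-data helpers are intended to pair up in a read path, assuming the caller already holds the page; the function name is illustrative:

#include <linux/highmem.h>
#include <linux/iomap.h>

/* Illustrative inline-data read: validate the mapping, then copy. */
static int example_read_inline(const struct iomap *iomap, struct page *page,
			       loff_t pos, size_t count)
{
	if (!iomap_inline_data_valid(iomap))
		return -EIO;

	memcpy_to_page(page, offset_in_page(pos),
		       iomap_inline_data(iomap, pos), count);
	return 0;
}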
/*
* When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
* and page_done will be called for each page written to. This only applies to
@ -108,10 +125,9 @@ iomap_sector(struct iomap *iomap, loff_t pos)
* associated page could not be obtained.
*/
struct iomap_page_ops {
int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len,
struct iomap *iomap);
int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len);
void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
struct page *page, struct iomap *iomap);
struct page *page);
};
/*
@ -124,6 +140,7 @@ struct iomap_page_ops {
#define IOMAP_DIRECT (1 << 4) /* direct I/O */
#define IOMAP_NOWAIT (1 << 5) /* do not block */
#define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */
#define IOMAP_UNSHARE (1 << 7) /* unshare_file_range */
struct iomap_ops {
/*
@ -145,15 +162,61 @@ struct iomap_ops {
ssize_t written, unsigned flags, struct iomap *iomap);
};
/*
* Main iomap iterator function.
/**
* struct iomap_iter - Iterate through a range of a file
* @inode: Set at the start of the iteration and should not change.
* @pos: The current file position we are operating on. It is updated by
* calls to iomap_iter(). Treat as read-only in the body.
* @len: The remaining length of the file segment we're operating on.
* It is updated at the same time as @pos.
* @processed: The number of bytes processed by the body in the most recent
* iteration, or a negative errno. 0 causes the iteration to stop.
* @flags: Zero or more of the iomap_begin flags above.
* @iomap: Map describing the I/O iteration
* @srcmap: Source map for COW operations
*/
typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
void *data, struct iomap *iomap, struct iomap *srcmap);
struct iomap_iter {
struct inode *inode;
loff_t pos;
u64 len;
s64 processed;
unsigned flags;
struct iomap iomap;
struct iomap srcmap;
};
loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
unsigned flags, const struct iomap_ops *ops, void *data,
iomap_actor_t actor);
int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
/**
* iomap_length - length of the current iomap iteration
* @iter: iteration structure
*
* Returns the length that the operation applies to for the current iteration.
*/
static inline u64 iomap_length(const struct iomap_iter *iter)
{
u64 end = iter->iomap.offset + iter->iomap.length;
if (iter->srcmap.type != IOMAP_HOLE)
end = min(end, iter->srcmap.offset + iter->srcmap.length);
return min(iter->len, end - iter->pos);
}
/**
* iomap_iter_srcmap - return the source map for the current iomap iteration
* @i: iteration structure
*
* Write operations on file systems with reflink support might require a
* source and a destination map. This function returns the source map
* for a given operation, which may or may not be identical to the destination
* map in &i->iomap.
*/
static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
{
if (i->srcmap.type != IOMAP_HOLE)
return &i->srcmap;
return &i->iomap;
}
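The loop below is a hedged sketch of the iteration pattern this structure enables in place of iomap_apply(); example_op() and its body are made up, but the while loop mirrors how iomap_iter() is intended to be driven:

#include <linux/iomap.h>

/* Illustrative per-extent body: consume up to iomap_length() bytes. */
static loff_t example_iter_body(struct iomap_iter *iter)
{
	/* operate on [iter->pos, iter->pos + iomap_length(iter)) here */
	return iomap_length(iter);	/* bytes processed this round */
}

/* Illustrative operation driving the iterator over [pos, pos + len). */
static int example_op(struct inode *inode, loff_t pos, u64 len,
		      const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= pos,
		.len	= len,
		.flags	= IOMAP_REPORT,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = example_iter_body(&iter);
	return ret;
}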
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
@ -250,8 +313,8 @@ int iomap_writepages(struct address_space *mapping,
struct iomap_dio_ops {
int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
unsigned flags);
blk_qc_t (*submit_io)(struct inode *inode, struct iomap *iomap,
struct bio *bio, loff_t file_offset);
blk_qc_t (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
loff_t file_offset);
};
/*

View file

@ -40,6 +40,7 @@ struct iommu_domain;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
struct iommu_dma_cookie;
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
@ -60,6 +61,7 @@ struct iommu_domain_geometry {
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
implementation */
#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
#define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */
/*
* This are the possible domain-types
@ -72,12 +74,17 @@ struct iommu_domain_geometry {
* IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
* This flag allows IOMMU drivers to implement
* certain optimizations for these domains
* IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB
* invalidation.
*/
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API | \
__IOMMU_DOMAIN_DMA_FQ)
struct iommu_domain {
unsigned type;
@ -86,9 +93,14 @@ struct iommu_domain {
iommu_fault_handler_t handler;
void *handler_token;
struct iommu_domain_geometry geometry;
void *iova_cookie;
struct iommu_dma_cookie *iova_cookie;
};
static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
return domain->type & __IOMMU_DOMAIN_DMA_API;
}
enum iommu_cap {
IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
transactions */
@ -160,16 +172,22 @@ enum iommu_dev_features {
* @start: IOVA representing the start of the range to be flushed
* @end: IOVA representing the end of the range to be flushed (inclusive)
* @pgsize: The interval at which to perform the flush
* @freelist: Removed pages to free after sync
* @queued: Indicates that the flush will be queued
*
* This structure is intended to be updated by multiple calls to the
* ->unmap() function in struct iommu_ops before eventually being passed
* into ->iotlb_sync().
* into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
* ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
* them. @queued is set to indicate when ->iotlb_flush_all() will be called
* later instead of ->iotlb_sync(), so drivers may optimise accordingly.
*/
struct iommu_iotlb_gather {
unsigned long start;
unsigned long end;
size_t pgsize;
struct page *freelist;
bool queued;
};
/**
@ -180,7 +198,10 @@ struct iommu_iotlb_gather {
* @attach_dev: attach device to an iommu domain
* @detach_dev: detach device from an iommu domain
* @map: map a physically contiguous memory region to an iommu domain
* @map_pages: map a physically contiguous set of pages of the same size to
* an iommu domain.
* @unmap: unmap a physically contiguous memory region from an iommu domain
* @unmap_pages: unmap a number of pages of the same size from an iommu domain
* @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
* @iotlb_sync_map: Sync mappings created recently using @map to the hardware
* @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
@ -229,8 +250,14 @@ struct iommu_ops {
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size, struct iommu_iotlb_gather *iotlb_gather);
size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
size_t pgsize, size_t pgcount,
struct iommu_iotlb_gather *iotlb_gather);
void (*flush_iotlb_all)(struct iommu_domain *domain);
void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
size_t size);
@ -414,11 +441,11 @@ extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents, int prot);
extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot);
extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
@ -476,8 +503,7 @@ int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
unsigned long quirks);
void iommu_set_dma_strict(bool val);
bool iommu_get_dma_strict(struct iommu_domain *domain);
void iommu_set_dma_strict(void);
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);
@ -497,29 +523,80 @@ static inline void iommu_iotlb_sync(struct iommu_domain *domain,
iommu_iotlb_gather_init(iotlb_gather);
}
/**
* iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
*
* @gather: TLB gather data
* @iova: start of page to invalidate
* @size: size of page to invalidate
*
* Helper for IOMMU drivers to check whether a new range and the gathered range
* are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
* than merging the two, which might lead to unnecessary invalidations.
*/
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size)
{
unsigned long start = iova, end = start + size - 1;
return gather->end != 0 &&
(end + 1 < gather->start || start > gather->end + 1);
}
/**
* iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
* @gather: TLB gather data
* @iova: start of page to invalidate
* @size: size of page to invalidate
*
* Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
* where only the address range matters, and simply minimising intermediate
* syncs is preferred.
*/
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size)
{
unsigned long end = iova + size - 1;
if (gather->start > iova)
gather->start = iova;
if (gather->end < end)
gather->end = end;
}
/**
* iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
* @domain: IOMMU domain to be invalidated
* @gather: TLB gather data
* @iova: start of page to invalidate
* @size: size of page to invalidate
*
* Helper for IOMMU drivers to build invalidation commands based on individual
* pages, or with page size/table level hints which cannot be gathered if they
* differ.
*/
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size)
{
unsigned long start = iova, end = start + size - 1;
/*
* If the new page is disjoint from the current range or is mapped at
* a different granularity, then sync the TLB so that the gather
* structure can be rewritten.
*/
if (gather->pgsize != size ||
end + 1 < gather->start || start > gather->end + 1) {
if (gather->pgsize)
iommu_iotlb_sync(domain, gather);
gather->pgsize = size;
}
if ((gather->pgsize && gather->pgsize != size) ||
iommu_iotlb_gather_is_disjoint(gather, iova, size))
iommu_iotlb_sync(domain, gather);
if (gather->end < end)
gather->end = end;
gather->pgsize = size;
iommu_iotlb_gather_add_range(gather, iova, size);
}
if (gather->start > start)
gather->start = start;
static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
return gather && gather->queued;
}
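For context, a small sketch of the caller-side pattern the gather structure supports: batch invalidations across an unmap, then flush once. The wrapper name is illustrative; the calls themselves are the existing iommu_* API:

#include <linux/iommu.h>

/* Illustrative unmap wrapper: gather invalidations, then sync once. */
static size_t example_unmap_range(struct iommu_domain *domain,
				  unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather gather;
	size_t unmapped;

	iommu_iotlb_gather_init(&gather);
	unmapped = iommu_unmap_fast(domain, iova, size, &gather);
	iommu_iotlb_sync(domain, &gather);	/* flushes and re-inits the gather */
	return unmapped;
}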
/* PCI device grouping function */
@ -679,18 +756,18 @@ static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
return 0;
}
static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
{
return 0;
return -ENODEV;
}
static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
{
return 0;
return -ENODEV;
}
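Because iommu_map_sg() now reports failure as a negative errno instead of a short count, callers are expected to test the sign of the return value. A hedged sketch with illustrative names:

#include <linux/iommu.h>
#include <linux/scatterlist.h>

/* Illustrative caller adapted to the ssize_t return convention. */
static int example_map_sg(struct iommu_domain *domain, unsigned long iova,
			  struct scatterlist *sg, unsigned int nents)
{
	ssize_t mapped = iommu_map_sg(domain, iova, sg, nents,
				      IOMMU_READ | IOMMU_WRITE);

	if (mapped < 0)
		return mapped;	/* e.g. -ENODEV when no IOMMU is attached */
	return 0;
}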
static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
@ -870,6 +947,11 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
{
}
static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
return false;
}
static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

View file

@ -6,46 +6,22 @@
#include <linux/sched/rt.h>
#include <linux/iocontext.h>
/*
* Gives us 8 prio classes with 13-bits of data for each class
*/
#define IOPRIO_CLASS_SHIFT (13)
#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data)
#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)
#include <uapi/linux/ioprio.h>
/*
* These are the io priority groups as implemented by CFQ. RT is the realtime
* class, it always gets premium service. BE is the best-effort scheduling
* class, the default for any process. IDLE is the idle scheduling class, it
* is only served when no one else is using the disk.
* Default IO priority.
*/
enum {
IOPRIO_CLASS_NONE,
IOPRIO_CLASS_RT,
IOPRIO_CLASS_BE,
IOPRIO_CLASS_IDLE,
};
#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_BE_NORM)
/*
* 8 best effort priority levels are supported
* Check that a priority value has a valid class.
*/
#define IOPRIO_BE_NR (8)
static inline bool ioprio_valid(unsigned short ioprio)
{
unsigned short class = IOPRIO_PRIO_CLASS(ioprio);
enum {
IOPRIO_WHO_PROCESS = 1,
IOPRIO_WHO_PGRP,
IOPRIO_WHO_USER,
};
/*
* Fallback BE priority
*/
#define IOPRIO_NORM (4)
return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE;
}
/*
* if process has set io priority explicitly, use that. if not, convert
@ -80,7 +56,7 @@ static inline int get_current_ioprio(void)
if (ioc)
return ioc->ioprio;
return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
return IOPRIO_DEFAULT;
}
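A small sketch of the macro plumbing now shared with userspace via uapi/linux/ioprio.h, combining ioprio_valid(), the class/data accessors and the IOPRIO_DEFAULT fallback; the helper is illustrative:

#include <linux/ioprio.h>

/* Illustrative helper: split the current task's I/O priority into its parts. */
static void example_show_ioprio(unsigned short *class, unsigned short *level)
{
	unsigned short ioprio = get_current_ioprio();	/* IOPRIO_DEFAULT if never set */

	if (!ioprio_valid(ioprio))		/* class outside RT/BE/IDLE */
		ioprio = IOPRIO_DEFAULT;

	*class = IOPRIO_PRIO_CLASS(ioprio);	/* e.g. IOPRIO_CLASS_BE */
	*level = IOPRIO_PRIO_DATA(ioprio);	/* e.g. 4, the normal best-effort level */
}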
/*

View file

@ -76,6 +76,9 @@ struct ipv6_devconf {
__s32 disable_policy;
__s32 ndisc_tclass;
__s32 rpl_seg_enabled;
__u32 ioam6_id;
__u32 ioam6_id_wide;
__u8 ioam6_enabled;
struct ctl_table_header *sysctl_header;
};

View file

@ -13,26 +13,22 @@
#ifndef ISCSI_IBFT_H
#define ISCSI_IBFT_H
#include <linux/acpi.h>
#include <linux/types.h>
/*
* Logical location of iSCSI Boot Format Table.
* If the value is NULL there is no iBFT on the machine.
* Physical location of iSCSI Boot Format Table.
* If the value is 0 there is no iBFT on the machine.
*/
extern struct acpi_table_ibft *ibft_addr;
extern phys_addr_t ibft_phys_addr;
/*
* Routine used to find and reserve the iSCSI Boot Format Table. The
* mapped address is set in the ibft_addr variable.
* physical address is set in the ibft_phys_addr variable.
*/
#ifdef CONFIG_ISCSI_IBFT_FIND
unsigned long find_ibft_region(unsigned long *sizep);
void reserve_ibft_region(void);
#else
static inline unsigned long find_ibft_region(unsigned long *sizep)
{
*sizep = 0;
return 0;
}
static inline void reserve_ibft_region(void) {}
#endif
#endif /* ISCSI_IBFT_H */

Some files were not shown because too many files have changed in this diff.