commit 651a887984
Merge branch 'topic/cs35l41' into for-next

Pull CS35L41 codec updates.

Signed-off-by: Takashi Iwai <tiwai@suse.de>

13378 changed files with 1050247 additions and 313815 deletions

@@ -526,7 +526,7 @@ acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
int acpi_resources_are_enforced(void);

#ifdef CONFIG_HIBERNATION
void __init acpi_check_s4_hw_signature(int check);
extern int acpi_check_s4_hw_signature;
#endif

#ifdef CONFIG_PM_SLEEP
@@ -580,6 +580,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
extern bool osc_sb_native_usb4_support_confirmed;
extern bool osc_sb_cppc_not_supported;

/* USB4 Capabilities */
#define OSC_USB_USB3_TUNNELING			0x00000001
@@ -691,7 +692,7 @@ int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);

struct platform_device *acpi_create_platform_device(struct acpi_device *,
						    struct property_entry *);
						    const struct property_entry *);
#define ACPI_PTR(_ptr)	(_ptr)

static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -930,7 +931,7 @@ static inline int acpi_device_modalias(struct device *dev,

static inline struct platform_device *
acpi_create_platform_device(struct acpi_device *adev,
			    struct property_entry *properties)
			    const struct property_entry *properties)
{
	return NULL;
}

@@ -1023,7 +1024,15 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,

acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
					   u32 val_a, u32 val_b);

#ifdef CONFIG_X86
struct acpi_s2idle_dev_ops {
	struct list_head list_node;
	void (*prepare)(void);
	void (*restore)(void);
};
int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
#endif /* CONFIG_X86 */
#ifndef CONFIG_IA64
void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
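For context, here is a minimal sketch of how a platform driver might hook into the acpi_s2idle_dev_ops interface declared above. The my_dev_* names and callback bodies are hypothetical; only the registration API comes from the header.

#include <linux/acpi.h>
#include <linux/module.h>

/* Hypothetical callbacks: run before the system enters s2idle and after resume. */
static void my_dev_s2idle_prepare(void) { /* quiesce the device */ }
static void my_dev_s2idle_restore(void) { /* re-arm the device */ }

static struct acpi_s2idle_dev_ops my_dev_s2idle_ops = {
	.prepare = my_dev_s2idle_prepare,
	.restore = my_dev_s2idle_restore,
};

static int __init my_dev_init(void)
{
	/* Hooks the device into the x86 s2idle (LPS0) suspend path. */
	return acpi_register_lps0_dev(&my_dev_s2idle_ops);
}

static void __exit my_dev_exit(void)
{
	acpi_unregister_lps0_dev(&my_dev_s2idle_ops);
}
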
include/linux/acpi_agdi.h (new file, 13 lines)
@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __ACPI_AGDI_H__
#define __ACPI_AGDI_H__

#include <linux/acpi.h>

#ifdef CONFIG_ACPI_AGDI
void __init acpi_agdi_init(void);
#else
static inline void acpi_agdi_init(void) {}
#endif
#endif /* __ACPI_AGDI_H__ */
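The #else stub above follows the usual conditional-API pattern: callers invoke acpi_agdi_init() unconditionally, and the call compiles to nothing when CONFIG_ACPI_AGDI is off. A sketch of an assumed call site (the caller name is hypothetical):

/* e.g. somewhere in arch ACPI setup code (hypothetical caller) */
void __init arch_acpi_setup(void)
{
	acpi_agdi_init();	/* no-op stub unless CONFIG_ACPI_AGDI=y */
}
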
@@ -117,30 +117,9 @@ void amba_device_put(struct amba_device *);
int amba_device_add(struct amba_device *, struct resource *);
int amba_device_register(struct amba_device *, struct resource *);
void amba_device_unregister(struct amba_device *);
struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int);
int amba_request_regions(struct amba_device *, const char *);
void amba_release_regions(struct amba_device *);

static inline int amba_pclk_enable(struct amba_device *dev)
{
	return clk_enable(dev->pclk);
}

static inline void amba_pclk_disable(struct amba_device *dev)
{
	clk_disable(dev->pclk);
}

static inline int amba_pclk_prepare(struct amba_device *dev)
{
	return clk_prepare(dev->pclk);
}

static inline void amba_pclk_unprepare(struct amba_device *dev)
{
	clk_unprepare(dev->pclk);
}

/* Some drivers don't use the struct amba_device */
#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
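The amba_pclk_* helpers removed above were thin wrappers around the clk API on dev->pclk, so the equivalent open-coded form (a sketch, not taken from this diff) is simply:

#include <linux/amba/bus.h>
#include <linux/clk.h>

static int my_amba_start(struct amba_device *adev)
{
	/* what amba_pclk_prepare() + amba_pclk_enable() used to do */
	return clk_prepare_enable(adev->pclk);
}

static void my_amba_stop(struct amba_device *adev)
{
	/* what amba_pclk_disable() + amba_pclk_unprepare() used to do */
	clk_disable_unprepare(adev->pclk);
}
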
@@ -11,6 +11,10 @@
void topology_normalize_cpu_scale(void);
int topology_update_cpu_topology(void);

#ifdef CONFIG_ACPI_CPPC_LIB
void topology_init_cpu_capacity_cppc(void);
#endif

struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
@@ -92,6 +92,11 @@
			   ARM_SMCCC_SMC_32, \
			   0, 0x7fff)

#define ARM_SMCCC_ARCH_WORKAROUND_3 \
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
			   ARM_SMCCC_SMC_32, \
			   0, 0x3fff)

#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
			   ARM_SMCCC_SMC_32, \
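A hedged sketch of how a workaround ID such as the new ARM_SMCCC_ARCH_WORKAROUND_3 is typically probed through the standard SMCCC feature query; the call site is hypothetical and error handling is elided:

#include <linux/arm-smccc.h>

static bool have_workaround_3(void)
{
	struct arm_smccc_res res;

	/* Ask the firmware whether ARCH_WORKAROUND_3 is implemented. */
	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);
	return (int)res.a0 >= 0;
}
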
@@ -46,9 +46,11 @@ int sdei_unregister_ghes(struct ghes *ghes);
/* For use by arch code when CPU hotplug notifiers are not appropriate. */
int sdei_mask_local_cpu(void);
int sdei_unmask_local_cpu(void);
void __init sdei_init(void);
#else
static inline int sdei_mask_local_cpu(void) { return 0; }
static inline int sdei_unmask_local_cpu(void) { return 0; }
static inline void sdei_init(void) { }
#endif /* CONFIG_ARM_SDE_INTERFACE */

@@ -324,12 +324,12 @@ enum {
	ATA_LOG_NCQ_NON_DATA	= 0x12,
	ATA_LOG_NCQ_SEND_RECV	= 0x13,
	ATA_LOG_IDENTIFY_DEVICE	= 0x30,
	ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,

	/* Identify device log pages: */
	ATA_LOG_SECURITY	= 0x06,
	ATA_LOG_SATA_SETTINGS	= 0x08,
	ATA_LOG_ZONED_INFORMATION = 0x09,
	ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,

	/* Identify device SATA settings log:*/
	ATA_LOG_DEVSLP_OFFSET	= 0x30,
@@ -151,7 +151,16 @@
static __always_inline int
arch_atomic_read_acquire(const atomic_t *v)
{
	return smp_load_acquire(&(v)->counter);
	int ret;

	if (__native_word(atomic_t)) {
		ret = smp_load_acquire(&(v)->counter);
	} else {
		ret = arch_atomic_read(v);
		__atomic_acquire_fence();
	}

	return ret;
}
#define arch_atomic_read_acquire arch_atomic_read_acquire
#endif
@@ -160,7 +169,12 @@ arch_atomic_read_acquire(const atomic_t *v)
static __always_inline void
arch_atomic_set_release(atomic_t *v, int i)
{
	smp_store_release(&(v)->counter, i);
	if (__native_word(atomic_t)) {
		smp_store_release(&(v)->counter, i);
	} else {
		__atomic_release_fence();
		arch_atomic_set(v, i);
	}
}
#define arch_atomic_set_release arch_atomic_set_release
#endif
@@ -1258,7 +1272,16 @@ arch_atomic_dec_if_positive(atomic_t *v)
static __always_inline s64
arch_atomic64_read_acquire(const atomic64_t *v)
{
	return smp_load_acquire(&(v)->counter);
	s64 ret;

	if (__native_word(atomic64_t)) {
		ret = smp_load_acquire(&(v)->counter);
	} else {
		ret = arch_atomic64_read(v);
		__atomic_acquire_fence();
	}

	return ret;
}
#define arch_atomic64_read_acquire arch_atomic64_read_acquire
#endif
@@ -1267,7 +1290,12 @@ arch_atomic64_read_acquire(const atomic64_t *v)
static __always_inline void
arch_atomic64_set_release(atomic64_t *v, s64 i)
{
	smp_store_release(&(v)->counter, i);
	if (__native_word(atomic64_t)) {
		smp_store_release(&(v)->counter, i);
	} else {
		__atomic_release_fence();
		arch_atomic64_set(v, i);
	}
}
#define arch_atomic64_set_release arch_atomic64_set_release
#endif
@@ -2358,4 +2386,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v)
#endif

#endif /* _LINUX_ATOMIC_FALLBACK_H */
// cca554917d7ea73d5e3e7397dd70c484cad9b2c4
// 8e2cc06bc0d2c0967d2f8424762bd48555ee40ae
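The fallbacks above use smp_load_acquire()/smp_store_release() when atomic_t is a native machine word and explicit fences otherwise. A small sketch of the publish/consume pattern these orderings serve (names are illustrative, not from this diff):

#include <linux/atomic.h>

static atomic_t ready;
static int payload;

/* producer */
static void publish(int v)
{
	payload = v;
	atomic_set_release(&ready, 1);	/* payload is visible before ready */
}

/* consumer */
static int consume(void)
{
	if (atomic_read_acquire(&ready))	/* ready seen => payload seen */
		return payload;
	return -1;
}
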
@@ -207,14 +207,6 @@ struct backing_dev_info {
#endif
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);

struct wb_lock_cookie {
	bool locked;
	unsigned long flags;
@@ -135,13 +135,6 @@ static inline bool writeback_in_progress(struct bdi_writeback *wb)

struct backing_dev_info *inode_to_bdi(struct inode *inode);

static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
{
	return wb->congested & cong_bits;
}

long congestion_wait(int sync, long timeout);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
@@ -162,7 +155,6 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);
int inode_congested(struct inode *inode, int cong_bits);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
@@ -390,50 +382,8 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

static inline int inode_congested(struct inode *inode, int cong_bits)
{
	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

static inline int inode_read_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_sync_congested);
}

static inline int inode_write_congested(struct inode *inode)
{
	return inode_congested(inode, 1 << WB_async_congested);
}

static inline int inode_rw_congested(struct inode *inode)
{
	return inode_congested(inode, (1 << WB_sync_congested) |
				      (1 << WB_async_congested));
}

static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
{
	return wb_congested(&bdi->wb, cong_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << WB_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << WB_sync_congested) |
				  (1 << WB_async_congested));
}

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */
@@ -80,12 +80,6 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)

#ifdef CONFIG_BALLOON_COMPACTION
extern const struct address_space_operations balloon_aops;
extern bool balloon_page_isolate(struct page *page,
				 isolate_mode_t mode);
extern void balloon_page_putback(struct page *page);
extern int balloon_page_migrate(struct address_space *mapping,
				struct page *newpage,
				struct page *page, enum migrate_mode mode);

/*
 * balloon_page_insert - insert a page into the balloon's page list and make
@@ -155,22 +149,6 @@ static inline void balloon_page_delete(struct page *page)
	list_del(&page->lru);
}

static inline bool balloon_page_isolate(struct page *page)
{
	return false;
}

static inline void balloon_page_putback(struct page *page)
{
	return;
}

static inline int balloon_page_migrate(struct page *newpage,
				       struct page *page, enum migrate_mode mode)
{
	return 0;
}

static inline gfp_t balloon_mapping_gfp_mask(void)
{
	return GFP_HIGHUSER;
@@ -3,6 +3,7 @@
#define LINUX_BCMA_DRIVER_CC_H_

#include <linux/platform_device.h>
#include <linux/platform_data/brcmnand.h>
#include <linux/gpio.h>

/** ChipCommon core registers. **/
@@ -599,6 +600,10 @@ struct bcma_sflash {

#ifdef CONFIG_BCMA_NFLASH
struct bcma_nflash {
	/* Must be the first member for the brcmnand driver to
	 * de-reference that structure.
	 */
	struct brcmnand_platform_data brcmnand_info;
	bool present;
	bool boot;	/* This is the flash the SoC boots from */
};
@@ -8,6 +8,7 @@
#include <uapi/linux/binfmts.h>

struct filename;
struct coredump_params;

#define CORENAME_MAX_SIZE 128

@@ -77,18 +78,6 @@ struct linux_binprm {
#define BINPRM_FLAGS_PRESERVE_ARGV0_BIT 3
#define BINPRM_FLAGS_PRESERVE_ARGV0 (1 << BINPRM_FLAGS_PRESERVE_ARGV0_BIT)

/* Function parameter for binfmt->coredump */
struct coredump_params {
	const kernel_siginfo_t *siginfo;
	struct pt_regs *regs;
	struct file *file;
	unsigned long limit;
	unsigned long mm_flags;
	loff_t written;
	loff_t pos;
	loff_t to_skip;
};

/*
 * This structure defines the functions that are used to load the binary formats that
 * linux accepts.
@@ -98,8 +87,10 @@ struct linux_binfmt {
	struct module *module;
	int (*load_binary)(struct linux_binprm *);
	int (*load_shlib)(struct file *);
#ifdef CONFIG_COREDUMP
	int (*core_dump)(struct coredump_params *cprm);
	unsigned long min_coredump;	/* minimal dump size */
#endif
} __randomize_layout;

extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
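For reference, a hedged sketch of a minimal binary-format handler built on the structure above; the load routine is a stub that declines every binary, and the my_* names are invented:

#include <linux/binfmts.h>
#include <linux/errno.h>
#include <linux/module.h>

static int my_load_binary(struct linux_binprm *bprm)
{
	return -ENOEXEC;	/* stub: decline, let the next binfmt try */
}

static struct linux_binfmt my_format = {
	.module      = THIS_MODULE,
	.load_binary = my_load_binary,
};

static int __init my_binfmt_init(void)
{
	register_binfmt(&my_format);	/* wraps __register_binfmt(fmt, 0) */
	return 0;
}
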
@@ -65,7 +65,6 @@ static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

@@ -186,8 +185,6 @@ static inline unsigned bio_segments(struct bio *bio)
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}
@@ -405,21 +402,25 @@ extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs,
			     struct bio_set *bs);
struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
			    struct bio_set *bs);
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     unsigned int opf, gfp_t gfp_mask,
			     struct bio_set *bs);
struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
			    unsigned short nr_vecs, unsigned int opf, struct bio_set *bs);
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
			    gfp_t gfp, struct bio_set *bs);
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		   struct bio *bio_src, gfp_t gfp);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs)
static inline struct bio *bio_alloc(struct block_device *bdev,
		unsigned short nr_vecs, unsigned int opf, gfp_t gfp_mask)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
	return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}

void submit_bio(struct bio *bio);
@@ -454,10 +455,10 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
struct request_queue;

extern int submit_bio_wait(struct bio *bio);
extern void bio_init(struct bio *bio, struct bio_vec *table,
		     unsigned short max_vecs);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, unsigned int opf);
extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf);
void bio_chain(struct bio *, struct bio *);

int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
@@ -487,8 +488,6 @@ static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
	__bio_release_pages(bio, mark_dirty);
}

extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_dev(bio) \
	disk_devt((bio)->bi_bdev->bd_disk)

@@ -515,13 +514,6 @@ static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
	bio_associate_blkg(bio);
}

static inline void bio_copy_dev(struct bio *dst, struct bio *src)
{
	bio_clear_flag(dst, BIO_REMAPPED);
	dst->bi_bdev = src->bi_bdev;
	bio_clone_blkg_association(dst, src);
}

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
@@ -790,6 +782,7 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
	bio->bi_opf |= REQ_NOWAIT;
}

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, unsigned int opf, gfp_t gfp);

#endif /* __LINUX_BIO_H */
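With the reworked allocators above, the target bdev and the operation are fixed at allocation time instead of being assigned to bi_bdev/bi_opf afterwards. A hedged sketch of the new calling convention (the sector and page are placeholders):

#include <linux/bio.h>
#include <linux/blkdev.h>

static void write_one_page(struct block_device *bdev, struct page *page,
			   sector_t sector)
{
	/* bdev and REQ_OP_WRITE are supplied up front */
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_WRITE, GFP_KERNEL);

	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio_wait(bio);
	bio_put(bio);
}
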
@@ -19,6 +19,9 @@
 *
 * Example:
 *
 *  #include <linux/bitfield.h>
 *  #include <linux/bits.h>
 *
 *  #define REG_FIELD_A  GENMASK(6, 0)
 *  #define REG_FIELD_B  BIT(7)
 *  #define REG_FIELD_C  GENMASK(15, 8)
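Continuing the example started in that comment, FIELD_PREP()/FIELD_GET() pack and unpack values against those masks; a short sketch:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define REG_FIELD_A  GENMASK(6, 0)
#define REG_FIELD_B  BIT(7)
#define REG_FIELD_C  GENMASK(15, 8)

static u32 encode(u8 a, bool b, u8 c)
{
	return FIELD_PREP(REG_FIELD_A, a) |
	       FIELD_PREP(REG_FIELD_B, b) |
	       FIELD_PREP(REG_FIELD_C, c);
}

static u8 decode_c(u32 reg)
{
	return FIELD_GET(REG_FIELD_C, reg);	/* shifts and masks for us */
}
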
@@ -25,14 +25,8 @@
#include <linux/kthread.h>
#include <linux/fs.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX
#define FC_APPID_LEN		129


#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
@@ -44,6 +38,7 @@ enum blkg_iostat_type {
};

struct blkcg_gq;
struct blkg_policy_data;

struct blkcg {
	struct cgroup_subsys_state css;
@@ -76,36 +71,6 @@ struct blkg_iostat_set {
	struct blkg_iostat last;
};

/*
 * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate private data
 * area by allocating larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq		*blkg;
	int			plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated to it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate private data area by allocating larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg		*blkcg;
	int			plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
@@ -130,7 +95,10 @@ struct blkcg_gq {

	spinlock_t		async_bio_lock;
	struct bio_list		async_bios;
	struct work_struct	async_bio_work;
	union {
		struct work_struct	async_bio_work;
		struct work_struct	free_work;
	};

	atomic_t		use_delay;
	atomic64_t		delay_nsec;
@@ -141,120 +109,17 @@ struct blkcg_gq {
	struct rcu_head rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
		struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
		struct seq_file *s);

struct blkcg_policy {
	int			plid;
	/* cgroup files for the policy */
	struct cftype		*dfl_cftypes;
	struct cftype		*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn	*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn	*cpd_init_fn;
	blkcg_pol_free_cpd_fn	*cpd_free_fn;
	blkcg_pol_bind_cpd_fn	*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn	*pd_alloc_fn;
	blkcg_pol_init_pd_fn	*pd_init_fn;
	blkcg_pol_online_pd_fn	*pd_online_fn;
	blkcg_pol_offline_pd_fn	*pd_offline_fn;
	blkcg_pol_free_pd_fn	*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn	*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct block_device		*bdev;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct block_device *blkcg_conf_open_bdev(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}
void blkcg_destroy_blkgs(struct blkcg *blkcg);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by the
 * bio.  This means it does not need to be rcu protected as it cannot go away
 * with the bio owning a reference to it.  However, the latter potentially gets
 * it from task_css().  This can race against task migration and the cgroup
 * dying.  It is also semantically different as it must be called rcu protected
 * and is susceptible to failure when trying to get a reference to it.
 * Therefore, it is not ok to assume that *_get() will always succeed on the
 * blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
@@ -290,22 +155,6 @@ static inline bool blk_cgroup_congested(void)
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper and
 * if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
@@ -317,96 +166,6 @@ static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pdata - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pdata_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

/**
 * blkcg_pin_online - pin online state
 * @blkcg: blkcg of interest
@@ -439,231 +198,24 @@ static inline void blkcg_unpin_online(struct blkcg *blkcg)
	} while (blkcg);
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @c_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism. See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
#endif /* CONFIG_BLOCK */

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
				struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_BLK_CGROUP_FC_APPID
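A hedged sketch of the RCU lookup pattern the blkg_tryget() comment above describes, using only the helpers shown in this header (the wrapper name is illustrative):

#include <linux/rcupdate.h>

/* Illustrative: look up a blkg under RCU and pin it only if still live. */
static struct blkcg_gq *get_live_blkg(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg = blkg_lookup(blkcg, q);
	if (blkg && !blkg_tryget(blkg))	/* refcnt may already have hit zero */
		blkg = NULL;
	rcu_read_unlock();
	return blkg;			/* caller must blkg_put() when done */
}
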
@@ -19,6 +19,7 @@ struct blk_integrity_iter {
	sector_t		seed;
	unsigned int		data_size;
	unsigned short		interval;
	unsigned char		tuple_size;
	const char		*disk_name;
};

@@ -917,8 +917,7 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
}

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
	xa_for_each(&(q)->hctx_table, (i), (hctx))

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
@@ -952,8 +951,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
void blk_rq_unprep_clone(struct request *rq);
blk_status_t blk_insert_cloned_request(struct request_queue *q,
				       struct request *rq);
blk_status_t blk_insert_cloned_request(struct request *rq);

struct rq_map_data {
	struct page **pages;
@@ -85,8 +85,10 @@ struct block_device {
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
typedef u32 blk_short_t;
#else
typedef u8 __bitwise blk_status_t;
typedef u16 blk_short_t;
#endif
#define	BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
@@ -153,6 +155,13 @@ typedef u8 __bitwise blk_status_t;
 */
#define BLK_STS_ZONE_ACTIVE_RESOURCE	((__force blk_status_t)16)

/*
 * BLK_STS_OFFLINE is returned from the driver when the target device is offline
 * or is being taken offline. This could help differentiate the case where a
 * device is intentionally being shut down from a real I/O error.
 */
#define BLK_STS_OFFLINE		((__force blk_status_t)17)

/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
@@ -243,7 +252,6 @@ struct bio {
	 */
	unsigned short		bi_flags;	/* BIO_* below */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	atomic_t		__bi_remaining;

@@ -317,7 +325,8 @@ enum {
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_CGROUP_ACCT,	/* has been accounted to a cgroup */
	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
	BIO_QOS_THROTTLED,	/* bio went through rq_qos throttle path */
	BIO_QOS_MERGED,		/* but went through rq_qos merge path */
	BIO_REMAPPED,
	BIO_ZONE_WRITE_LOCKED,	/* Owns a zoned device zone write lock */
	BIO_PERCPU_CACHE,	/* can participate in per-cpu alloc cache */
@@ -354,8 +363,6 @@ enum req_opf {
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
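A hedged sketch of how a driver might use the new status code described above when it notices the target has gone away; the driver function and its device_gone flag are hypothetical:

#include <linux/blk-mq.h>

/* Hypothetical completion path in a blk-mq driver. */
static void my_complete_rq(struct request *rq, bool device_gone)
{
	/* Distinguish an intentional shutdown from a real I/O error. */
	blk_mq_end_request(rq, device_gone ? BLK_STS_OFFLINE : BLK_STS_OK);
}
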
@@ -1,9 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/genhd.h>
#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
@@ -12,11 +16,15 @@
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

struct module;
struct request_queue;
@@ -33,6 +41,10 @@ struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern struct device_type part_type;
extern struct class block_class;

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16

@@ -45,6 +57,145 @@ struct blk_crypto_profile;
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)

struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};

/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media.  When set, the device remains present even when media is not
 * inserted.  Shall not be set for devices which are removed entirely when the
 * media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled.  The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 *
 */
enum {
	GENHD_FL_REMOVABLE		= 1 << 0,
	GENHD_FL_HIDDEN			= 1 << 1,
	GENHD_FL_NO_PART		= 1 << 2,
};

enum {
	DISK_EVENT_MEDIA_CHANGE		= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST	= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};

struct disk_events;
struct badblocks;

struct blk_integrity {
	const struct blk_integrity_profile	*profile;
	unsigned char				flags;
	unsigned char				tuple_size;
	unsigned char				interval_exp;
	unsigned char				tag_size;
};

struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;
	void *private_data;

	int flags;
	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_DEAD				2
#define GD_NATIVE_CAPACITY		3
#define GD_ADDED			4

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;
#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct kobject integrity_kobj;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */
#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	int node_id;
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	u64 diskseq;
};

static inline bool disk_live(struct gendisk *disk)
{
	return !inode_unhashed(disk->part0->bd_inode);
}

/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}

static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
@@ -97,7 +248,6 @@ struct queue_limits {
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
@@ -204,7 +354,7 @@ struct request_queue {
	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	struct xarray		hctx_table;
	unsigned int		nr_hw_queues;

	/*
@@ -262,6 +412,7 @@ struct request_queue {

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject *crypto_kobject;
#endif

	unsigned int		rq_timeout;
@@ -366,9 +517,6 @@ struct request_queue {

	bool			mq_sysfs_init_done;

#define BLK_MAX_WRITE_HINTS	5
	u64			write_hints[BLK_MAX_WRITE_HINTS];

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
@@ -596,6 +744,118 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)

int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);

static inline int get_disk_ro(struct gendisk *disk)
{
	return disk->part0->bd_read_only ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline int bdev_read_only(struct block_device *bdev)
{
	return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
bool disk_force_media_change(struct gendisk *disk, unsigned int events);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);

static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}

int bdev_disk_changed(struct gendisk *disk, bool invalidate);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);
void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 *
 * Context: can sleep
 */
#define blk_alloc_disk(node_id)						\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(node_id, &__key);				\
})
void blk_cleanup_disk(struct gendisk *disk);

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool bdev_check_media_change(struct block_device *bdev);
int __invalidate_device(struct block_device *bdev, bool kill_dirty);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
int bd_register_pending_holders(struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
static inline int bd_register_pending_holders(struct gendisk *disk)
{
	return 0;
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
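Putting the helpers above together, a hedged bring-up sketch for a BIO-based driver; the names, capacity, and stub submit path are arbitrary illustrations, not from this diff:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/module.h>

static void my_submit_bio(struct bio *bio)
{
	bio_endio(bio);			/* stub: complete immediately */
}

static const struct block_device_operations my_fops = {
	.owner      = THIS_MODULE,
	.submit_bio = my_submit_bio,
};

static int my_disk_create(void)
{
	struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);
	int ret;

	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, DISK_NAME_LEN, "mydisk0");
	set_capacity(disk, 2048);	/* 1 MiB in 512-byte sectors */
	disk->fops = &my_fops;

	ret = add_disk(disk);		/* add_disk() now returns an error */
	if (ret)
		blk_cleanup_disk(disk);
	return ret;
}
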
@@ -651,9 +911,6 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

@@ -696,8 +953,6 @@ extern void blk_queue_max_discard_segments(struct request_queue *,
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
@@ -748,7 +1003,8 @@ extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);

#ifdef CONFIG_BLOCK
/*
@@ -791,14 +1047,11 @@ extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void blk_flush_plug(struct blk_plug *plug, bool from_schedule);

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(plug->mq_list || !list_empty(&plug->cb_list));
	if (plug)
		__blk_flush_plug(plug, async);
}

int blkdev_issue_flush(struct block_device *bdev);
@@ -824,11 +1077,6 @@ static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
@ -842,9 +1090,6 @@ static inline long nr_blockdev_pages(void)
|
|||
|
||||
extern void blk_io_schedule(void);
|
||||
|
||||
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
|
||||
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
|
||||
|
||||
#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
|
||||
|
||||
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
|
||||
|
|
@ -1071,16 +1316,6 @@ static inline int bdev_discard_alignment(struct block_device *bdev)
|
|||
return q->limits.discard_alignment;
|
||||
}
|
||||
|
||||
static inline unsigned int bdev_write_same(struct block_device *bdev)
|
||||
{
|
||||
struct request_queue *q = bdev_get_queue(bdev);
|
||||
|
||||
if (q)
|
||||
return q->limits.max_write_same_sectors;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
|
||||
{
|
||||
struct request_queue *q = bdev_get_queue(bdev);
|
||||
|
|
@ -1200,6 +1435,8 @@ enum blk_unique_id {
|
|||
|
||||
struct block_device_operations {
|
||||
void (*submit_bio)(struct bio *bio);
|
||||
int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
|
||||
unsigned int flags);
|
||||
int (*open) (struct block_device *, fmode_t);
|
||||
void (*release) (struct gendisk *, fmode_t);
|
||||
int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
|
||||
|
|
@ -1210,6 +1447,7 @@ struct block_device_operations {
|
|||
void (*unlock_native_capacity) (struct gendisk *);
|
||||
int (*getgeo)(struct block_device *, struct hd_geometry *);
|
||||
int (*set_read_only)(struct block_device *bdev, bool ro);
|
||||
void (*free_disk)(struct gendisk *disk);
|
||||
/* this callback is with swap_lock and sometimes page table lock held */
|
||||
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
|
||||
int (*report_zones)(struct gendisk *, sector_t sector,
|
||||
|
|
@ -1258,6 +1496,7 @@ unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
|
|||
void disk_end_io_acct(struct gendisk *disk, unsigned int op,
|
||||
unsigned long start_time);
|
||||
|
||||
void bio_start_io_acct_time(struct bio *bio, unsigned long start_time);
|
||||
unsigned long bio_start_io_acct(struct bio *bio);
|
||||
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
|
||||
struct block_device *orig_bdev);
|
||||
|
|
@ -1265,7 +1504,7 @@ void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
|
|||
/**
|
||||
* bio_end_io_acct - end I/O accounting for bio based drivers
|
||||
* @bio: bio to end account for
|
||||
* @start: start time returned by bio_start_io_acct()
|
||||
* @start_time: start time returned by bio_start_io_acct()
|
||||
*/
|
||||
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
|
||||
{
|
||||
|
|
@ -1310,6 +1549,7 @@ void invalidate_bdev(struct block_device *bdev);
|
|||
int sync_blockdev(struct block_device *bdev);
|
||||
int sync_blockdev_nowait(struct block_device *bdev);
|
||||
void sync_bdevs(bool wait);
|
||||
void printk_all_partitions(void);
|
||||
#else
|
||||
static inline void invalidate_bdev(struct block_device *bdev)
|
||||
{
|
||||
|
|
@ -1325,7 +1565,11 @@ static inline int sync_blockdev_nowait(struct block_device *bdev)
|
|||
static inline void sync_bdevs(bool wait)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
static inline void printk_all_partitions(void)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_BLOCK */
|
||||
|
||||
int fsync_bdev(struct block_device *bdev);
|
||||
|
||||
int freeze_bdev(struct block_device *bdev);
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@
|
|||
#include <linux/jump_label.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <net/sock.h>
|
||||
#include <uapi/linux/bpf.h>
|
||||
|
||||
struct sock;
|
||||
|
|
@ -165,11 +166,23 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
|
|||
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
|
||||
void *value, u64 flags);
|
||||
|
||||
/* Opportunistic check to see whether we have any BPF program attached*/
|
||||
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
|
||||
enum cgroup_bpf_attach_type type)
|
||||
{
|
||||
struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
|
||||
struct bpf_prog_array *array;
|
||||
|
||||
array = rcu_access_pointer(cgrp->bpf.effective[type]);
|
||||
return array != &bpf_empty_prog_array.hdr;
|
||||
}
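
/*
 * A condensed view (sketch, not the literal preprocessor output) of what
 * the wrapper macros below now evaluate to: the common no-program case
 * costs one static-key test plus one pointer compare instead of a call
 * into __cgroup_bpf_run_filter_skb():
 *
 *	int __ret = 0;
 *	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) &&		// static key
 *	    cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS))	// array != empty
 *		__ret = __cgroup_bpf_run_filter_skb(sk, skb,
 *						    CGROUP_INET_INGRESS);
 */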

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS)) \
	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \
	    cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS)) \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
						    CGROUP_INET_INGRESS); \
	\

@@ -181,7 +194,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk); \
		if (sk_fullsock(__sk)) \
		if (sk_fullsock(__sk) && \
		    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
							    CGROUP_INET_EGRESS); \
	} \

@@ -347,7 +361,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				       kernel_optval) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT)) \
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \
	    cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
							   optname, optval, \
							   optlen, \

@@ -367,7 +382,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				       max_optlen, retval) \
({ \
	int __ret = retval; \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \
	    cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \
		if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					  tcp_bpf_bypass_getsockopt, \

@@ -194,6 +194,17 @@ struct bpf_map {
	struct work_struct work;
	struct mutex freeze_mutex;
	atomic64_t writecnt;
	/* 'Ownership' of program-containing map is claimed by the first program
	 * that is going to use this map or by the first program which FD is
	 * stored in the map to make sure that all callers and callees have the
	 * same prog type, JITed flag and xdp_has_frags flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
		bool xdp_has_frags;
	} owner;
};
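
/*
 * A minimal sketch of how this ownership claim can be used (assumed here
 * for illustration; the real logic lives in the bpf core, not in this
 * header): the first program stored in or using the map records its type,
 * JITed flag and xdp_has_frags flag under owner.lock, and every later
 * program must match them.
 */
static bool example_map_owner_compatible(struct bpf_map *map,
					 const struct bpf_prog *fp)
{
	bool ret;

	spin_lock(&map->owner.lock);
	if (!map->owner.type) {
		/* first user claims ownership */
		map->owner.type = fp->type;
		map->owner.jited = fp->jited;
		map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
		ret = true;
	} else {
		/* later users must match the claimed properties */
		ret = map->owner.type == fp->type &&
		      map->owner.jited == fp->jited &&
		      map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
	}
	spin_unlock(&map->owner.lock);
	return ret;
}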

static inline bool map_value_has_spin_lock(const struct bpf_map *map)

@@ -209,11 +220,9 @@ static inline bool map_value_has_timer(const struct bpf_map *map)
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	if (unlikely(map_value_has_spin_lock(map)))
		*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
			(struct bpf_spin_lock){};
		memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
	if (unlikely(map_value_has_timer(map)))
		*(struct bpf_timer *)(dst + map->timer_off) =
			(struct bpf_timer){};
		memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
}

/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */

@@ -224,7 +233,8 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
	if (unlikely(map_value_has_spin_lock(map))) {
		s_off = map->spin_lock_off;
		s_sz = sizeof(struct bpf_spin_lock);
	} else if (unlikely(map_value_has_timer(map))) {
	}
	if (unlikely(map_value_has_timer(map))) {
		t_off = map->timer_off;
		t_sz = sizeof(struct bpf_timer);
	}
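
/*
 * Sketch of the chunked copy this sets up (illustration under the
 * simplifying assumption that the spin_lock field sits before the timer
 * field; the real helper orders the two offsets first):
 *
 *	memcpy(dst, src, s_off);
 *	memcpy(dst + s_off + s_sz, src + s_off + s_sz,
 *	       t_off - s_off - s_sz);
 *	memcpy(dst + t_off + t_sz, src + t_off + t_sz,
 *	       map->value_size - t_off - t_sz);
 */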

@@ -321,7 +331,18 @@ enum bpf_type_flag {
	 */
	MEM_ALLOC		= BIT(2 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_LAST_FLAG	= MEM_ALLOC,
	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	/* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
	 * to the specified cpu.
	 */
	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_LAST_FLAG	= MEM_PERCPU,
};
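
/*
 * Composition sketch: a verifier type is a base type in the low
 * BPF_BASE_TYPE_BITS plus OR-ed flags above them, e.g. (names as above):
 *
 *	u32 type  = PTR_TO_BTF_ID | MEM_PERCPU;
 *	u32 base  = type & BPF_BASE_TYPE_MASK;	// PTR_TO_BTF_ID
 *	u32 flags = type & ~BPF_BASE_TYPE_MASK;	// MEM_PERCPU
 */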

/* Max number of base types. */

@@ -503,7 +524,6 @@ enum bpf_reg_type {
	 */
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_BUF,		 /* reg points to a read/write buffer */
	PTR_TO_PERCPU_BTF_ID,	 /* reg points to a percpu kernel variable */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	__BPF_REG_TYPE_MAX,

@@ -577,8 +597,7 @@ struct bpf_verifier_ops {
				const struct btf *btf,
				const struct btf_type *t, int off, int size,
				enum bpf_access_type atype,
				u32 *next_btf_id);
	bool (*check_kfunc_call)(u32 kfunc_btf_id, struct module *owner);
				u32 *next_btf_id, enum bpf_type_flag *flag);
};

struct bpf_prog_offload_ops {

@@ -833,8 +852,8 @@ void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 pages);
void bpf_jit_uncharge_modmem(u32 pages);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,

@@ -939,6 +958,8 @@ struct bpf_prog_aux {
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	bool xdp_has_frags;
	bool use_bpf_prog_pack;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;

@@ -999,16 +1020,6 @@ struct bpf_prog_aux {
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program which FD is
	 * stored in the map to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
	} owner;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;

@@ -1183,7 +1194,14 @@ struct bpf_event_entry {
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
static inline bool map_type_contains_progs(struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
	       map->map_type == BPF_MAP_TYPE_DEVMAP ||
	       map->map_type == BPF_MAP_TYPE_CPUMAP;
}

bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

@@ -1225,6 +1243,19 @@ struct bpf_prog_array {
	struct bpf_prog_array_item items[];
};

struct bpf_empty_prog_array {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
};

/* to avoid allocating empty bpf_prog_array for cgroups that
 * don't have bpf program attached use one global 'bpf_empty_prog_array'
 * It will not be modified the caller of bpf_prog_array_alloc()
 * (since caller requested prog_cnt == 0)
 * that pointer should be 'freed' by bpf_prog_array_free()
 */
extern struct bpf_empty_prog_array bpf_empty_prog_array;
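
/*
 * Why a pointer compare suffices (sketch of the assumed definition in the
 * bpf core): the dummy array keeps a single NULL program slot, so any
 * iteration over it terminates immediately, and every owner with no
 * programs attached can share this one object:
 *
 *	struct bpf_empty_prog_array bpf_empty_prog_array = {
 *		.null_prog = NULL,
 *	};
 */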

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);

@@ -1251,6 +1282,7 @@ struct bpf_run_ctx {};
struct bpf_cg_run_ctx {
	struct bpf_run_ctx run_ctx;
	const struct bpf_prog_array_item *prog_item;
	int retval;
};

struct bpf_trace_run_ctx {

@@ -1283,19 +1315,19 @@ static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)

typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);

static __always_inline u32
static __always_inline int
BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
			    const void *ctx, bpf_prog_run_fn run_prog,
			    u32 *ret_flags)
			    int retval, u32 *ret_flags)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 ret = 1;
	u32 func_ret;

	run_ctx.retval = retval;
	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);

@@ -1304,27 +1336,29 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		func_ret = run_prog(prog, ctx);
		ret &= (func_ret & 1);
		if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval))
			run_ctx.retval = -EPERM;
		*(ret_flags) |= (func_ret >> 1);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return ret;
	return run_ctx.retval;
}

static __always_inline u32
static __always_inline int
BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
		      const void *ctx, bpf_prog_run_fn run_prog)
		      const void *ctx, bpf_prog_run_fn run_prog,
		      int retval)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 ret = 1;

	run_ctx.retval = retval;
	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);

@@ -1332,13 +1366,14 @@ BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		ret &= run_prog(prog, ctx);
		if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval))
			run_ctx.retval = -EPERM;
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return ret;
	return run_ctx.retval;
}
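
/*
 * Usage sketch (assumed caller, modelled on the getsockopt path): the
 * previous retval is threaded through, so a program returning 1 keeps it,
 * while a 0 return forces -EPERM unless an error value is already pending:
 *
 *	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
 *				    bpf_prog_run, retval);
 */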

static __always_inline u32

@@ -1391,19 +1426,21 @@ out:
 * 0: NET_XMIT_SUCCESS  skb should be transmitted
 * 1: NET_XMIT_DROP     skb should be dropped and cn
 * 2: NET_XMIT_CN       skb should be transmitted and cn
 * 3: -EPERM            skb should be dropped
 * 3: -err              skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \
({ \
	u32 _flags = 0; \
	bool _cn; \
	u32 _ret; \
	_ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \
	_ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, 0, &_flags); \
	_cn = _flags & BPF_RET_SET_CN; \
	if (_ret) \
	if (_ret && !IS_ERR_VALUE((long)_ret)) \
		_ret = -EFAULT; \
	if (!_ret) \
		_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
	else \
		_ret = (_cn ? NET_XMIT_DROP : -EPERM); \
		_ret = (_cn ? NET_XMIT_DROP : _ret); \
	_ret; \
})
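
/*
 * Return-value layout this macro decodes (as implied by the helpers
 * above): bit 0 of each program's return value is the pass/drop verdict
 * that BPF_PROG_RUN_ARRAY_CG_FLAGS() folds into its retval, and the
 * remaining bits are shifted down into *ret_flags, where BPF_RET_SET_CN
 * requests congestion notification:
 *
 *	verdict = func_ret & 1;
 *	*ret_flags |= func_ret >> 1;	// carries BPF_RET_SET_CN
 */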

@@ -1724,7 +1761,6 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
				const union bpf_attr *kattr,
				union bpf_attr __user *uattr);
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);

@@ -1754,7 +1790,7 @@ static inline bool bpf_tracing_btf_ctx_access(int off, int size,
int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
		      u32 *next_btf_id, enum bpf_type_flag *flag);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
			  const struct btf *btf, u32 id, int off,
			  const struct btf *need_btf, u32 need_type_id);

@@ -1793,6 +1829,11 @@ struct bpf_core_ctx {
int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
		   int relo_idx, void *insn);

static inline bool unprivileged_ebpf_enabled(void)
{
	return !sysctl_unprivileged_bpf_disabled;
}

#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{

@@ -1862,11 +1903,6 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags)
	return -EOPNOTSUPP;
}

static inline bool dev_map_can_have_prog(struct bpf_map *map)
{
	return false;
}

static inline void __dev_flush(void)
{
}

@@ -1930,11 +1966,6 @@ static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
	return -EOPNOTSUPP;
}

static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{

@@ -1976,12 +2007,6 @@ static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
	return -ENOTSUPP;
}

static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id,
						  struct module *owner)
{
	return false;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

@@ -2012,6 +2037,12 @@ bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
{
	return NULL;
}

static inline bool unprivileged_ebpf_enabled(void)
{
	return false;
}

#endif /* CONFIG_BPF_SYSCALL */

void __bpf_free_used_btfs(struct bpf_prog_aux *aux,

@@ -2076,6 +2107,9 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog,
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
int sock_map_bpf_prog_query(const union bpf_attr *attr,
			    union bpf_attr __user *uattr);

void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else

@@ -2129,6 +2163,12 @@ static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void
{
	return -EOPNOTSUPP;
}

static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
					  union bpf_attr __user *uattr)
{
	return -EINVAL;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

@@ -2227,6 +2267,7 @@ extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_strncmp_proto;
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

@@ -2339,6 +2380,8 @@ enum bpf_text_poke_type {
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

void *bpf_arch_text_copy(void *dst, void *src, size_t len);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

@@ -154,16 +154,17 @@ void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem);

struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
		bool charge_mem);
		bool charge_mem, gfp_t gfp_flags);

int
bpf_local_storage_alloc(void *owner,
			struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *first_selem);
			struct bpf_local_storage_elem *first_selem,
			gfp_t gfp_flags);

struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags);
			 void *value, u64 map_flags, gfp_t gfp_flags);

void bpf_local_storage_free_rcu(struct rcu_head *rcu);

@@ -140,3 +140,4 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
#ifdef CONFIG_PERF_EVENTS
BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
#endif
BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi)

@@ -521,6 +521,12 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

int check_ptr_off_reg(struct bpf_verifier_env *env,
		      const struct bpf_reg_state *reg, int regno);
int check_func_arg_reg_off(struct bpf_verifier_env *env,
			   const struct bpf_reg_state *reg, int regno,
			   enum bpf_arg_type arg_type,
			   bool is_release_func);
int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
			     u32 regno);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
		  u32 regno, u32 mem_size);

@@ -564,4 +570,11 @@ static inline u32 type_flag(u32 type)
	return type & ~BPF_BASE_TYPE_MASK;
}

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
{
	return prog->type == BPF_PROG_TYPE_EXT ?
		prog->aux->dst_prog->type : prog->type;
}
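
/*
 * Usage sketch: freplace programs (BPF_PROG_TYPE_EXT) take on the type of
 * the program they extend, so type checks should use the resolved type:
 *
 *	if (resolve_prog_type(prog) == BPF_PROG_TYPE_XDP)
 *		apply_xdp_checks(prog);	// hypothetical helper
 */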

#endif /* _LINUX_BPF_VERIFIER_H */

@@ -10,7 +10,6 @@
#define _BLK_BSG_

#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

struct bsg_job;
struct request;

@@ -12,11 +12,33 @@
#define BTF_TYPE_EMIT(type) ((void)(type *)0)
#define BTF_TYPE_EMIT_ENUM(enum_val) ((void)enum_val)

enum btf_kfunc_type {
	BTF_KFUNC_TYPE_CHECK,
	BTF_KFUNC_TYPE_ACQUIRE,
	BTF_KFUNC_TYPE_RELEASE,
	BTF_KFUNC_TYPE_RET_NULL,
	BTF_KFUNC_TYPE_MAX,
};

struct btf;
struct btf_member;
struct btf_type;
union bpf_attr;
struct btf_show;
struct btf_id_set;

struct btf_kfunc_id_set {
	struct module *owner;
	union {
		struct {
			struct btf_id_set *check_set;
			struct btf_id_set *acquire_set;
			struct btf_id_set *release_set;
			struct btf_id_set *ret_null_set;
		};
		struct btf_id_set *sets[BTF_KFUNC_TYPE_MAX];
	};
};
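
/*
 * Registration sketch for the new interface (set and function names here
 * are hypothetical):
 *
 *	BTF_SET_START(my_check_kfunc_ids)
 *	BTF_ID(func, my_module_kfunc)
 *	BTF_SET_END(my_check_kfunc_ids)
 *
 *	static const struct btf_kfunc_id_set my_kfunc_set = {
 *		.owner     = THIS_MODULE,
 *		.check_set = &my_check_kfunc_ids,
 *	};
 *
 *	// e.g. from module init:
 *	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &my_kfunc_set);
 */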

extern const struct file_operations btf_fops;

@@ -216,6 +238,11 @@ static inline bool btf_type_is_var(const struct btf_type *t)
	return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
}

static inline bool btf_type_is_type_tag(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG;
}

/* union is only a special case of struct:
 * all its offsetof(member) == 0
 */

@@ -300,6 +327,11 @@ static inline const struct btf_var_secinfo *btf_type_var_secinfo(
	return (const struct btf_var_secinfo *)(t + 1);
}

static inline struct btf_param *btf_params(const struct btf_type *t)
{
	return (struct btf_param *)(t + 1);
}

#ifdef CONFIG_BPF_SYSCALL
struct bpf_prog;

@@ -307,6 +339,11 @@ const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
struct btf *btf_parse_vmlinux(void);
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
bool btf_kfunc_id_set_contains(const struct btf *btf,
			       enum bpf_prog_type prog_type,
			       enum btf_kfunc_type type, u32 kfunc_btf_id);
int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
			      const struct btf_kfunc_id_set *s);
#else
static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
						    u32 type_id)

@@ -318,50 +355,18 @@ static inline const char *btf_name_by_offset(const struct btf *btf,
{
	return NULL;
}
#endif

struct kfunc_btf_id_set {
	struct list_head list;
	struct btf_id_set *set;
	struct module *owner;
};

struct kfunc_btf_id_list {
	struct list_head list;
	struct mutex mutex;
};

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
			       struct kfunc_btf_id_set *s);
void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
				 struct kfunc_btf_id_set *s);
bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
			      struct module *owner);

extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
extern struct kfunc_btf_id_list prog_test_kfunc_list;
#else
static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
					     struct kfunc_btf_id_set *s)
{
}
static inline void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
					       struct kfunc_btf_id_set *s)
{
}
static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist,
					    u32 kfunc_id, struct module *owner)
static inline bool btf_kfunc_id_set_contains(const struct btf *btf,
					     enum bpf_prog_type prog_type,
					     enum btf_kfunc_type type,
					     u32 kfunc_btf_id)
{
	return false;
}

static struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list __maybe_unused;
static struct kfunc_btf_id_list prog_test_kfunc_list __maybe_unused;
static inline int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
					    const struct btf_kfunc_id_set *s)
{
	return 0;
}
#endif

#define DEFINE_KFUNC_BTF_ID_SET(set, name) \
	struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set), \
					 THIS_MODULE }

#endif

@@ -11,6 +11,7 @@ struct btf_id_set {
#ifdef CONFIG_DEBUG_INFO_BTF

#include <linux/compiler.h> /* for __PASTE */
#include <linux/compiler_attributes.h> /* for __maybe_unused */

/*
 * Following macros help to define lists of BTF IDs placed

@@ -146,14 +147,14 @@ extern struct btf_id_set name;

#else

#define BTF_ID_LIST(name) static u32 name[5];
#define BTF_ID_LIST(name) static u32 __maybe_unused name[5];
#define BTF_ID(prefix, name)
#define BTF_ID_UNUSED
#define BTF_ID_LIST_GLOBAL(name, n) u32 name[n];
#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 name[1];
#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
#define BTF_ID_LIST_GLOBAL(name, n) u32 __maybe_unused name[n];
#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 __maybe_unused name[1];
#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 __maybe_unused name[1];
#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 };
#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 };
#define BTF_SET_END(name)

#endif /* CONFIG_DEBUG_INFO_BTF */

@@ -144,6 +144,7 @@ BUFFER_FNS(Defer_Completion, defer_completion)
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
#define folio_buffers(folio)		folio_get_private(folio)

void buffer_check_dirty_writeback(struct page *page,
				  bool *dirty, bool *writeback);

@@ -216,16 +217,14 @@ extern int buffer_heads_over_limit;
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
			  unsigned int length);
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_page(struct page *page, get_block_t *get_block,
			  struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			    get_block_t *get_block, struct writeback_control *wbc,
			    bh_end_io_t *handler);
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, unsigned long from,
				unsigned long count);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		      unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,

@@ -398,7 +397,7 @@ __bread(struct block_device *bdev, sector_t block, unsigned size)
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

extern int __set_page_dirty_buffers(struct page *page);
bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#else /* CONFIG_BLOCK */

@@ -4,6 +4,8 @@

#include <asm/cacheflush.h>

struct folio;

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
void flush_dcache_folio(struct folio *folio);

@@ -113,7 +113,7 @@ struct can_tdc_const {
};

#ifdef CONFIG_CAN_CALC_BITTIMING
int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
		       const struct can_bittiming_const *btc);

void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,

@@ -121,7 +121,7 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
		   u32 *ctrlmode, u32 ctrlmode_supported);
#else /* !CONFIG_CAN_CALC_BITTIMING */
static inline int
can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
		   const struct can_bittiming_const *btc)
{
	netdev_err(dev, "bit-timing calculation not available\n");

@@ -136,7 +136,7 @@ can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
}
#endif /* CONFIG_CAN_CALC_BITTIMING */

int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
		      const struct can_bittiming_const *btc,
		      const u32 *bitrate_const,
		      const unsigned int bitrate_const_cnt);

@@ -28,8 +28,8 @@


#define CEPH_INO_ROOT             1
#define CEPH_INO_CEPH             2 /* hidden .ceph dir */
#define CEPH_INO_DOTDOT           3 /* used by ceph fuse for parent (..) */
#define CEPH_INO_CEPH             2 /* hidden .ceph dir */
#define CEPH_INO_GLOBAL_SNAPREALM 3 /* global dummy snaprealm */

/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
#define CEPH_MAX_MON   31

@@ -328,6 +328,7 @@ enum {
	CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
	CEPH_MDS_OP_LOOKUPINO  = 0x00104,
	CEPH_MDS_OP_LOOKUPNAME = 0x00105,
	CEPH_MDS_OP_GETVXATTR  = 0x00106,

	CEPH_MDS_OP_SETXATTR   = 0x01105,
	CEPH_MDS_OP_RMXATTR    = 0x01106,

@@ -35,6 +35,7 @@
#define CEPH_OPT_TCP_NODELAY      (1<<4) /* TCP_NODELAY on TCP sockets */
#define CEPH_OPT_NOMSGSIGN        (1<<5) /* don't sign msgs (msgr1) */
#define CEPH_OPT_ABORT_ON_FULL    (1<<6) /* abort w/ ENOSPC when full */
#define CEPH_OPT_RXBOUNCE         (1<<7) /* double-buffer read data */

#define CEPH_OPT_DEFAULT   (CEPH_OPT_TCP_NODELAY)

@@ -283,6 +284,7 @@ DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld)

extern struct kmem_cache *ceph_inode_cachep;
extern struct kmem_cache *ceph_cap_cachep;
extern struct kmem_cache *ceph_cap_snap_cachep;
extern struct kmem_cache *ceph_cap_flush_cachep;
extern struct kmem_cache *ceph_dentry_cachep;
extern struct kmem_cache *ceph_file_cachep;

@@ -383,6 +383,10 @@ struct ceph_connection_v2_info {
	struct ceph_gcm_nonce in_gcm_nonce;
	struct ceph_gcm_nonce out_gcm_nonce;

	struct page **in_enc_pages;
	int in_enc_page_cnt;
	int in_enc_resid;
	int in_enc_i;
	struct page **out_enc_pages;
	int out_enc_page_cnt;
	int out_enc_resid;

@@ -457,6 +461,7 @@ struct ceph_connection {
	struct ceph_msg *out_msg;        /* sending message (== tail of
					    out_sent) */

	struct page *bounce_page;
	u32 in_front_crc, in_middle_crc, in_data_crc;  /* calculated crc */

	struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */

@@ -34,8 +34,17 @@ static inline void cfi_module_remove(struct module *mod, unsigned long base_addr

#else /* !CONFIG_CFI_CLANG */

#define __CFI_ADDRESSABLE(fn, __attr)
#ifdef CONFIG_X86_KERNEL_IBT

#define __CFI_ADDRESSABLE(fn, __attr) \
	const void *__cfi_jt_ ## fn __visible __attr = (void *)&fn

#endif /* CONFIG_X86_KERNEL_IBT */

#endif /* CONFIG_CFI_CLANG */

#ifndef __CFI_ADDRESSABLE
#define __CFI_ADDRESSABLE(fn, __attr)
#endif

#endif /* _LINUX_CFI_H */

@@ -450,6 +450,7 @@ extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
	rcu_dereference_check((task)->cgroups, \
			rcu_read_lock_sched_held() || \
			lockdep_is_held(&cgroup_mutex) || \
			lockdep_is_held(&css_set_lock) || \
			((task)->flags & PF_EXITING) || (__c))

@@ -791,11 +792,9 @@ static inline void cgroup_account_cputime(struct task_struct *task,

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,

@@ -806,11 +805,9 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}

#else /* CONFIG_CGROUPS */
1
include/linux/cgroup_api.h
Normal file

@@ -0,0 +1 @@
#include <linux/cgroup.h>

@@ -888,7 +888,7 @@ void clk_hw_unregister_divider(struct clk_hw *hw);
struct clk_mux {
	struct clk_hw hw;
	void __iomem	*reg;
	u32		*table;
	const u32	*table;
	u32		mask;
	u8		shift;
	u8		flags;

@@ -913,18 +913,18 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
		const struct clk_hw **parent_hws,
		const struct clk_parent_data *parent_data,
		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock);
		u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
		const char *name, u8 num_parents,
		const char * const *parent_names,
		const struct clk_hw **parent_hws,
		const struct clk_parent_data *parent_data,
		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock);
		u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
struct clk *clk_register_mux_table(struct device *dev, const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
		u8 clk_mux_flags, u32 *table, spinlock_t *lock);
		u8 clk_mux_flags, const u32 *table, spinlock_t *lock);

#define clk_register_mux(dev, name, parent_names, num_parents, flags, reg, \
			 shift, width, clk_mux_flags, lock) \

@@ -962,9 +962,9 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
			      (shift), BIT((width)) - 1, (clk_mux_flags), \
			      NULL, (lock))

int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
			 unsigned int val);
unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index);
unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index);

void clk_unregister_mux(struct clk *clk);
void clk_hw_unregister_mux(struct clk_hw *hw);

@@ -1003,6 +1003,9 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
		const char *name, const char *parent_name, unsigned long flags,
		unsigned int mult, unsigned int div);
struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
		const char *name, unsigned int index, unsigned long flags,
		unsigned int mult, unsigned int div);
/**
 * struct clk_fractional_divider - adjustable fractional divider clock
 *

@@ -986,6 +986,17 @@ static inline void clk_bulk_disable_unprepare(int num_clks,
	clk_bulk_unprepare(num_clks, clks);
}

/**
 * clk_drop_range - Reset any range set on that clock
 * @clk: clock source
 *
 * Returns success (0) or negative errno.
 */
static inline int clk_drop_range(struct clk *clk)
{
	return clk_set_rate_range(clk, 0, ULONG_MAX);
}
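
/*
 * Usage sketch: pairs with clk_set_rate_range(), letting a consumer drop
 * a previously requested constraint (names as in this header):
 *
 *	ret = clk_set_rate_range(clk, 48000000, 192000000);
 *	...
 *	ret = clk_drop_range(clk);	// back to [0, ULONG_MAX]
 */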

/**
 * clk_get_optional - lookup and obtain a reference to an optional clock
 *		      producer.

@@ -78,6 +78,10 @@
#define		AT91_PMC_MAINRDY	(1 << 16)	/* Main Clock Ready */

#define	AT91_CKGR_PLLAR		0x28		/* PLL A Register */

#define	AT91_PMC_RATIO		0x2c		/* Processor clock ratio register [SAMA7G5 only] */
#define		AT91_PMC_RATIO_RATIO	(0xf)	/* CPU clock ratio. */

#define	AT91_CKGR_PLLBR		0x2c		/* PLL B Register */
#define		AT91_PMC_DIV		(0xff << 0)	/* Divider */
#define		AT91_PMC_PLLCOUNT	(0x3f << 8)	/* PLL Counter */

@@ -9,4 +9,6 @@
int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode);
int sunxi_ccu_get_mmc_timing_mode(struct clk *clk);

int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg);

#endif

@@ -12,14 +12,18 @@
 */
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)

#else
#define MAX_CMA_AREAS	(0)

#endif

#define CMA_MAX_NAME 64

/*
 * TODO: once the buddy -- especially pageblock merging and alloc_contig_range()
 * -- can deal with only some pageblocks of a higher-order page being
 *  MIGRATE_CMA, we can use pageblock_nr_pages.
 */
#define CMA_MIN_ALIGNMENT_PAGES MAX_ORDER_NR_PAGES
#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)

struct cma;

extern unsigned long totalcma_pages;
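
/*
 * Illustration (assumed caller): with a fixed minimum, a reserved-memory
 * region only has to satisfy one alignment check, e.g.:
 *
 *	if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
 *		return -EINVAL;
 */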

@@ -50,4 +54,6 @@ extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);

extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);

extern void cma_reserve_pages_on_error(struct cma *cma);
#endif

@@ -68,3 +68,28 @@

#define __nocfi		__attribute__((__no_sanitize__("cfi")))
#define __cficanonical	__attribute__((__cfi_canonical_jump_table__))

/*
 * Turn individual warnings and errors on and off locally, depending
 * on version.
 */
#define __diag_clang(version, severity, s) \
	__diag_clang_ ## version(__diag_clang_ ## severity s)

/* Severity used in pragma directives */
#define __diag_clang_ignore	ignored
#define __diag_clang_warn	warning
#define __diag_clang_error	error

#define __diag_str1(s)		#s
#define __diag_str(s)		__diag_str1(s)
#define __diag(s)		_Pragma(__diag_str(clang diagnostic s))

#if CONFIG_CLANG_VERSION >= 110000
#define __diag_clang_11(s)	__diag(s)
#else
#define __diag_clang_11(s)
#endif

#define __diag_ignore_all(option, comment) \
	__diag_clang(11, ignore, option)
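
/*
 * Usage sketch (assumed call site), paired with __diag_push()/__diag_pop()
 * from compiler_types.h; the GCC counterpart below maps to __diag_GCC():
 *
 *	__diag_push();
 *	__diag_ignore_all("-Wmissing-prototypes",
 *			  "example: exported helpers need no prototypes");
 *	int my_example_func(void) { return 0; }	// hypothetical function
 *	__diag_pop();
 */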

@@ -97,6 +97,10 @@
#define KASAN_ABI_VERSION 4
#endif

#ifdef CONFIG_SHADOW_CALL_STACK
#define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif

#if __has_attribute(__no_sanitize_address__)
#define __no_sanitize_address __attribute__((no_sanitize_address))
#else

@@ -151,6 +155,9 @@
#define __diag_GCC_8(s)
#endif

#define __diag_ignore_all(option, comment) \
	__diag_GCC(8, ignore, option)

/*
 * Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size"
 * attribute) do not work, and must be disabled.

@@ -117,14 +117,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 */
#define __stringify_label(n) #n

#define __annotate_reachable(c) ({ \
	asm volatile(__stringify_label(c) ":\n\t" \
		     ".pushsection .discard.reachable\n\t" \
		     ".long " __stringify_label(c) "b - .\n\t" \
		     ".popsection\n\t" : : "i" (c)); \
})
#define annotate_reachable() __annotate_reachable(__COUNTER__)

#define __annotate_unreachable(c) ({ \
	asm volatile(__stringify_label(c) ":\n\t" \
		     ".pushsection .discard.unreachable\n\t" \

@@ -133,24 +125,14 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)

#define ASM_UNREACHABLE \
	"999:\n\t" \
	".pushsection .discard.unreachable\n\t" \
	".long 999b - .\n\t" \
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { \
	annotate_unreachable(); \

@@ -100,6 +100,19 @@
# define __copy(symbol)
#endif

/*
 * Optional: not supported by gcc
 * Optional: only supported since clang >= 14.0
 * Optional: not supported by icc
 *
 * clang: https://clang.llvm.org/docs/AttributeReference.html#diagnose_as_builtin
 */
#if __has_attribute(__diagnose_as_builtin__)
# define __diagnose_as(builtin...)	__attribute__((__diagnose_as_builtin__(builtin)))
#else
# define __diagnose_as(builtin...)
#endif

/*
 * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
 * attribute warnings entirely and for good") for more information.

@@ -257,12 +270,38 @@
 */
#define __noreturn			__attribute__((__noreturn__))

/*
 * Optional: not supported by gcc.
 * Optional: not supported by icc.
 *
 * clang: https://clang.llvm.org/docs/AttributeReference.html#overloadable
 */
#if __has_attribute(__overloadable__)
# define __overloadable			__attribute__((__overloadable__))
#else
# define __overloadable
#endif

/*
 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute
 * clang: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute
 */
#define __packed			__attribute__((__packed__))

/*
 * Note: the "type" argument should match any __builtin_object_size(p, type) usage.
 *
 * Optional: not supported by gcc.
 * Optional: not supported by icc.
 *
 * clang: https://clang.llvm.org/docs/AttributeReference.html#pass-object-size-pass-dynamic-object-size
 */
#if __has_attribute(__pass_object_size__)
# define __pass_object_size(type)	__attribute__((__pass_object_size__(type)))
#else
# define __pass_object_size(type)
#endif

/*
 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute
 */

@@ -4,6 +4,14 @@

#ifndef __ASSEMBLY__

#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \
	__has_attribute(btf_type_tag)
# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value)))
#else
# define BTF_TYPE_TAG(value) /* nothing */
#endif
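
/*
 * Effect sketch: with pahole support, the tag is recorded in BTF on the
 * pointer type, so the BPF verifier can tell user/percpu pointers apart
 * (see the __user/__percpu definitions further down), e.g.:
 *
 *	struct example {		// hypothetical structure
 *		int __user *uptr;	// carries btf_type_tag("user")
 *	};
 */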

/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
/* address spaces */
# define __kernel	__attribute__((address_space(0)))

@@ -32,10 +40,10 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# ifdef STRUCTLEAK_PLUGIN
#  define __user	__attribute__((user))
# else
#  define __user
#  define __user	BTF_TYPE_TAG(user)
# endif
# define __iomem
# define __percpu
# define __percpu	BTF_TYPE_TAG(percpu)
# define __rcu
# define __chk_user_ptr(x)	(void)0
# define __chk_io_ptr(x)	(void)0

@@ -137,8 +145,6 @@ struct ftrace_likely_data {
 */
#define __naked			__attribute__((__naked__)) notrace

#define __compiler_offsetof(a, b)	__builtin_offsetof(a, b)

/*
 * Prefer gnu_inline, so that extern inline functions do not emit an
 * externally visible function. This makes extern inline behave as per gnu89

@@ -368,4 +374,8 @@ struct ftrace_likely_data {
#define __diag_error(compiler, version, option, comment) \
	__diag_ ## compiler(version, error, option)

#ifndef __diag_ignore_all
#define __diag_ignore_all(option, comment)
#endif

#endif /* __LINUX_COMPILER_TYPES_H */

@@ -38,10 +38,10 @@ int component_add_typed(struct device *dev, const struct component_ops *ops,
	int subcomponent);
void component_del(struct device *, const struct component_ops *);

int component_bind_all(struct device *master, void *master_data);
void component_unbind_all(struct device *master, void *master_data);
int component_bind_all(struct device *parent, void *data);
void component_unbind_all(struct device *parent, void *data);

struct master;
struct aggregate_device;

/**
 * struct component_master_ops - callback for the aggregate driver

@@ -82,6 +82,12 @@ struct component_master_ops {
	void (*unbind)(struct device *master);
};

/* A set helper functions for component compare/release */
int component_compare_of(struct device *dev, void *data);
void component_release_of(struct device *dev, void *data);
int component_compare_dev(struct device *dev, void *data);
int component_compare_dev_name(struct device *dev, void *data);

void component_master_del(struct device *,
	const struct component_master_ops *);

@@ -89,22 +95,22 @@ struct component_match;

int component_master_add_with_match(struct device *,
	const struct component_master_ops *, struct component_match *);
void component_match_add_release(struct device *master,
void component_match_add_release(struct device *parent,
	struct component_match **matchptr,
	void (*release)(struct device *, void *),
	int (*compare)(struct device *, void *), void *compare_data);
void component_match_add_typed(struct device *master,
void component_match_add_typed(struct device *parent,
	struct component_match **matchptr,
	int (*compare_typed)(struct device *, int, void *), void *compare_data);

/**
 * component_match_add - add a component match entry
 * @master: device with the aggregate driver
 * @parent: device with the aggregate driver
 * @matchptr: pointer to the list of component matches
 * @compare: compare function to match against all components
 * @compare_data: opaque pointer passed to the @compare function
 *
 * Adds a new component match to the list stored in @matchptr, which the @master
 * Adds a new component match to the list stored in @matchptr, which the @parent
 * aggregate driver needs to function. The list of component matches pointed to
 * by @matchptr must be initialized to NULL before adding the first match. This
 * only matches against components added with component_add().

@@ -114,11 +120,11 @@ void component_match_add_typed(struct device *master,
 *
 * See also component_match_add_release() and component_match_add_typed().
 */
static inline void component_match_add(struct device *master,
static inline void component_match_add(struct device *parent,
	struct component_match **matchptr,
	int (*compare)(struct device *, void *), void *compare_data)
{
	component_match_add_release(master, matchptr, NULL, compare,
	component_match_add_release(parent, matchptr, NULL, compare,
				    compare_data);
}
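
/*
 * Usage sketch with the new shared helpers (hypothetical driver; @np is
 * the OF node of a remote component):
 *
 *	struct component_match *match = NULL;
 *
 *	component_match_add_release(parent, &match, component_release_of,
 *				    component_compare_of, np);
 *	err = component_master_add_with_match(parent, &my_master_ops, match);
 */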

@@ -12,22 +12,34 @@ struct core_vma_metadata {
	unsigned long start, end;
	unsigned long flags;
	unsigned long dump_size;
	unsigned long pgoff;
	struct file   *file;
};

struct coredump_params {
	const kernel_siginfo_t *siginfo;
	struct pt_regs *regs;
	struct file *file;
	unsigned long limit;
	unsigned long mm_flags;
	loff_t written;
	loff_t pos;
	loff_t to_skip;
	int vma_count;
	size_t vma_data_size;
	struct core_vma_metadata *vma_meta;
};

/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
struct coredump_params;
extern void dump_skip_to(struct coredump_params *cprm, unsigned long to);
extern void dump_skip(struct coredump_params *cprm, size_t nr);
extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
int dump_user_range(struct coredump_params *cprm, unsigned long start,
		    unsigned long len);
int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
		      struct core_vma_metadata **vma_meta,
		      size_t *vma_data_size_ptr);
extern void do_coredump(const kernel_siginfo_t *siginfo);
#else
static inline void do_coredump(const kernel_siginfo_t *siginfo) {}

@@ -36,7 +36,6 @@
extern struct bus_type coresight_bustype;

enum coresight_dev_type {
	CORESIGHT_DEV_TYPE_NONE,
	CORESIGHT_DEV_TYPE_SINK,
	CORESIGHT_DEV_TYPE_LINK,
	CORESIGHT_DEV_TYPE_LINKSINK,

@@ -46,7 +45,6 @@ enum coresight_dev_type {
};

enum coresight_dev_subtype_sink {
	CORESIGHT_DEV_SUBTYPE_SINK_NONE,
	CORESIGHT_DEV_SUBTYPE_SINK_PORT,
	CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
	CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM,

@@ -54,21 +52,18 @@ enum coresight_dev_subtype_sink {
};

enum coresight_dev_subtype_link {
	CORESIGHT_DEV_SUBTYPE_LINK_NONE,
	CORESIGHT_DEV_SUBTYPE_LINK_MERG,
	CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
	CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
};

enum coresight_dev_subtype_source {
	CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
	CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
	CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
	CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
};

enum coresight_dev_subtype_helper {
	CORESIGHT_DEV_SUBTYPE_HELPER_NONE,
	CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
};

@@ -382,6 +382,9 @@ struct cpufreq_driver {
	int		(*suspend)(struct cpufreq_policy *policy);
	int		(*resume)(struct cpufreq_policy *policy);

	/* Will be called after the driver is fully initialized */
	void		(*ready)(struct cpufreq_policy *policy);

	struct freq_attr **attr;

	/* platform specific boost support code */

@@ -658,6 +661,11 @@ struct gov_attr_set {
/* sysfs ops for cpufreq governors */
extern const struct sysfs_ops governor_sysfs_ops;

static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
{
	return container_of(kobj, struct gov_attr_set, kobj);
}
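
/*
 * Usage sketch (pattern assumed from governor sysfs code): a sysfs hook
 * recovers the attribute set from its embedded kobject:
 *
 *	static ssize_t example_show(struct kobject *kobj,
 *				    struct attribute *attr, char *buf)
 *	{
 *		struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
 *		...
 *	}
 */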
|
||||
|
||||
void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
|
||||
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
|
||||
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
|
||||
|
|
|
|||
|
|
@@ -100,6 +100,7 @@ enum cpuhp_state {
        CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
        CPUHP_PADATA_DEAD,
        CPUHP_AP_DTPM_CPU_DEAD,
        CPUHP_RANDOM_PREPARE,
        CPUHP_WORKQUEUE_PREP,
        CPUHP_POWER_NUMA_PREPARE,
        CPUHP_HRTIMERS_PREPARE,
@@ -165,6 +166,7 @@ enum cpuhp_state {
        CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
        CPUHP_AP_PERF_ARM_ACPI_STARTING,
        CPUHP_AP_PERF_ARM_STARTING,
        CPUHP_AP_PERF_RISCV_STARTING,
        CPUHP_AP_ARM_L2X0_STARTING,
        CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
        CPUHP_AP_ARM_ARCH_TIMER_STARTING,
@@ -231,6 +233,7 @@ enum cpuhp_state {
        CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
        CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
        CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
        CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
        CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
        CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
        CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
@@ -240,6 +243,7 @@ enum cpuhp_state {
        CPUHP_AP_PERF_CSKY_ONLINE,
        CPUHP_AP_WATCHDOG_ONLINE,
        CPUHP_AP_WORKQUEUE_ONLINE,
        CPUHP_AP_RANDOM_ONLINE,
        CPUHP_AP_RCUTREE_ONLINE,
        CPUHP_AP_BASE_CACHEINFO_ONLINE,
        CPUHP_AP_ONLINE_DYN,
@@ -102,7 +102,7 @@ extern atomic_t __num_online_cpus;

extern cpumask_t cpus_booted_once_mask;

static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        WARN_ON_ONCE(cpu >= bits);
@@ -110,7 +110,7 @@ static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
}

/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
static __always_inline unsigned int cpumask_check(unsigned int cpu)
{
        cpu_max_bits_warn(cpu, nr_cpumask_bits);
        return cpu;
@@ -341,12 +341,12 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
        set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
        __set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -357,12 +357,12 @@ static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 */
static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
        clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
        __clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -374,7 +374,7 @@ static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
 *
 * Returns 1 if @cpu is set in @cpumask, else returns 0
 */
static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
        return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
@@ -388,7 +388,7 @@ static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
 *
 * test_and_set_bit wrapper for cpumasks.
 */
static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
        return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -402,7 +402,7 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
 *
 * test_and_clear_bit wrapper for cpumasks.
 */
static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
static __always_inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
        return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
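The accessors made __always_inline above differ only in atomicity between the plain and double-underscore forms; a short sketch using the declarations from this hunk:

static void example_mark_cpu(struct cpumask *shared, struct cpumask *local,
                             unsigned int cpu)
{
        cpumask_set_cpu(cpu, shared);   /* atomic set_bit(), safe vs. concurrent updaters */
        __cpumask_set_cpu(cpu, local);  /* non-atomic variant for masks owned by the caller */

        if (cpumask_test_cpu(cpu, shared))
                pr_debug("cpu %u marked\n", cpu);
}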
1 include/linux/cpumask_api.h Normal file
@@ -0,0 +1 @@
#include <linux/cpumask.h>
@@ -7,5 +7,12 @@

#include <linux/types.h>

#define CRC64_ROCKSOFT_STRING "crc64-rocksoft"

u64 __pure crc64_be(u64 crc, const void *p, size_t len);
u64 __pure crc64_rocksoft_generic(u64 crc, const void *p, size_t len);

u64 crc64_rocksoft(const unsigned char *buffer, size_t len);
u64 crc64_rocksoft_update(u64 crc, const unsigned char *buffer, size_t len);

#endif /* _LINUX_CRC64_H */
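The new Rocksoft pair supports one-shot and chained use; a sketch (the assumption that chaining over split buffers matches a one-shot computation over the concatenation is the usual CRC property, not something this header spells out):

static u64 example_crc64(const unsigned char *a, size_t alen,
                         const unsigned char *b, size_t blen)
{
        u64 crc;

        crc = crc64_rocksoft(a, alen);                  /* one-shot over the first chunk */
        crc = crc64_rocksoft_update(crc, b, blen);      /* continue over the second chunk */
        return crc;
}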
@@ -176,7 +176,7 @@ extern int set_cred_ucounts(struct cred *);
 * check for validity of credentials
 */
#ifdef CONFIG_DEBUG_CREDENTIALS
extern void __invalid_creds(const struct cred *, const char *, unsigned);
extern void __noreturn __invalid_creds(const struct cred *, const char *, unsigned);
extern void __validate_process_creds(struct task_struct *,
                                     const char *, unsigned);
@@ -132,6 +132,15 @@
 */
#define CRYPTO_ALG_ALLOCATES_MEMORY     0x00010000

/*
 * Mark an algorithm as a service implementation only usable by a
 * template and never by a normal user of the kernel crypto API.
 * This is intended to be used by algorithms that are themselves
 * not FIPS-approved but may instead be used to implement parts of
 * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)).
 */
#define CRYPTO_ALG_FIPS_INTERNAL        0x00020000

/*
 * Transform masks and values (for crt_flags).
 */
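How the new flag is meant to appear on an algorithm definition, as a hedged fragment; the kpp algorithm shown is hypothetical and abbreviated, and only the .cra_flags usage is the point:

static struct kpp_alg example_dh = {
        /* ... ops elided ... */
        .base = {
                .cra_name        = "dh",
                .cra_driver_name = "dh-example",
                /* usable only underneath a template such as ffdhe2048(dh) */
                .cra_flags       = CRYPTO_ALG_FIPS_INTERNAL,
        },
};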
@@ -60,19 +60,18 @@ struct damon_region {

/**
 * struct damon_target - Represents a monitoring target.
 * @id: Unique identifier for this target.
 * @pid: The PID of the virtual address space to monitor.
 * @nr_regions: Number of monitoring target regions of this target.
 * @regions_list: Head of the monitoring target regions of this target.
 * @list: List head for siblings.
 *
 * Each monitoring context could have multiple targets.  For example, a context
 * for virtual memory address spaces could have multiple target processes.  The
 * @id of each target should be unique among the targets of the context.  For
 * example, in the virtual address monitoring context, it could be a pidfd or
 * an address of an mm_struct.
 * @pid should be set for appropriate &struct damon_operations including the
 * virtual address spaces monitoring operations.
 */
struct damon_target {
        unsigned long id;
        struct pid *pid;
        unsigned int nr_regions;
        struct list_head regions_list;
        struct list_head list;
@@ -88,6 +87,7 @@ struct damon_target {
 * @DAMOS_HUGEPAGE:     Call ``madvise()`` for the region with MADV_HUGEPAGE.
 * @DAMOS_NOHUGEPAGE:   Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
 * @DAMOS_STAT:         Do nothing but count the stat.
 * @NR_DAMOS_ACTIONS:   Total number of DAMOS actions
 */
enum damos_action {
        DAMOS_WILLNEED,
@@ -96,6 +96,7 @@ enum damos_action {
        DAMOS_HUGEPAGE,
        DAMOS_NOHUGEPAGE,
        DAMOS_STAT,             /* Do nothing but only record the stat */
        NR_DAMOS_ACTIONS,
};

/**
@@ -121,9 +122,9 @@ enum damos_action {
 * uses smaller one as the effective quota.
 *
 * For selecting regions within the quota, DAMON prioritizes current scheme's
 * target memory regions using the &struct damon_primitive->get_scheme_score.
 * target memory regions using the &struct damon_operations->get_scheme_score.
 * You could customize the prioritization logic by setting &weight_sz,
 * &weight_nr_accesses, and &weight_age, because monitoring primitives are
 * &weight_nr_accesses, and &weight_age, because monitoring operations are
 * encouraged to respect those.
 */
struct damos_quota {
@@ -158,10 +159,12 @@ struct damos_quota {
 *
 * @DAMOS_WMARK_NONE:           Ignore the watermarks of the given scheme.
 * @DAMOS_WMARK_FREE_MEM_RATE:  Free memory rate of the system in [0,1000].
 * @NR_DAMOS_WMARK_METRICS:     Total number of DAMOS watermark metrics
 */
enum damos_wmark_metric {
        DAMOS_WMARK_NONE,
        DAMOS_WMARK_FREE_MEM_RATE,
        NR_DAMOS_WMARK_METRICS,
};

/**
@@ -254,13 +257,26 @@ struct damos {
        struct list_head list;
};

/**
 * enum damon_ops_id - Identifier for each monitoring operations implementation
 *
 * @DAMON_OPS_VADDR:    Monitoring operations for virtual address spaces
 * @DAMON_OPS_PADDR:    Monitoring operations for the physical address space
 */
enum damon_ops_id {
        DAMON_OPS_VADDR,
        DAMON_OPS_PADDR,
        NR_DAMON_OPS,
};

struct damon_ctx;

/**
 * struct damon_primitive - Monitoring primitives for given use cases.
 * struct damon_operations - Monitoring operations for given use cases.
 *
 * @init:       Initialize primitive-internal data structures.
 * @update:     Update primitive-internal data structures.
 * @id:         Identifier of this operations set.
 * @init:       Initialize operations-related data structures.
 * @update:     Update operations-related data structures.
 * @prepare_access_checks:      Prepare next access check of target regions.
 * @check_accesses:             Check the accesses to target regions.
 * @reset_aggregated:           Reset aggregated accesses monitoring results.
@@ -270,18 +286,20 @@ struct damon_ctx;
 * @cleanup:                    Clean up the context.
 *
 * DAMON can be extended for various address spaces and usages.  For this,
 * users should register the low level primitives for their target address
 * space and usecase via the &damon_ctx.primitive.  Then, the monitoring thread
 * users should register the low level operations for their target address
 * space and usecase via the &damon_ctx.ops.  Then, the monitoring thread
 * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
 * the monitoring, @update after each &damon_ctx.primitive_update_interval, and
 * the monitoring, @update after each &damon_ctx.ops_update_interval, and
 * @check_accesses, @target_valid and @prepare_access_checks after each
 * &damon_ctx.sample_interval.  Finally, @reset_aggregated is called after each
 * &damon_ctx.aggr_interval.
 *
 * @init should initialize primitive-internal data structures.  For example,
 * Each &struct damon_operations instance having valid @id can be registered
 * via damon_register_ops() and selected by damon_select_ops() later.
 * @init should initialize operations-related data structures.  For example,
 * this could be used to construct proper monitoring target regions and link
 * those to @damon_ctx.adaptive_targets.
 * @update should update the primitive-internal data structures.  For example,
 * @update should update the operations-related data structures.  For example,
 * this could be used to update monitoring target regions for current status.
 * @prepare_access_checks should manipulate the monitoring regions to be
 * prepared for the next access check.
@@ -301,7 +319,8 @@ struct damon_ctx;
 * monitoring.
 * @cleanup is called from @kdamond just before its termination.
 */
struct damon_primitive {
struct damon_operations {
        enum damon_ops_id id;
        void (*init)(struct damon_ctx *context);
        void (*update)(struct damon_ctx *context);
        void (*prepare_access_checks)(struct damon_ctx *context);
@@ -355,15 +374,15 @@ struct damon_callback {
 *
 * @sample_interval:            The time between access samplings.
 * @aggr_interval:              The time between monitor results aggregations.
 * @primitive_update_interval:  The time between monitoring primitive updates.
 * @ops_update_interval:        The time between monitoring operations updates.
 *
 * For each @sample_interval, DAMON checks whether each region is accessed or
 * not.  It aggregates and keeps the access information (number of accesses to
 * each region) for @aggr_interval time.  DAMON also checks whether the target
 * memory regions need update (e.g., by ``mmap()`` calls from the application,
 * in case of virtual memory monitoring) and applies the changes for each
 * @primitive_update_interval.  All time intervals are in micro-seconds.
 * Please refer to &struct damon_primitive and &struct damon_callback for more
 * @ops_update_interval.  All time intervals are in micro-seconds.
 * Please refer to &struct damon_operations and &struct damon_callback for more
 * detail.
 *
 * @kdamond:            Kernel thread who does the monitoring.
@@ -375,7 +394,7 @@ struct damon_callback {
 *
 * Once started, the monitoring thread runs until explicitly required to be
 * terminated or every monitoring target is invalid.  The validity of the
 * targets is checked via the &damon_primitive.target_valid of @primitive.  The
 * targets is checked via the &damon_operations.target_valid of @ops.  The
 * termination can also be explicitly requested by writing non-zero to
 * @kdamond_stop.  The thread sets @kdamond to NULL when it terminates.
 * Therefore, users can know whether the monitoring is ongoing or terminated by
@@ -385,7 +404,7 @@ struct damon_callback {
 * Note that the monitoring thread protects only @kdamond and @kdamond_stop via
 * @kdamond_lock.  Accesses to other fields must be protected by themselves.
 *
 * @primitive:  Set of monitoring primitives for given use cases.
 * @ops:        Set of monitoring operations for given use cases.
 * @callback:   Set of callbacks for monitoring events notifications.
 *
 * @min_nr_regions:     The minimum number of adaptive monitoring regions.
@@ -396,17 +415,17 @@ struct damon_callback {
struct damon_ctx {
        unsigned long sample_interval;
        unsigned long aggr_interval;
        unsigned long primitive_update_interval;
        unsigned long ops_update_interval;

/* private: internal use only */
        struct timespec64 last_aggregation;
        struct timespec64 last_primitive_update;
        struct timespec64 last_ops_update;

/* public: */
        struct task_struct *kdamond;
        struct mutex kdamond_lock;

        struct damon_primitive primitive;
        struct damon_operations ops;
        struct damon_callback callback;

        unsigned long min_nr_regions;
@@ -475,7 +494,7 @@ struct damos *damon_new_scheme(
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
void damon_destroy_scheme(struct damos *s);

struct damon_target *damon_new_target(unsigned long id);
struct damon_target *damon_new_target(void);
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
bool damon_targets_empty(struct damon_ctx *ctx);
void damon_free_target(struct damon_target *t);
@@ -484,28 +503,18 @@ unsigned int damon_nr_regions(struct damon_target *t);

struct damon_ctx *damon_new_ctx(void);
void damon_destroy_ctx(struct damon_ctx *ctx);
int damon_set_targets(struct damon_ctx *ctx,
                      unsigned long *ids, ssize_t nr_ids);
int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
                    unsigned long aggr_int, unsigned long primitive_upd_int,
                    unsigned long aggr_int, unsigned long ops_upd_int,
                    unsigned long min_nr_reg, unsigned long max_nr_reg);
int damon_set_schemes(struct damon_ctx *ctx,
                      struct damos **schemes, ssize_t nr_schemes);
int damon_nr_running_ctxs(void);
int damon_register_ops(struct damon_operations *ops);
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id);

int damon_start(struct damon_ctx **ctxs, int nr_ctxs);
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);

#endif  /* CONFIG_DAMON */

#ifdef CONFIG_DAMON_VADDR
bool damon_va_target_valid(void *t);
void damon_va_set_primitives(struct damon_ctx *ctx);
#endif  /* CONFIG_DAMON_VADDR */

#ifdef CONFIG_DAMON_PADDR
bool damon_pa_target_valid(void *t);
void damon_pa_set_primitives(struct damon_ctx *ctx);
#endif  /* CONFIG_DAMON_PADDR */

#endif  /* _DAMON_H */
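Pulling the renamed interface together, a minimal in-kernel sketch of building and starting a context with the new operations-based API; the interval and region numbers are illustrative and the caller is assumed to hold a reference on the target pid:

static int example_start_damon(struct pid *pid)
{
        struct damon_ctx *ctx;
        struct damon_target *t;
        int err;

        ctx = damon_new_ctx();
        if (!ctx)
                return -ENOMEM;

        err = damon_select_ops(ctx, DAMON_OPS_VADDR);   /* pick a registered ops set */
        if (err)
                goto out;

        /* sample 5ms, aggregate 100ms, refresh regions every 1s, 10..1000 regions */
        err = damon_set_attrs(ctx, 5000, 100000, 1000000, 10, 1000);
        if (err)
                goto out;

        t = damon_new_target();         /* the id parameter is gone; set ->pid instead */
        if (!t) {
                err = -ENOMEM;
                goto out;
        }
        t->pid = pid;
        damon_add_target(ctx, t);

        return damon_start(&ctx, 1, true);      /* note the new 'exclusive' argument */
out:
        damon_destroy_ctx(ctx);
        return err;
}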
@@ -316,12 +316,6 @@ struct dm_target {
         */
        unsigned num_secure_erase_bios;

        /*
         * The number of WRITE SAME bios that will be submitted to the target.
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         */
        unsigned num_write_same_bios;

        /*
         * The number of WRITE ZEROES bios that will be submitted to the target.
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
@@ -358,10 +352,16 @@ struct dm_target {
        bool limit_swap_bios:1;

        /*
         * Set if this target implements a a zoned device and needs emulation of
         * Set if this target implements a zoned device and needs emulation of
         * zone append operations using regular writes.
         */
        bool emulate_zone_append:1;

        /*
         * Set if the target will submit IO using dm_submit_bio_remap()
         * after returning DM_MAPIO_SUBMITTED from its map function.
         */
        bool accounts_remapped_io:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);

@@ -465,6 +465,7 @@ int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
union map_info *dm_get_rq_mapinfo(struct request *rq);

#ifdef CONFIG_BLK_DEV_ZONED
@@ -52,13 +52,13 @@
 *
 *      struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf);
 *
 *      dma_buf_map_set_vaddr(&map. 0xdeadbeaf);
 *      dma_buf_map_set_vaddr(&map, 0xdeadbeaf);
 *
 * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem().
 *
 * .. code-block:: c
 *
 *      dma_buf_map_set_vaddr_iomem(&map. 0xdeadbeaf);
 *      dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf);
 *
 * Instances of struct dma_buf_map do not have to be cleaned up, but
 * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings
@@ -13,7 +13,7 @@
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/dma-buf-map.h>
#include <linux/iosys-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

@@ -283,8 +283,8 @@ struct dma_buf_ops {
         */
        int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

        int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
        void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
        int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);
        void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map);
};

/**
@@ -347,7 +347,7 @@ struct dma_buf {
         * @vmap_ptr:
         * The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
         */
        struct dma_buf_map vmap_ptr;
        struct iosys_map vmap_ptr;

        /**
         * @exp_name:
@@ -628,6 +628,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dma_buf,

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
                 unsigned long);
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
#endif /* __DMA_BUF_H__ */
@@ -45,19 +45,6 @@ struct dma_fence_array {
        struct irq_work work;
};

extern const struct dma_fence_ops dma_fence_array_ops;

/**
 * dma_fence_is_array - check if a fence is from the array subsclass
 * @fence: fence to test
 *
 * Return true if it is a dma_fence_array and false otherwise.
 */
static inline bool dma_fence_is_array(struct dma_fence *fence)
{
        return fence->ops == &dma_fence_array_ops;
}

/**
 * to_dma_fence_array - cast a fence to a dma_fence_array
 * @fence: fence to cast to a dma_fence_array
@@ -68,12 +55,27 @@ static inline bool dma_fence_is_array(struct dma_fence *fence)
static inline struct dma_fence_array *
to_dma_fence_array(struct dma_fence *fence)
{
        if (fence->ops != &dma_fence_array_ops)
        if (!fence || !dma_fence_is_array(fence))
                return NULL;

        return container_of(fence, struct dma_fence_array, base);
}

/**
 * dma_fence_array_for_each - iterate over all fences in array
 * @fence: current fence
 * @index: index into the array
 * @head: potential dma_fence_array object
 *
 * Test if @array is a dma_fence_array object and if yes iterate over all fences
 * in the array. If not just iterate over the fence in @array itself.
 *
 * For a deep dive iterator see dma_fence_unwrap_for_each().
 */
#define dma_fence_array_for_each(fence, index, head)                    \
        for (index = 0, fence = dma_fence_array_first(head); fence;    \
             ++(index), fence = dma_fence_array_next(head, index))

struct dma_fence_array *dma_fence_array_create(int num_fences,
                                               struct dma_fence **fences,
                                               u64 context, unsigned seqno,
@@ -81,4 +83,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,

bool dma_fence_match_context(struct dma_fence *fence, u64 context);

struct dma_fence *dma_fence_array_first(struct dma_fence *head);
struct dma_fence *dma_fence_array_next(struct dma_fence *head,
                                       unsigned int index);

#endif /* __LINUX_DMA_FENCE_ARRAY_H */
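Usage of the new iterator, as a sketch; note that it deliberately also works when @head is a plain fence rather than an array:

static bool example_all_signaled(struct dma_fence *head)
{
        struct dma_fence *fence;
        unsigned int index;

        dma_fence_array_for_each(fence, index, head) {
                if (!dma_fence_is_signaled(fence))
                        return false;
        }
        return true;
}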
@@ -49,7 +49,6 @@ struct dma_fence_chain {
        spinlock_t lock;
};

extern const struct dma_fence_ops dma_fence_chain_ops;

/**
 * to_dma_fence_chain - cast a fence to a dma_fence_chain
@@ -61,12 +60,27 @@ extern const struct dma_fence_ops dma_fence_chain_ops;
static inline struct dma_fence_chain *
to_dma_fence_chain(struct dma_fence *fence)
{
        if (!fence || fence->ops != &dma_fence_chain_ops)
        if (!fence || !dma_fence_is_chain(fence))
                return NULL;

        return container_of(fence, struct dma_fence_chain, base);
}

/**
 * dma_fence_chain_contained - return the contained fence
 * @fence: the fence to test
 *
 * If the fence is a dma_fence_chain the function returns the fence contained
 * inside the chain object, otherwise it returns the fence itself.
 */
static inline struct dma_fence *
dma_fence_chain_contained(struct dma_fence *fence)
{
        struct dma_fence_chain *chain = to_dma_fence_chain(fence);

        return chain ? chain->fence : fence;
}

/**
 * dma_fence_chain_alloc
 *
@@ -98,6 +112,8 @@ static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
 *
 * Iterate over all fences in the chain. We keep a reference to the current
 * fence while inside the loop which must be dropped when breaking out.
 *
 * For a deep dive iterator see dma_fence_unwrap_for_each().
 */
#define dma_fence_chain_for_each(iter, head)    \
        for (iter = dma_fence_get(head); iter; \
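dma_fence_chain_contained() pairs naturally with the chain walker; a sketch, remembering that the loop holds a reference which is kept by an early return and must eventually be dropped by the caller:

static struct dma_fence *example_find_unsignaled(struct dma_fence *head)
{
        struct dma_fence *iter;

        dma_fence_chain_for_each(iter, head) {
                /* look through the chain link at the fence it carries */
                struct dma_fence *f = dma_fence_chain_contained(iter);

                if (!dma_fence_is_signaled(f))
                        return iter;    /* iterator reference is handed to the caller */
        }
        return NULL;
}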
95 include/linux/dma-fence-unwrap.h Normal file
@@ -0,0 +1,95 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * fence-chain: chain fences together in a timeline
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc.
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#ifndef __LINUX_DMA_FENCE_UNWRAP_H
#define __LINUX_DMA_FENCE_UNWRAP_H

#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-array.h>

/**
 * struct dma_fence_unwrap - cursor into the container structure
 *
 * Should be used with dma_fence_unwrap_for_each() iterator macro.
 */
struct dma_fence_unwrap {
        /**
         * @chain: potential dma_fence_chain, but can be other fence as well
         */
        struct dma_fence *chain;
        /**
         * @array: potential dma_fence_array, but can be other fence as well
         */
        struct dma_fence *array;
        /**
         * @index: last returned index if @array is really a dma_fence_array
         */
        unsigned int index;
};

/* Internal helper to start new array iteration, don't use directly */
static inline struct dma_fence *
__dma_fence_unwrap_array(struct dma_fence_unwrap * cursor)
{
        cursor->array = dma_fence_chain_contained(cursor->chain);
        cursor->index = 0;
        return dma_fence_array_first(cursor->array);
}

/**
 * dma_fence_unwrap_first - return the first fence from fence containers
 * @head: the entrypoint into the containers
 * @cursor: current position inside the containers
 *
 * Unwraps potential dma_fence_chain/dma_fence_array containers and return the
 * first fence.
 */
static inline struct dma_fence *
dma_fence_unwrap_first(struct dma_fence *head, struct dma_fence_unwrap *cursor)
{
        cursor->chain = dma_fence_get(head);
        return __dma_fence_unwrap_array(cursor);
}

/**
 * dma_fence_unwrap_next - return the next fence from a fence containers
 * @cursor: current position inside the containers
 *
 * Continue unwrapping the dma_fence_chain/dma_fence_array containers and return
 * the next fence from them.
 */
static inline struct dma_fence *
dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
{
        struct dma_fence *tmp;

        ++cursor->index;
        tmp = dma_fence_array_next(cursor->array, cursor->index);
        if (tmp)
                return tmp;

        cursor->chain = dma_fence_chain_walk(cursor->chain);
        return __dma_fence_unwrap_array(cursor);
}

/**
 * dma_fence_unwrap_for_each - iterate over all fences in containers
 * @fence: current fence
 * @cursor: current position inside the containers
 * @head: starting point for the iterator
 *
 * Unwrap dma_fence_chain and dma_fence_array containers and deep dive into all
 * potential fences in them. If @head is just a normal fence only that one is
 * returned.
 */
#define dma_fence_unwrap_for_each(fence, cursor, head)                  \
        for (fence = dma_fence_unwrap_first(head, cursor); fence;      \
             fence = dma_fence_unwrap_next(cursor))

#endif
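The deep-dive iterator in use, as a sketch; unlike dma_fence_array_for_each() it also descends through chains of arrays, so the loop body only ever sees leaf fences:

static unsigned int example_count_leaf_fences(struct dma_fence *head)
{
        struct dma_fence_unwrap cursor;
        struct dma_fence *f;
        unsigned int count = 0;

        dma_fence_unwrap_for_each(f, &cursor, head)
                count++;        /* f is never a container here */

        return count;
}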
@@ -587,4 +587,42 @@ struct dma_fence *dma_fence_get_stub(void);
struct dma_fence *dma_fence_allocate_private_stub(void);
u64 dma_fence_context_alloc(unsigned num);

extern const struct dma_fence_ops dma_fence_array_ops;
extern const struct dma_fence_ops dma_fence_chain_ops;

/**
 * dma_fence_is_array - check if a fence is from the array subclass
 * @fence: the fence to test
 *
 * Return true if it is a dma_fence_array and false otherwise.
 */
static inline bool dma_fence_is_array(struct dma_fence *fence)
{
        return fence->ops == &dma_fence_array_ops;
}

/**
 * dma_fence_is_chain - check if a fence is from the chain subclass
 * @fence: the fence to test
 *
 * Return true if it is a dma_fence_chain and false otherwise.
 */
static inline bool dma_fence_is_chain(struct dma_fence *fence)
{
        return fence->ops == &dma_fence_chain_ops;
}

/**
 * dma_fence_is_container - check if a fence is a container for other fences
 * @fence: the fence to test
 *
 * Return true if this fence is a container for other fences, false otherwise.
 * This is important since we can't build up large fence structure or otherwise
 * we run into recursion during operation on those fences.
 */
static inline bool dma_fence_is_container(struct dma_fence *fence)
{
        return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
}

#endif /* __LINUX_DMA_FENCE_H */
@@ -153,6 +153,13 @@ struct dma_resv {
 * struct dma_resv_iter - current position into the dma_resv fences
 *
 * Don't touch this directly in the driver, use the accessor function instead.
 *
 * IMPORTANT
 *
 * When using the lockless iterators like dma_resv_iter_next_unlocked() or
 * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted.
 * Code which accumulates statistics or similar needs to check for this with
 * dma_resv_iter_is_restarted().
 */
struct dma_resv_iter {
        /** @obj: The dma_resv object we iterate over */
@@ -243,7 +250,11 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
 * &dma_resv.lock and using RCU instead. The cursor needs to be initialized
 * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
 * the iterator a reference to the dma_fence is held and the RCU lock dropped.
 * When the dma_resv is modified the iteration starts over again.
 *
 * Beware that the iterator can be restarted when the struct dma_resv for
 * @cursor is modified. Code which accumulates statistics or similar needs to
 * check for this with dma_resv_iter_is_restarted(). For this reason prefer the
 * lock iterator dma_resv_for_each_fence() whenever possible.
 */
#define dma_resv_for_each_fence_unlocked(cursor, fence)                 \
        for (fence = dma_resv_iter_first_unlocked(cursor);              \
@@ -458,8 +469,8 @@ void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
                        unsigned *pshared_count, struct dma_fence ***pshared);
int dma_resv_get_fences(struct dma_resv *obj, bool write,
                        unsigned int *num_fences, struct dma_fence ***fences);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
                           unsigned long timeout);
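What the restart warning means in practice, sketched for the lockless iterator; the dma_resv_iter_begin()/dma_resv_iter_end() bracket and its bool all-fences argument are assumed from the API of this kernel generation rather than shown in the hunk above:

static unsigned int example_count_fences(struct dma_resv *resv)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
        unsigned int count = 0;

        dma_resv_iter_begin(&cursor, resv, true);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                if (dma_resv_iter_is_restarted(&cursor))
                        count = 0;      /* the object changed under us: redo the tally */
                count++;
        }
        dma_resv_iter_end(&cursor);
        return count;
}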
@@ -32,31 +32,29 @@ int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto);

void dsa_tag_8021q_unregister(struct dsa_switch *ds);

int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
                              struct dsa_bridge bridge);

void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port,
                                struct dsa_bridge bridge);

struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
                               u16 tpid, u16 tci);

void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id);
void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
                   int *vbid);

int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
                                        struct dsa_bridge bridge);
struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master,
                                                   int vbid);

void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
                                           struct dsa_bridge bridge);
u16 dsa_tag_8021q_bridge_vid(unsigned int bridge_num);

u16 dsa_8021q_bridge_tx_fwd_offload_vid(unsigned int bridge_num);

u16 dsa_tag_8021q_tx_vid(const struct dsa_port *dp);

u16 dsa_tag_8021q_rx_vid(const struct dsa_port *dp);
u16 dsa_tag_8021q_standalone_vid(const struct dsa_port *dp);

int dsa_8021q_rx_switch_id(u16 vid);

int dsa_8021q_rx_source_port(u16 vid);

bool vid_is_dsa_8021q_rxvlan(u16 vid);

bool vid_is_dsa_8021q_txvlan(u16 vid);

bool vid_is_dsa_8021q(u16 vid);

#endif /* _NET_DSA_8021Q_H */
82 include/linux/dsa/tag_qca.h Normal file
@@ -0,0 +1,82 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __TAG_QCA_H
#define __TAG_QCA_H

#define QCA_HDR_LEN     2
#define QCA_HDR_VERSION 0x2

#define QCA_HDR_RECV_VERSION            GENMASK(15, 14)
#define QCA_HDR_RECV_PRIORITY           GENMASK(13, 11)
#define QCA_HDR_RECV_TYPE               GENMASK(10, 6)
#define QCA_HDR_RECV_FRAME_IS_TAGGED    BIT(3)
#define QCA_HDR_RECV_SOURCE_PORT        GENMASK(2, 0)

/* Packet type for recv */
#define QCA_HDR_RECV_TYPE_NORMAL        0x0
#define QCA_HDR_RECV_TYPE_MIB           0x1
#define QCA_HDR_RECV_TYPE_RW_REG_ACK    0x2

#define QCA_HDR_XMIT_VERSION            GENMASK(15, 14)
#define QCA_HDR_XMIT_PRIORITY           GENMASK(13, 11)
#define QCA_HDR_XMIT_CONTROL            GENMASK(10, 8)
#define QCA_HDR_XMIT_FROM_CPU           BIT(7)
#define QCA_HDR_XMIT_DP_BIT             GENMASK(6, 0)

/* Packet type for xmit */
#define QCA_HDR_XMIT_TYPE_NORMAL        0x0
#define QCA_HDR_XMIT_TYPE_RW_REG        0x1

/* Check code for a valid mgmt packet. Switch will ignore the packet
 * with this wrong.
 */
#define QCA_HDR_MGMT_CHECK_CODE_VAL     0x5

/* Specific define for in-band MDIO read/write with Ethernet packet */
#define QCA_HDR_MGMT_SEQ_LEN            4 /* 4 byte for the seq */
#define QCA_HDR_MGMT_COMMAND_LEN        4 /* 4 byte for the command */
#define QCA_HDR_MGMT_DATA1_LEN          4 /* First 4 byte for the mdio data */
#define QCA_HDR_MGMT_HEADER_LEN         (QCA_HDR_MGMT_SEQ_LEN + \
                                        QCA_HDR_MGMT_COMMAND_LEN + \
                                        QCA_HDR_MGMT_DATA1_LEN)

#define QCA_HDR_MGMT_DATA2_LEN          12 /* Other 12 byte for the mdio data */
#define QCA_HDR_MGMT_PADDING_LEN        34 /* Padding to reach the min Ethernet packet */

#define QCA_HDR_MGMT_PKT_LEN            (QCA_HDR_MGMT_HEADER_LEN + \
                                        QCA_HDR_LEN + \
                                        QCA_HDR_MGMT_DATA2_LEN + \
                                        QCA_HDR_MGMT_PADDING_LEN)

#define QCA_HDR_MGMT_SEQ_NUM            GENMASK(31, 0)  /* 63, 32 */
#define QCA_HDR_MGMT_CHECK_CODE         GENMASK(31, 29) /* 31, 29 */
#define QCA_HDR_MGMT_CMD                BIT(28)         /* 28 */
#define QCA_HDR_MGMT_LENGTH             GENMASK(23, 20) /* 23, 20 */
#define QCA_HDR_MGMT_ADDR               GENMASK(18, 0)  /* 18, 0 */

/* Special struct emulating a Ethernet header */
struct qca_mgmt_ethhdr {
        u32 command;            /* command bit 31:0 */
        u32 seq;                /* seq 63:32 */
        u32 mdio_data;          /* first 4byte mdio */
        __be16 hdr;             /* qca hdr */
} __packed;

enum mdio_cmd {
        MDIO_WRITE = 0x0,
        MDIO_READ
};

struct mib_ethhdr {
        u32 data[3];            /* first 3 mib counter */
        __be16 hdr;             /* qca hdr */
} __packed;

struct qca_tagger_data {
        void (*rw_reg_ack_handler)(struct dsa_switch *ds,
                                   struct sk_buff *skb);
        void (*mib_autocast_handler)(struct dsa_switch *ds,
                                     struct sk_buff *skb);
};

#endif /* __TAG_QCA_H */
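The GENMASK()/BIT() definitions above are laid out for FIELD_GET()/FIELD_PREP(); a receive-side sketch, assuming <linux/bitfield.h> and a tag already converted to host byte order (the parsing function itself is illustrative, not the tagger's actual code):

#include <linux/bitfield.h>

static int example_parse_qca_hdr(u16 hdr)
{
        if (FIELD_GET(QCA_HDR_RECV_VERSION, hdr) != QCA_HDR_VERSION)
                return -EINVAL;

        switch (FIELD_GET(QCA_HDR_RECV_TYPE, hdr)) {
        case QCA_HDR_RECV_TYPE_RW_REG_ACK:      /* in-band register read/write ack */
        case QCA_HDR_RECV_TYPE_MIB:             /* autocast MIB counters */
                return 0;                       /* handled via the qca_tagger_data hooks */
        default:
                return FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, hdr);
        }
}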
@@ -32,28 +32,25 @@ struct dtpm_ops {
        void (*release)(struct dtpm *);
};

typedef int (*dtpm_init_t)(void);
struct device_node;

struct dtpm_descr {
        dtpm_init_t init;
struct dtpm_subsys_ops {
        const char *name;
        int (*init)(void);
        void (*exit)(void);
        int (*setup)(struct dtpm *, struct device_node *);
};

/* Init section thermal table */
extern struct dtpm_descr __dtpm_table[];
extern struct dtpm_descr __dtpm_table_end[];
enum DTPM_NODE_TYPE {
        DTPM_NODE_VIRTUAL = 0,
        DTPM_NODE_DT,
};

#define DTPM_TABLE_ENTRY(name, __init)                          \
        static struct dtpm_descr __dtpm_table_entry_##name      \
        __used __section("__dtpm_table") = {                    \
                .init = __init,                                 \
        }

#define DTPM_DECLARE(name, init)        DTPM_TABLE_ENTRY(name, init)

#define for_each_dtpm_table(__dtpm)     \
        for (__dtpm = __dtpm_table;     \
             __dtpm < __dtpm_table_end; \
             __dtpm++)
struct dtpm_node {
        enum DTPM_NODE_TYPE type;
        const char *name;
        struct dtpm_node *parent;
};

static inline struct dtpm *to_dtpm(struct powercap_zone *zone)
{
@@ -70,4 +67,7 @@ void dtpm_unregister(struct dtpm *dtpm);

int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent);

int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table);

void dtpm_destroy_hierarchy(void);
#endif
@@ -114,7 +114,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
#endif
}

#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64)
#ifdef CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS
/*
 * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
 * extra segments containing the gate DSO contents.  Dumping its
@@ -149,6 +149,6 @@ static inline size_t elf_core_extra_data_size(void)
{
        return 0;
}
#endif
#endif /* CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS */

#endif /* _LINUX_ELFCORE_H */
@@ -116,6 +116,7 @@ struct em_data_callback {
                             struct device *dev);
};
#define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb }
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)

struct em_perf_domain *em_cpu_get(int cpu);
struct em_perf_domain *em_pd_get(struct device *dev);
@@ -264,6 +265,7 @@ static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
#else
struct em_data_callback {};
#define EM_DATA_CB(_active_power_cb) { }
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)

static inline
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
@@ -3,7 +3,7 @@
#define __LINUX_ENTRYCOMMON_H

#include <linux/static_call_types.h>
#include <linux/tracehook.h>
#include <linux/ptrace.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>

@@ -79,26 +79,6 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs);
static __always_inline void arch_check_user_regs(struct pt_regs *regs) {}
#endif

/**
 * arch_syscall_enter_tracehook - Wrapper around tracehook_report_syscall_entry()
 * @regs:       Pointer to currents pt_regs
 *
 * Returns: 0 on success or an error code to skip the syscall.
 *
 * Defaults to tracehook_report_syscall_entry(). Can be replaced by
 * architecture specific code.
 *
 * Invoked from syscall_enter_from_user_mode()
 */
static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs);

#ifndef arch_syscall_enter_tracehook
static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs)
{
        return tracehook_report_syscall_entry(regs);
}
#endif

/**
 * enter_from_user_mode - Establish state when coming from user mode
 *
@@ -157,7 +137,7 @@ void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
 * It handles the following work items:
 *
 *  1) syscall_work flag dependent invocations of
 *     arch_syscall_enter_tracehook(), __secure_computing(), trace_sys_enter()
 *     ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
 *  2) Invocation of audit_syscall_entry()
 */
long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);

@@ -277,26 +257,7 @@ static __always_inline void arch_exit_to_user_mode(void) { }
 *
 * Invoked from exit_to_user_mode_loop().
 */
void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal);

/**
 * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit()
 * @regs:       Pointer to currents pt_regs
 * @step:       Indicator for single step
 *
 * Defaults to tracehook_report_syscall_exit(). Can be replaced by
 * architecture specific code.
 *
 * Invoked from syscall_exit_to_user_mode()
 */
static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step);

#ifndef arch_syscall_exit_tracehook
static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step)
{
        tracehook_report_syscall_exit(regs, step);
}
#endif
void arch_do_signal_or_restart(struct pt_regs *regs);

/**
 * exit_to_user_mode - Fixup state when exiting to user mode
@@ -347,7 +308,7 @@ void syscall_exit_to_user_mode_work(struct pt_regs *regs);
 *      - rseq syscall exit
 *      - audit
 *      - syscall tracing
 *      - tracehook (single stepping)
 *      - ptrace (single stepping)
 *
 *  2) Preparatory work
 *      - Exit to user mode loop (common TIF handling). Invokes
@@ -454,10 +415,21 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
 *
 * Conditional reschedule with additional sanity checks.
 */
void irqentry_exit_cond_resched(void);
void raw_irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
DECLARE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define irqentry_exit_cond_resched_dynamic_enabled      raw_irqentry_exit_cond_resched
#define irqentry_exit_cond_resched_dynamic_disabled     NULL
DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#define irqentry_exit_cond_resched()    static_call(irqentry_exit_cond_resched)()
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void);
#define irqentry_exit_cond_resched()    dynamic_irqentry_exit_cond_resched()
#endif
#else /* CONFIG_PREEMPT_DYNAMIC */
#define irqentry_exit_cond_resched()    raw_irqentry_exit_cond_resched()
#endif /* CONFIG_PREEMPT_DYNAMIC */

/**
 * irqentry_exit - Handle return from exception that used irqentry_enter()
@@ -3,7 +3,7 @@
#define __LINUX_ENTRYKVM_H

#include <linux/static_call_types.h>
#include <linux/tracehook.h>
#include <linux/resume_user_mode.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
@@ -134,7 +134,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
#endif
}

static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#ifdef __BIG_ENDIAN
@@ -372,8 +372,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
 * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits.
 */

static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
                                           const u8 addr2[6+2])
static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
        u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
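The relaxed prototypes still carry the old contract: two extra readable bytes after each address (and 16-bit alignment) remain the caller's responsibility, which the u64 load above depends on. A sketch:

/* Padded to 8 bytes so the u64 load in ether_addr_equal_64bits() is legal. */
static const u8 example_peer[ETH_ALEN + 2] __aligned(2) = {
        0x00, 0x11, 0x22, 0x33, 0x44, 0x55,
};

static bool example_is_peer(const struct ethhdr *eh)
{
        /* h_source is followed by h_proto, so 8 bytes are readable here too */
        return ether_addr_equal_64bits(eh->h_source, example_peer);
}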
@@ -70,17 +70,23 @@ enum {
/**
 * struct kernel_ethtool_ringparam - RX/TX ring configuration
 * @rx_buf_len: Current length of buffers on the rx ring.
 * @tcp_data_split: Scatter packet headers and data to separate buffers
 * @cqe_size: Size of TX/RX completion queue event
 */
struct kernel_ethtool_ringparam {
        u32 rx_buf_len;
        u8  tcp_data_split;
        u32 cqe_size;
};

/**
 * enum ethtool_supported_ring_param - indicator caps for setting ring params
 * @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len
 * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size
 */
enum ethtool_supported_ring_param {
        ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
        ETHTOOL_RING_USE_CQE_SIZE   = BIT(1),
};

#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))

@@ -111,7 +117,7 @@ struct ethtool_link_ext_state_info {
                enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity;
                enum ethtool_link_ext_substate_cable_issue cable_issue;
                enum ethtool_link_ext_substate_module module;
                u8 __link_ext_substate;
                u32 __link_ext_substate;
        };
};
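How a driver would advertise and fill the new fields, as a hedged fragment; the ops wiring is abbreviated and the completion-queue entry size is made up, but the supported_ring_params/cqe_size pairing is the point:

static void example_get_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ring,
                                  struct kernel_ethtool_ringparam *kernel_ring,
                                  struct netlink_ext_ack *extack)
{
        kernel_ring->cqe_size = 64;     /* illustrative CQE size reported to ethtool */
}

static const struct ethtool_ops example_ethtool_ops = {
        .supported_ring_params  = ETHTOOL_RING_USE_CQE_SIZE,
        .get_ringparam          = example_get_ringparam,
};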
@@ -64,6 +64,8 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,

struct kmem_cache;

bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);

int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#ifdef CONFIG_FAILSLAB
extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
@@ -204,6 +204,7 @@ struct fb_pixmap {
struct fb_deferred_io {
        /* delay between mkwrite and deferred handler */
        unsigned long delay;
        bool sort_pagelist;     /* sort pagelist by offset */
        struct mutex lock;      /* mutex that protects the page list */
        struct list_head pagelist;      /* list of touched pages */
        /* callback */
@@ -262,7 +263,7 @@ struct fb_ops {

        /* Draws a rectangle */
        void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect);
        /* Copy data from area to another. Obsolete. */
        /* Copy data from area to another */
        void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region);
        /* Draws a image to the display */
        void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image);
@@ -502,6 +503,7 @@ struct fb_info {
        } *apertures;

        bool skip_vt_switch; /* no VT switch on suspend/resume required */
        bool forced_out; /* set when being removed by another driver */
};

static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
@@ -548,7 +548,7 @@ struct sock_fprog_kern {
#define BPF_IMAGE_ALIGNMENT 8

struct bpf_binary_header {
        u32 pages;
        u32 size;
        u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};

@@ -566,13 +566,15 @@ struct bpf_prog {
                        gpl_compatible:1, /* Is filter GPL compatible? */
                        cb_access:1,    /* Is control block accessed? */
                        dst_needed:1,   /* Do we need dst entry? */
                        blinding_requested:1, /* needs constant blinding */
                        blinded:1,      /* Was blinded */
                        is_func:1,      /* program is a bpf function */
                        kprobe_override:1, /* Do we override a kprobe? */
                        has_callchain_buf:1, /* callchain buffer allocated? */
                        enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
                        call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
                        call_get_func_ip:1; /* Do we call get_func_ip() */
                        call_get_func_ip:1, /* Do we call get_func_ip() */
                        tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
        enum bpf_prog_type      type;           /* Type of BPF program */
        enum bpf_attach_type    expected_attach_type; /* For some prog types */
        u32                     len;            /* Number of filter blocks */
@@ -886,17 +888,8 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
        set_vm_flush_reset_perms(hdr);
        set_memory_ro((unsigned long)hdr, hdr->pages);
        set_memory_x((unsigned long)hdr, hdr->pages);
}

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
        unsigned long real_start = (unsigned long)fp->bpf_func;
        unsigned long addr = real_start & PAGE_MASK;

        return (void *)addr;
        set_memory_ro((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
        set_memory_x((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);

@@ -1068,6 +1061,18 @@ void *bpf_jit_alloc_exec(unsigned long size);
void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);

struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
                          unsigned int alignment,
                          struct bpf_binary_header **rw_hdr,
                          u8 **rw_image,
                          bpf_jit_fill_hole_t bpf_fill_ill_insns);
int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
                                 struct bpf_binary_header *ro_header,
                                 struct bpf_binary_header *rw_header);
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
                              struct bpf_binary_header *rw_header);

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
                                struct bpf_jit_poke_descriptor *poke);

@@ -1356,7 +1361,10 @@ struct bpf_sockopt_kern {
        s32             level;
        s32             optname;
        s32             optlen;
        s32             retval;
        /* for retval in struct bpf_cg_run_ctx */
        struct task_struct      *current_task;
        /* Temporary "register" for indirect stores to ppos. */
        u64             tmp_reg;
};

int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
@@ -34,7 +34,7 @@ static inline bool firmware_request_builtin(struct firmware *fw,
}
#endif

#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
#if IS_REACHABLE(CONFIG_FW_LOADER)
int request_firmware(const struct firmware **fw, const char *name,
                     struct device *device);
int firmware_request_nowarn(const struct firmware **fw, const char *name,
@@ -59,11 +59,16 @@ enum imx_sc_rm_func {

#if IS_ENABLED(CONFIG_IMX_SCU)
bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource);
int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt);
#else
static inline bool
imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
{
        return true;
}
static inline int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt)
{
        return -EOPNOTSUPP;
}
#endif
#endif
@@ -321,8 +321,6 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_ECC_DBE \
        INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE)

#endif

/**
 * Request INTEL_SIP_SMC_RSU_NOTIFY
 *
@@ -404,3 +402,22 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY 18
#define INTEL_SIP_SMC_RSU_MAX_RETRY \
        INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY)

/**
 * Request INTEL_SIP_SMC_FIRMWARE_VERSION
 *
 * Sync call used to query the version of running firmware
 *
 * Call register usage:
 * a0 INTEL_SIP_SMC_FIRMWARE_VERSION
 * a1-a7 not used
 *
 * Return status:
 * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR
 * a1 running firmware version
 */
#define INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION 31
#define INTEL_SIP_SMC_FIRMWARE_VERSION \
        INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION)

#endif
@@ -104,6 +104,9 @@ struct stratix10_svc_chan;
 *
 * @COMMAND_RSU_DCMF_VERSION: query firmware for the DCMF version, return status
 * is SVC_STATUS_OK or SVC_STATUS_ERROR
 *
 * @COMMAND_FIRMWARE_VERSION: query running firmware version, return status
 * is SVC_STATUS_OK or SVC_STATUS_ERROR
 */
enum stratix10_svc_command_code {
        COMMAND_NOOP = 0,
@@ -117,6 +120,7 @@ enum stratix10_svc_command_code {
        COMMAND_RSU_RETRY,
        COMMAND_RSU_MAX_RETRY,
        COMMAND_RSU_DCMF_VERSION,
        COMMAND_FIRMWARE_VERSION,
};

/**
@ -93,6 +93,7 @@ enum pm_api_id {
	PM_FPGA_LOAD = 22,
	PM_FPGA_GET_STATUS = 23,
	PM_GET_CHIPID = 24,
	PM_SECURE_SHA = 26,
	PM_PINCTRL_REQUEST = 28,
	PM_PINCTRL_RELEASE = 29,
	PM_PINCTRL_GET_FUNCTION = 30,

@ -143,6 +144,9 @@ enum pm_ioctl_id {
	IOCTL_OSPI_MUX_SELECT = 21,
	/* Register SGI to ATF */
	IOCTL_REGISTER_SGI = 25,
	/* Runtime feature configuration */
	IOCTL_SET_FEATURE_CONFIG = 26,
	IOCTL_GET_FEATURE_CONFIG = 27,
};

enum pm_query_id {

@ -376,6 +380,14 @@ enum ospi_mux_select_type {
	PM_OSPI_MUX_SEL_LINEAR = 1,
};

enum pm_feature_config_id {
	PM_FEATURE_INVALID = 0,
	PM_FEATURE_OVERTEMP_STATUS = 1,
	PM_FEATURE_OVERTEMP_VALUE = 2,
	PM_FEATURE_EXTWDT_STATUS = 3,
	PM_FEATURE_EXTWDT_VALUE = 4,
};

/**
 * struct zynqmp_pm_query_data - PM query data
 * @qid: query ID

@ -427,6 +439,7 @@ int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
			      const u32 qos,
			      const enum zynqmp_pm_request_ack ack);
int zynqmp_pm_aes_engine(const u64 address, u32 *out);
int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_get_status(u32 *value);
int zynqmp_pm_write_ggs(u32 index, u32 value);

@ -447,6 +460,8 @@ int zynqmp_pm_load_pdi(const u32 src, const u64 address);
int zynqmp_pm_register_notifier(const u32 node, const u32 event,
				const u32 wake, const u32 enable);
int zynqmp_pm_feature(const u32 api_id);
int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value);
int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload);
#else
static inline int zynqmp_pm_get_api_version(u32 *version)
{

@ -601,6 +616,12 @@ static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
	return -ENODEV;
}

static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
				     const u32 flags)
{
	return -ENODEV;
}

static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
				      const u32 flags)
{

@ -689,6 +710,18 @@ static inline int zynqmp_pm_feature(const u32 api_id)
{
	return -ENODEV;
}

static inline int zynqmp_pm_set_feature_config(enum pm_feature_config_id id,
					       u32 value)
{
	return -ENODEV;
}

static inline int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
					       u32 *payload)
{
	return -ENODEV;
}
#endif

#endif /* __FIRMWARE_ZYNQMP_H__ */
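Editor's note: the new feature-config calls pair a setter with a getter keyed by enum pm_feature_config_id. A hedged usage sketch (the external-watchdog values and the helper name are illustrative only):

#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/printk.h>

/* Illustrative: enable the external watchdog feature, then read back
 * the configured value using the IDs from enum pm_feature_config_id. */
static int example_extwdt_setup(void)
{
	u32 value;
	int ret;

	ret = zynqmp_pm_set_feature_config(PM_FEATURE_EXTWDT_STATUS, 1);
	if (ret)
		return ret;

	ret = zynqmp_pm_get_feature_config(PM_FEATURE_EXTWDT_VALUE, &value);
	if (ret)
		return ret;

	pr_info("external watchdog value: %u\n", value);
	return 0;
}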
@ -2,13 +2,17 @@
#ifndef _LINUX_FORTIFY_STRING_H_
#define _LINUX_FORTIFY_STRING_H_

#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline))
#include <linux/const.h>

#define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable
#define __RENAME(x) __asm__(#x)

void fortify_panic(const char *name) __noreturn __cold;
void __read_overflow(void) __compiletime_error("detected read beyond size of object (1st parameter)");
void __read_overflow2(void) __compiletime_error("detected read beyond size of object (2nd parameter)");
void __read_overflow2_field(size_t avail, size_t wanted) __compiletime_warning("detected read beyond size of field (2nd parameter); maybe use struct_group()?");
void __write_overflow(void) __compiletime_error("detected write beyond size of object (1st parameter)");
void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("detected write beyond size of field (1st parameter); maybe use struct_group()?");

#define __compiletime_strlen(p)		\
({					\

@ -48,7 +52,17 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
#define __underlying_strncpy	__builtin_strncpy
#endif

__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
/*
 * Clang's use of __builtin_object_size() within inlines needs hinting via
 * __pass_object_size(). The preference is to only ever use type 1 (member
 * size, rather than struct size), but there remain some stragglers using
 * type 0 that will be converted in the future.
 */
#define POS	__pass_object_size(1)
#define POS0	__pass_object_size(0)

__FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3)
char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
{
	size_t p_size = __builtin_object_size(p, 1);

@ -59,7 +73,8 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
	return __underlying_strncpy(p, q, size);
}

__FORTIFY_INLINE char *strcat(char *p, const char *q)
__FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2)
char *strcat(char * const POS p, const char *q)
{
	size_t p_size = __builtin_object_size(p, 1);

@ -71,7 +86,7 @@ __FORTIFY_INLINE char *strcat(char *p, const char *q)
}

extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
__FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen)
{
	size_t p_size = __builtin_object_size(p, 1);
	size_t p_len = __compiletime_strlen(p);

@ -91,8 +106,16 @@ __FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
	return ret;
}

/* defined after fortified strnlen to reuse it. */
__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
/*
 * Defined after fortified strnlen to reuse it. However, it must still be
 * possible for strlen() to be used on compile-time strings for use in
 * static initializers (i.e. as a constant expression).
 */
#define strlen(p)							\
	__builtin_choose_expr(__is_constexpr(__builtin_strlen(p)),	\
		__builtin_strlen(p), __fortify_strlen(p))
__FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1)
__kernel_size_t __fortify_strlen(const char * const POS p)
{
	__kernel_size_t ret;
	size_t p_size = __builtin_object_size(p, 1);
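Editor's note: the __builtin_choose_expr/__is_constexpr split above exists precisely so that strlen() on a string literal stays a constant expression; only non-constant arguments fall through to the run-time-checked __fortify_strlen(). A minimal sketch of what this keeps working:

/* Both stay compile-time constants under the fortified strlen() macro:
 * __is_constexpr(__builtin_strlen(...)) is true for the literals, so the
 * macro resolves to __builtin_strlen() and never calls __fortify_strlen(). */
static char scratch[strlen("fortify") + 1];		/* array size: 8 */
static const size_t greeting_len = strlen("hello");	/* 5 */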
@ -108,7 +131,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)

/* defined after fortified strlen to reuse it */
extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
__FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size)
{
	size_t p_size = __builtin_object_size(p, 1);
	size_t q_size = __builtin_object_size(q, 1);

@ -135,7 +158,7 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)

/* defined after fortified strnlen to reuse it */
extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
__FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size)
__FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, size_t size)
{
	size_t len;
	/* Use string size rather than possible enclosing struct size. */

@ -181,7 +204,8 @@ __FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size)
}

/* defined after fortified strlen and strnlen to reuse them */
__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
__FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3)
char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
{
	size_t p_len, copy_len;
	size_t p_size = __builtin_object_size(p, 1);

@ -198,51 +222,161 @@ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
	return p;
}

__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
__FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
					 const size_t p_size,
					 const size_t p_size_field)
{
	size_t p_size = __builtin_object_size(p, 0);

	if (__builtin_constant_p(size) && p_size < size)
		__write_overflow();
	if (p_size < size)
		fortify_panic(__func__);
	return __underlying_memset(p, c, size);
}

__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
{
	size_t p_size = __builtin_object_size(p, 0);
	size_t q_size = __builtin_object_size(q, 0);

	if (__builtin_constant_p(size)) {
		if (p_size < size)
		/*
		 * Length argument is a constant expression, so we
		 * can perform compile-time bounds checking where
		 * buffer sizes are known.
		 */

		/* Error when size is larger than enclosing struct. */
		if (p_size > p_size_field && p_size < size)
			__write_overflow();
		if (q_size < size)
			__read_overflow2();

		/* Warn when write size is larger than dest field. */
		if (p_size_field < size)
			__write_overflow_field(p_size_field, size);
	}
	if (p_size < size || q_size < size)
		fortify_panic(__func__);
	return __underlying_memcpy(p, q, size);
	/*
	 * At this point, length argument may not be a constant expression,
	 * so run-time bounds checking can be done where buffer sizes are
	 * known. (This is not an "else" because the above checks may only
	 * be compile-time warnings, and we want to still warn for run-time
	 * overflows.)
	 */

	/*
	 * Always stop accesses beyond the struct that contains the
	 * field, when the buffer's remaining size is known.
	 * (The -1 test is to optimize away checks where the buffer
	 * lengths are unknown.)
	 */
	if (p_size != (size_t)(-1) && p_size < size)
		fortify_panic("memset");
}

__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
#define __fortify_memset_chk(p, c, size, p_size, p_size_field) ({	\
	size_t __fortify_size = (size_t)(size);				\
	fortify_memset_chk(__fortify_size, p_size, p_size_field),	\
	__underlying_memset(p, c, __fortify_size);			\
})

/*
 * __builtin_object_size() must be captured here to avoid evaluating argument
 * side-effects further into the macro layers.
 */
#define memset(p, c, s) __fortify_memset_chk(p, c, s,			\
		__builtin_object_size(p, 0), __builtin_object_size(p, 1))

/*
 * To make sure the compiler can enforce protection against buffer overflows,
 * memcpy(), memmove(), and memset() must not be used beyond individual
 * struct members. If you need to copy across multiple members, please use
 * struct_group() to create a named mirror of an anonymous struct union.
 * (e.g. see struct sk_buff.) Read overflow checking is currently only
 * done when a write overflow is also present, or when building with W=1.
 *
 * Mitigation coverage matrix
 *                                      Bounds checking at:
 *                                      +-------+-------+-------+-------+
 *                                      |  Compile time | Run time      |
 * memcpy() argument sizes:             | write | read  | write | read  |
 *  dest     source   length            +-------+-------+-------+-------+
 * memcpy(known,   known,   constant)   |   y   |   y   |  n/a  |  n/a  |
 * memcpy(known,   unknown, constant)   |   y   |   n   |  n/a  |   V   |
 * memcpy(known,   known,   dynamic)    |   n   |   n   |   B   |   B   |
 * memcpy(known,   unknown, dynamic)    |   n   |   n   |   B   |   V   |
 * memcpy(unknown, known,   constant)   |   n   |   y   |   V   |  n/a  |
 * memcpy(unknown, unknown, constant)   |   n   |   n   |   V   |   V   |
 * memcpy(unknown, known,   dynamic)    |   n   |   n   |   V   |   B   |
 * memcpy(unknown, unknown, dynamic)    |   n   |   n   |   V   |   V   |
 *                                      +-------+-------+-------+-------+
 *
 * y = perform deterministic compile-time bounds checking
 * n = cannot perform deterministic compile-time bounds checking
 * n/a = no run-time bounds checking needed since compile-time deterministic
 * B = can perform run-time bounds checking (currently unimplemented)
 * V = vulnerable to run-time overflow (will need refactoring to solve)
 *
 */
__FORTIFY_INLINE void fortify_memcpy_chk(__kernel_size_t size,
					 const size_t p_size,
					 const size_t q_size,
					 const size_t p_size_field,
					 const size_t q_size_field,
					 const char *func)
{
	size_t p_size = __builtin_object_size(p, 0);
	size_t q_size = __builtin_object_size(q, 0);

	if (__builtin_constant_p(size)) {
		if (p_size < size)
		/*
		 * Length argument is a constant expression, so we
		 * can perform compile-time bounds checking where
		 * buffer sizes are known.
		 */

		/* Error when size is larger than enclosing struct. */
		if (p_size > p_size_field && p_size < size)
			__write_overflow();
		if (q_size < size)
		if (q_size > q_size_field && q_size < size)
			__read_overflow2();

		/* Warn when write size argument larger than dest field. */
		if (p_size_field < size)
			__write_overflow_field(p_size_field, size);
		/*
		 * Warn for source field over-read when building with W=1
		 * or when an over-write happened, so both can be fixed at
		 * the same time.
		 */
		if ((IS_ENABLED(KBUILD_EXTRA_WARN1) || p_size_field < size) &&
		    q_size_field < size)
			__read_overflow2_field(q_size_field, size);
	}
	if (p_size < size || q_size < size)
		fortify_panic(__func__);
	return __underlying_memmove(p, q, size);
	/*
	 * At this point, length argument may not be a constant expression,
	 * so run-time bounds checking can be done where buffer sizes are
	 * known. (This is not an "else" because the above checks may only
	 * be compile-time warnings, and we want to still warn for run-time
	 * overflows.)
	 */

	/*
	 * Always stop accesses beyond the struct that contains the
	 * field, when the buffer's remaining size is known.
	 * (The -1 test is to optimize away checks where the buffer
	 * lengths are unknown.)
	 */
	if ((p_size != (size_t)(-1) && p_size < size) ||
	    (q_size != (size_t)(-1) && q_size < size))
		fortify_panic(func);
}

#define __fortify_memcpy_chk(p, q, size, p_size, q_size,		\
			     p_size_field, q_size_field, op) ({		\
	size_t __fortify_size = (size_t)(size);				\
	fortify_memcpy_chk(__fortify_size, p_size, q_size,		\
			   p_size_field, q_size_field, #op);		\
	__underlying_##op(p, q, __fortify_size);			\
})

/*
 * __builtin_object_size() must be captured here to avoid evaluating argument
 * side-effects further into the macro layers.
 */
#define memcpy(p, q, s)  __fortify_memcpy_chk(p, q, s,			\
		__builtin_object_size(p, 0), __builtin_object_size(q, 0), \
		__builtin_object_size(p, 1), __builtin_object_size(q, 1), \
		memcpy)
#define memmove(p, q, s)  __fortify_memcpy_chk(p, q, s,			\
		__builtin_object_size(p, 0), __builtin_object_size(q, 0), \
		__builtin_object_size(p, 1), __builtin_object_size(q, 1), \
		memmove)
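Editor's note: when code legitimately needs one copy across several members, the struct_group() mirror named in the comment above is the intended escape hatch. A minimal sketch under stated assumptions (struct_group() is from linux/stddef.h; the wire_packet struct and its fields are hypothetical):

#include <linux/stddef.h>	/* struct_group() */
#include <linux/string.h>
#include <linux/types.h>

struct wire_packet {
	u8 type;
	/* The "hdr" mirror spans both fields, so memcpy() sees one member. */
	struct_group(hdr,
		u16 len;
		u16 csum;
	);
	u8 payload[64];
};

static void fill_header(struct wire_packet *pkt, const void *src)
{
	/* Copies len+csum in one call without tripping the per-field
	 * bounds check: sizeof(pkt->hdr) covers exactly the group. */
	memcpy(&pkt->hdr, src, sizeof(pkt->hdr));
}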

extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
__FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size)
{
	size_t p_size = __builtin_object_size(p, 0);

@ -253,7 +387,8 @@ __FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
	return __real_memscan(p, c, size);
}

__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
__FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3)
int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
{
	size_t p_size = __builtin_object_size(p, 0);
	size_t q_size = __builtin_object_size(q, 0);

@ -269,7 +404,8 @@ __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
	return __underlying_memcmp(p, q, size);
}

__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
__FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3)
void *memchr(const void * const POS0 p, int c, __kernel_size_t size)
{
	size_t p_size = __builtin_object_size(p, 0);

@ -281,7 +417,7 @@ __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
}

void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
__FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
{
	size_t p_size = __builtin_object_size(p, 0);

@ -293,7 +429,7 @@ __FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
}

extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
__FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp)
{
	size_t p_size = __builtin_object_size(p, 0);

@ -304,13 +440,15 @@ __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
	return __real_kmemdup(p, size, gfp);
}

/* defined after fortified strlen and memcpy to reuse them */
__FORTIFY_INLINE char *strcpy(char *p, const char *q)
/* Defined after fortified strlen to reuse it. */
__FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2)
char *strcpy(char * const POS p, const char * const POS q)
{
	size_t p_size = __builtin_object_size(p, 1);
	size_t q_size = __builtin_object_size(q, 1);
	size_t size;

	/* If neither buffer size is known, immediately give up. */
	if (p_size == (size_t)-1 && q_size == (size_t)-1)
		return __underlying_strcpy(p, q);
	size = strlen(q) + 1;

@ -320,20 +458,20 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q)
	/* Run-time check for dynamic size overflow. */
	if (p_size < size)
		fortify_panic(__func__);
	memcpy(p, q, size);
	__underlying_memcpy(p, q, size);
	return p;
}

/* Don't use these outside the FORTIFY_SOURCE implementation */
#undef __underlying_memchr
#undef __underlying_memcmp
#undef __underlying_memcpy
#undef __underlying_memmove
#undef __underlying_memset
#undef __underlying_strcat
#undef __underlying_strcpy
#undef __underlying_strlen
#undef __underlying_strncat
#undef __underlying_strncpy

#undef POS
#undef POS0

#endif /* _LINUX_FORTIFY_STRING_H_ */
105
include/linux/fprobe.h
Normal file

@ -0,0 +1,105 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Simple ftrace probe wrapper */
#ifndef _LINUX_FPROBE_H
#define _LINUX_FPROBE_H

#include <linux/compiler.h>
#include <linux/ftrace.h>
#include <linux/rethook.h>

/**
 * struct fprobe - ftrace based probe.
 * @ops: The ftrace_ops.
 * @nmissed: The counter for missing events.
 * @flags: The status flag.
 * @rethook: The rethook data structure. (internal data)
 * @entry_handler: The callback function for function entry.
 * @exit_handler: The callback function for function exit.
 */
struct fprobe {
#ifdef CONFIG_FUNCTION_TRACER
	/*
	 * If CONFIG_FUNCTION_TRACER is not set, CONFIG_FPROBE is disabled too.
	 * But a user of fprobe may keep embedding struct fprobe in their own
	 * code. To avoid a build error, this keeps the fprobe data structure
	 * defined here, but removes the ftrace_ops data structure.
	 */
	struct ftrace_ops	ops;
#endif
	unsigned long		nmissed;
	unsigned int		flags;
	struct rethook		*rethook;

	void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
	void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
};

/* This fprobe is soft-disabled. */
#define FPROBE_FL_DISABLED	1

/*
 * This fprobe handler will be shared with kprobes.
 * This flag must be set before registering.
 */
#define FPROBE_FL_KPROBE_SHARED	2

static inline bool fprobe_disabled(struct fprobe *fp)
{
	return (fp) ? fp->flags & FPROBE_FL_DISABLED : false;
}

static inline bool fprobe_shared_with_kprobes(struct fprobe *fp)
{
	return (fp) ? fp->flags & FPROBE_FL_KPROBE_SHARED : false;
}

#ifdef CONFIG_FPROBE
int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter);
int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
int register_fprobe_syms(struct fprobe *fp, const char **syms, int num);
int unregister_fprobe(struct fprobe *fp);
#else
static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
{
	return -EOPNOTSUPP;
}
static inline int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
{
	return -EOPNOTSUPP;
}
static inline int register_fprobe_syms(struct fprobe *fp, const char **syms, int num)
{
	return -EOPNOTSUPP;
}
static inline int unregister_fprobe(struct fprobe *fp)
{
	return -EOPNOTSUPP;
}
#endif

/**
 * disable_fprobe() - Disable fprobe
 * @fp: The fprobe to be disabled.
 *
 * This will soft-disable @fp. Note that this doesn't remove the ftrace
 * hooks from the function entry.
 */
static inline void disable_fprobe(struct fprobe *fp)
{
	if (fp)
		fp->flags |= FPROBE_FL_DISABLED;
}

/**
 * enable_fprobe() - Enable fprobe
 * @fp: The fprobe to be enabled.
 *
 * This will soft-enable @fp.
 */
static inline void enable_fprobe(struct fprobe *fp)
{
	if (fp)
		fp->flags &= ~FPROBE_FL_DISABLED;
}

#endif
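Editor's note: a minimal registration sketch built only on the API declared above (the handler body and the "vfs_*" glob filter are illustrative):

#include <linux/fprobe.h>
#include <linux/init.h>
#include <linux/printk.h>

static void my_entry_handler(struct fprobe *fp, unsigned long entry_ip,
			     struct pt_regs *regs)
{
	pr_debug("hit %pS, missed so far: %lu\n",
		 (void *)entry_ip, fp->nmissed);
}

static struct fprobe my_fprobe = {
	.entry_handler	= my_entry_handler,
	/* Setting .exit_handler would also arm the rethook for returns. */
};

static int __init my_probe_init(void)
{
	/* Glob filter selects targets; the third argument would exclude. */
	return register_fprobe(&my_fprobe, "vfs_*", NULL);
}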
@ -42,6 +42,7 @@
#include <linux/mount.h>
#include <linux/cred.h>
#include <linux/mnt_idmapping.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <uapi/linux/fs.h>

@ -274,7 +275,6 @@ enum positive_aop_returns {
	AOP_TRUNCATED_PAGE	= 0x80001,
};

#define AOP_FLAG_CONT_EXPAND		0x0001 /* called from cont_expand */
#define AOP_FLAG_NOFS			0x0002 /* used by filesystem to direct
						* helper code (eg buffer layer)
						* to clear GFP_FS from alloc */

@ -327,7 +327,6 @@ struct kiocb {
	void (*ki_complete)(struct kiocb *iocb, long ret);
	void			*private;
	int			ki_flags;
	u16			ki_hint;
	u16			ki_ioprio; /* See linux/ioprio.h */
	struct wait_page_queue	*ki_waitq; /* for async buffered IO */
	randomized_struct_fields_end

@ -338,28 +337,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
	return kiocb->ki_complete == NULL;
}

/*
 * "descriptor" for what we're up to with a read.
 * This allows us to use the same read code yet
 * have multiple different users of the data that
 * we read from a file.
 *
 * The simplest case just copies the data to user
 * mode.
 */
typedef struct {
	size_t written;
	size_t count;
	union {
		char __user *buf;
		void *data;
	} arg;
	int error;
} read_descriptor_t;

typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
		unsigned long, unsigned long);

struct address_space_operations {
	int (*writepage)(struct page *page, struct writeback_control *wbc);
	int (*readpage)(struct file *, struct page *);

@ -367,15 +344,9 @@ struct address_space_operations {
	/* Write back some dirty pages from this mapping. */
	int (*writepages)(struct address_space *, struct writeback_control *);

	/* Set a page dirty. Return true if this dirtied it */
	int (*set_page_dirty)(struct page *page);
	/* Mark a folio dirty. Return true if this dirtied it */
	bool (*dirty_folio)(struct address_space *, struct folio *);

	/*
	 * Reads in the requested pages. Unlike ->readpage(), this is
	 * PURELY used for read-ahead!
	 */
	int (*readpages)(struct file *filp, struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages);
	void (*readahead)(struct readahead_control *);

	int (*write_begin)(struct file *, struct address_space *mapping,

@ -387,7 +358,7 @@ struct address_space_operations {

	/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
	sector_t (*bmap)(struct address_space *, sector_t);
	void (*invalidatepage) (struct page *, unsigned int, unsigned int);
	void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
	int (*releasepage) (struct page *, gfp_t);
	void (*freepage)(struct page *);
	ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);

@ -399,9 +370,9 @@ struct address_space_operations {
			struct page *, struct page *, enum migrate_mode);
	bool (*isolate_page)(struct page *, isolate_mode_t);
	void (*putback_page)(struct page *);
	int (*launder_page) (struct page *);
	int (*is_partially_uptodate) (struct page *, unsigned long,
					unsigned long);
	int (*launder_folio)(struct folio *);
	bool (*is_partially_uptodate) (struct folio *, size_t from,
			size_t count);
	void (*is_dirty_writeback) (struct page *, bool *, bool *);
	int (*error_remove_page)(struct address_space *, struct page *);

@ -930,10 +901,15 @@ struct fown_struct {
 * struct file_ra_state - Track a file's readahead state.
 * @start: Where the most recent readahead started.
 * @size: Number of pages read in the most recent readahead.
 * @async_size: Start next readahead when this many pages are left.
 * @ra_pages: Maximum size of a readahead request.
 * @async_size: Number of pages that were/are not needed immediately
 *	and so were/are genuinely "ahead". Start next readahead when
 *	the first of these pages is accessed.
 * @ra_pages: Maximum size of a readahead request, copied from the bdi.
 * @mmap_miss: How many mmap accesses missed in the page cache.
 * @prev_pos: The last byte in the most recent read request.
 *
 * When this structure is passed to ->readahead(), the "most recent"
 * readahead means the current readahead.
 */
struct file_ra_state {
	pgoff_t start;

@ -967,7 +943,6 @@ struct file {
	 * Must not be taken from IRQ context.
	 */
	spinlock_t		f_lock;
	enum rw_hint		f_write_hint;
	atomic_long_t		f_count;
	unsigned int		f_flags;
	fmode_t			f_mode;

@ -1435,6 +1410,7 @@ extern int send_sigurg(struct fown_struct *fown);

#define SB_I_SKIP_SYNC	0x00000100	/* Skip superblock at global sync */
#define SB_I_PERSB_BDI	0x00000200	/* has a per-sb bdi */
#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */

/* Possible states of 'frozen' field */
enum {

@ -1483,7 +1459,7 @@ struct super_block {
#ifdef CONFIG_FS_VERITY
	const struct fsverity_operations *s_vop;
#endif
#ifdef CONFIG_UNICODE
#if IS_ENABLED(CONFIG_UNICODE)
	struct unicode_map *s_encoding;
	__u16 s_encoding_flags;
#endif

@ -2215,31 +2191,13 @@ static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns,
	       !gid_valid(i_gid_into_mnt(mnt_userns, inode));
}

static inline enum rw_hint file_write_hint(struct file *file)
{
	if (file->f_write_hint != WRITE_LIFE_NOT_SET)
		return file->f_write_hint;

	return file_inode(file)->i_write_hint;
}

static inline int iocb_flags(struct file *file);

static inline u16 ki_hint_validate(enum rw_hint hint)
{
	typeof(((struct kiocb *)0)->ki_hint) max_hint = -1;

	if (hint <= max_hint)
		return hint;
	return 0;
}

static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
	*kiocb = (struct kiocb) {
		.ki_filp = filp,
		.ki_flags = iocb_flags(filp),
		.ki_hint = ki_hint_validate(file_write_hint(filp)),
		.ki_ioprio = get_current_ioprio(),
	};
}

@ -2250,7 +2208,6 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
	*kiocb = (struct kiocb) {
		.ki_filp = filp,
		.ki_flags = kiocb_src->ki_flags,
		.ki_hint = kiocb_src->ki_hint,
		.ki_ioprio = kiocb_src->ki_ioprio,
		.ki_pos = kiocb_src->ki_pos,
	};

@ -2746,54 +2703,6 @@ extern void init_special_inode(struct inode *, umode_t, dev_t);
extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);

unsigned long invalidate_mapping_pages(struct address_space *mapping,
					pgoff_t start, pgoff_t end);

void invalidate_mapping_pagevec(struct address_space *mapping,
				pgoff_t start, pgoff_t end,
				unsigned long *nr_pagevec);

static inline void invalidate_remote_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode))
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
extern int invalidate_inode_pages2(struct address_space *mapping);
extern int invalidate_inode_pages2_range(struct address_space *mapping,
					 pgoff_t start, pgoff_t end);
extern int write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
extern int filemap_flush(struct address_space *);
extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
				   loff_t lend);
extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte);

static inline int filemap_fdatawait(struct address_space *mapping)
{
	return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
}

extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
				   loff_t lend);
extern int filemap_write_and_wait_range(struct address_space *mapping,
					loff_t lstart, loff_t lend);
extern int __filemap_fdatawrite_range(struct address_space *mapping,
				      loff_t start, loff_t end, int sync_mode);
extern int filemap_fdatawrite_range(struct address_space *mapping,
				    loff_t start, loff_t end);
extern int filemap_check_errors(struct address_space *mapping);
extern void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc);

static inline int filemap_write_and_wait(struct address_space *mapping)
{
	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}

extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
					     loff_t lend);
extern int __must_check file_check_and_advance_wb_err(struct file *file);

@ -2805,67 +2714,6 @@ static inline int file_write_and_wait(struct file *file)
	return file_write_and_wait_range(file, 0, LLONG_MAX);
}

/**
 * filemap_set_wb_err - set a writeback error on an address_space
 * @mapping: mapping in which to set writeback error
 * @err: error to be set in mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * filemap_set_wb_err to record the error in the mapping so that it will be
 * automatically reported whenever fsync is called on the file.
 */
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
	/* Fastpath for common case of no error */
	if (unlikely(err))
		__filemap_set_wb_err(mapping, err);
}

/**
 * filemap_check_wb_err - has an error occurred since the mark was sampled?
 * @mapping: mapping to check for writeback errors
 * @since: previously-sampled errseq_t
 *
 * Grab the errseq_t value from the mapping, and see if it has changed "since"
 * the given value was sampled.
 *
 * If it has then report the latest error set, otherwise return 0.
 */
static inline int filemap_check_wb_err(struct address_space *mapping,
					errseq_t since)
{
	return errseq_check(&mapping->wb_err, since);
}

/**
 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
 * @mapping: mapping to be sampled
 *
 * Writeback errors are always reported relative to a particular sample point
 * in the past. This function provides those sample points.
 */
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
	return errseq_sample(&mapping->wb_err);
}

/**
 * file_sample_sb_err - sample the current errseq_t to test for later errors
 * @file: file pointer to be sampled
 *
 * Grab the most current superblock-level errseq_t value for the given
 * struct file.
 */
static inline errseq_t file_sample_sb_err(struct file *file)
{
	return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
}

extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
			   int datasync);
extern int vfs_fsync(struct file *file, int datasync);
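Editor's note: the removed helpers above document the errseq-based sample/check pattern (they leave fs.h here rather than the kernel). A minimal sketch of the pattern itself, using only calls shown in this hunk:

static int flush_and_check(struct address_space *mapping)
{
	/* Record the current error sequence before starting writeback. */
	errseq_t since = filemap_sample_wb_err(mapping);
	int err;

	err = filemap_write_and_wait(mapping);
	if (err)
		return err;

	/* Nonzero means an error was recorded after the sample point. */
	return filemap_check_wb_err(mapping, since);
}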

@ -3108,6 +2956,16 @@ extern void free_inode_nonrcu(struct inode *inode);
extern int should_remove_suid(struct dentry *);
extern int file_remove_privs(struct file *);

/*
 * This must be used for allocating filesystem-specific inodes to set
 * up the inode reclaim context correctly.
 */
static inline void *
alloc_inode_sb(struct super_block *sb, struct kmem_cache *cache, gfp_t gfp)
{
	return kmem_cache_alloc_lru(cache, &sb->s_inode_lru, gfp);
}

extern void __insert_inode_hash(struct inode *, unsigned long hashval);
static inline void insert_inode_hash(struct inode *inode)
{
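Editor's note: the intended call site for alloc_inode_sb() is a filesystem's ->alloc_inode() method. A hedged sketch (the myfs names, cache, and embedded-inode layout are hypothetical):

static struct inode *myfs_alloc_inode(struct super_block *sb)
{
	struct myfs_inode_info *mi;

	/* kmem_cache_alloc_lru() ties the allocation to sb->s_inode_lru,
	 * which is what makes list_lru-based inode reclaim work. */
	mi = alloc_inode_sb(sb, myfs_inode_cachep, GFP_KERNEL);
	if (!mi)
		return NULL;
	return &mi->vfs_inode;
}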

@ -3130,6 +2988,7 @@ extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
int generic_write_checks_count(struct kiocb *iocb, loff_t *count);
extern int generic_write_check_limits(struct file *file, loff_t pos,
		loff_t *count);
extern int generic_file_rw_checks(struct file *file_in, struct file *file_out);

@ -3139,7 +2998,7 @@ extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
ssize_t generic_perform_write(struct kiocb *, struct iov_iter *);

ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
		      rwf_t flags);

@ -3173,6 +3032,7 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
		int whence, loff_t size);
extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
int rw_verify_area(int, struct file *, const loff_t *, size_t);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
extern int stream_open(struct inode * inode, struct file * filp);

@ -3322,8 +3182,6 @@ extern int simple_rename(struct user_namespace *, struct inode *,
extern void simple_recursive_removal(struct dentry *,
			      void (*callback)(struct dentry *));
extern int noop_fsync(struct file *, loff_t, loff_t, int);
extern void noop_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length);
extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
extern int simple_write_begin(struct file *file, struct address_space *mapping,

@ -3608,15 +3466,4 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
			   int advice);

/*
 * Flush file data before changing attributes. Caller must hold any locks
 * required to prevent further writes to this file until we're done setting
 * flags.
 */
static inline int inode_drain_writes(struct inode *inode)
{
	inode_dio_wait(inode);
	return filemap_write_and_wait(inode->i_mapping);
}

#endif /* _LINUX_FS_H */
1
include/linux/fs_api.h
Normal file

@ -0,0 +1 @@
#include <linux/fs.h>

@ -456,6 +456,20 @@ int fscache_begin_read_operation(struct netfs_cache_resources *cres,
	return -ENOBUFS;
}

/**
 * fscache_end_operation - End the read operation for the netfs lib
 * @cres: The cache resources for the read operation
 *
 * Clean up the resources at the end of the read request.
 */
static inline void fscache_end_operation(struct netfs_cache_resources *cres)
{
	const struct netfs_cache_ops *ops = fscache_operation_valid(cres);

	if (ops)
		ops->end_operation(cres);
}

/**
 * fscache_read - Start a read from the cache.
 * @cres: The cache resources to use

@ -616,9 +630,11 @@ static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
}

#if __fscache_available
extern int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie);
bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio,
			 struct fscache_cookie *cookie);
#else
#define fscache_set_page_dirty(PAGE, COOKIE) (__set_page_dirty_nobuffers((PAGE)))
#define fscache_dirty_folio(MAPPING, FOLIO, COOKIE) \
		filemap_dirty_folio(MAPPING, FOLIO)
#endif

/**

@ -626,7 +642,7 @@ extern int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cook
 * @wbc: The writeback control
 * @cookie: The cookie referring to the cache object
 *
 * Unpin the writeback resources pinned by fscache_set_page_dirty(). This is
 * Unpin the writeback resources pinned by fscache_dirty_folio(). This is
 * intended to be called by the netfs's ->write_inode() method.
 */
static inline void fscache_unpin_writeback(struct writeback_control *wbc,
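Editor's note: a sketch of the netfs-side hook that wires the new fscache_dirty_folio() into the aops ->dirty_folio() slot. Here myfs_cookie() is an assumed helper returning the inode's fscache cookie, not an API from this header:

static bool myfs_dirty_folio(struct address_space *mapping,
			     struct folio *folio)
{
	/* Pins cache resources when the first dirty folio appears. */
	return fscache_dirty_folio(mapping, folio,
				   myfs_cookie(mapping->host));
}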
@ -714,6 +714,10 @@ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
bool fscrypt_mergeable_bio_bh(struct bio *bio,
			      const struct buffer_head *next_bh);

bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter);

u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks);

#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */

static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)

@ -742,6 +746,20 @@ static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
{
	return true;
}

static inline bool fscrypt_dio_supported(struct kiocb *iocb,
					 struct iov_iter *iter)
{
	const struct inode *inode = file_inode(iocb->ki_filp);

	return !fscrypt_needs_contents_encryption(inode);
}

static inline u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk,
					  u64 nr_blocks)
{
	return nr_blocks;
}
#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */

/**
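Editor's note: a hedged sketch of the gate a filesystem's direct-I/O path can now place in front of encrypted files; the fallback policy (returning -ENOTBLK to force buffered I/O) is the caller's choice and shown only as an illustration:

static ssize_t myfs_dio_read(struct kiocb *iocb, struct iov_iter *iter)
{
	/* Inline-crypto DIO only works when alignment and hardware
	 * allow it; otherwise punt back to the buffered path. */
	if (!fscrypt_dio_supported(iocb, iter))
		return -ENOTBLK;

	return generic_file_read_iter(iocb, iter);	/* illustrative */
}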
@ -224,6 +224,43 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
		      dir, &new_dentry->d_name, 0);
}

/*
 * fsnotify_delete - @dentry was unlinked and unhashed
 *
 * Caller must make sure that dentry->d_name is stable.
 *
 * Note: unlike fsnotify_unlink(), we also have to pass the unlinked inode
 * as this may be called after d_delete() and old_dentry may be negative.
 */
static inline void fsnotify_delete(struct inode *dir, struct inode *inode,
				   struct dentry *dentry)
{
	__u32 mask = FS_DELETE;

	if (S_ISDIR(inode->i_mode))
		mask |= FS_ISDIR;

	fsnotify_name(mask, inode, FSNOTIFY_EVENT_INODE, dir, &dentry->d_name,
		      0);
}

/**
 * d_delete_notify - delete a dentry and call fsnotify_delete()
 * @dentry: The dentry to delete
 *
 * This helper is used to guarantee that the unlinked inode cannot be found
 * by a lookup of this name after the fsnotify_delete() event has been delivered.
 */
static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	ihold(inode);
	d_delete(dentry);
	fsnotify_delete(dir, inode, dentry);
	iput(inode);
}

/*
 * fsnotify_unlink - 'name' was unlinked
 *

@ -231,10 +268,10 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
 */
static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
{
	/* Expected to be called before d_delete() */
	WARN_ON_ONCE(d_is_negative(dentry));
	if (WARN_ON_ONCE(d_is_negative(dentry)))
		return;

	fsnotify_dirent(dir, dentry, FS_DELETE);
	fsnotify_delete(dir, d_inode(dentry), dentry);
}

/*

@ -258,10 +295,10 @@ static inline void fsnotify_mkdir(struct inode *dir, struct dentry *dentry)
 */
static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
{
	/* Expected to be called before d_delete() */
	WARN_ON_ONCE(d_is_negative(dentry));
	if (WARN_ON_ONCE(d_is_negative(dentry)))
		return;

	fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR);
	fsnotify_delete(dir, d_inode(dentry), dentry);
}

/*
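Editor's note: d_delete_notify() exists for call sites that delete dentries themselves rather than going through vfs_unlink(). A sketch of the substitution it enables (myfs is hypothetical):

static int myfs_unlink(struct inode *dir, struct dentry *dentry)
{
	/* ... remove the name from the directory's own structures ... */

	/* Replaces a bare d_delete(): the inode is held across d_delete()
	 * so the FS_DELETE event is delivered before the final iput(). */
	d_delete_notify(dir, dentry);
	return 0;
}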
@ -601,6 +601,25 @@ extern void fsnotify_remove_queued_event(struct fsnotify_group *group,

/* functions used to manipulate the marks attached to inodes */

/* Get mask for calculating object interest taking ignored mask into account */
static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark)
{
	__u32 mask = mark->mask;

	if (!mark->ignored_mask)
		return mask;

	/* Interest in FS_MODIFY may be needed for clearing ignored mask */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
		mask |= FS_MODIFY;

	/*
	 * If mark is interested in ignoring events on children, the object must
	 * show interest in those events for fsnotify_parent() to notice it.
	 */
	return mask | (mark->ignored_mask & ALL_FSNOTIFY_EVENTS);
}

/* Get mask of events for a list of marks */
extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn);
/* Calculate mask of events for a list of marks */
@ -221,7 +221,7 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work)
 *
 * This checks whether ->i_verity_info has been set.
 *
 * Filesystems call this from ->readpages() to check whether the pages need to
 * Filesystems call this from ->readahead() to check whether the pages need to
 * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to
 * a race condition where the file is being read concurrently with
 * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.)
@ -9,6 +9,7 @@

#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
#include <linux/jump_label.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>

@ -30,6 +31,12 @@
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

#ifdef CONFIG_TRACING
extern void ftrace_boot_snapshot(void);
#else
static inline void ftrace_boot_snapshot(void) { }
#endif

#ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops;
struct ftrace_regs;

@ -215,7 +222,10 @@ struct ftrace_ops_hash {
void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_init_mem(void)
{
	ftrace_boot_snapshot();
}
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

@ -512,6 +522,8 @@ struct dyn_ftrace {

int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
			  unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,

@ -802,6 +814,7 @@ static inline unsigned long ftrace_location(unsigned long ip)
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)

@ -1006,7 +1019,20 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);

extern bool ftrace_graph_is_dead(void);
/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);

static inline bool ftrace_graph_is_dead(void)
{
	return static_branch_unlikely(&kill_ftrace_graph);
}

extern void ftrace_graph_stop(void);

/* The current handlers in use */
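Editor's note: ftrace_set_filter_ips() batches what previously took one ftrace_set_filter_ip() call (and one filter-hash rebuild) per address. A sketch under stated assumptions (my_trace_callback is an assumed ftrace callback, not shown; the addresses would typically come from kallsyms lookups):

static struct ftrace_ops my_ops = {
	.func = my_trace_callback,	/* assumed callback */
};

static int filter_two_sites(unsigned long ip_a, unsigned long ip_b)
{
	unsigned long ips[] = { ip_a, ip_b };

	/* One hash update for the whole array: remove=0 keeps existing
	 * entries out of the way, reset=0 appends to the current filter. */
	return ftrace_set_filter_ips(&my_ops, ips, ARRAY_SIZE(ips), 0, 0);
}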
@ -1,291 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _LINUX_GENHD_H
|
||||
#define _LINUX_GENHD_H
|
||||
|
||||
/*
|
||||
* genhd.h Copyright (C) 1992 Drew Eckhardt
|
||||
* Generic hard disk header file by
|
||||
* Drew Eckhardt
|
||||
*
|
||||
* <drew@colorado.edu>
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/kdev_t.h>
|
||||
#include <linux/uuid.h>
|
||||
#include <linux/blk_types.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/xarray.h>
|
||||
|
||||
extern const struct device_type disk_type;
|
||||
extern struct device_type part_type;
|
||||
extern struct class block_class;
|
||||
|
||||
#define DISK_MAX_PARTS 256
|
||||
#define DISK_NAME_LEN 32
|
||||
|
||||
#define PARTITION_META_INFO_VOLNAMELTH 64
|
||||
/*
|
||||
* Enough for the string representation of any kind of UUID plus NULL.
|
||||
* EFI UUID is 36 characters. MSDOS UUID is 11 characters.
|
||||
*/
|
||||
#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
|
||||
|
||||
struct partition_meta_info {
|
||||
char uuid[PARTITION_META_INFO_UUIDLTH];
|
||||
u8 volname[PARTITION_META_INFO_VOLNAMELTH];
|
||||
};
|
||||
|
||||
/**
|
||||
* DOC: genhd capability flags
|
||||
*
|
||||
* ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
|
||||
* removable media. When set, the device remains present even when media is not
|
||||
* inserted. Shall not be set for devices which are removed entirely when the
|
||||
* media is removed.
|
||||
*
|
||||
* ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
|
||||
* doesn't appear in sysfs, and can't be opened from userspace or using
|
||||
* blkdev_get*. Used for the underlying components of multipath devices.
|
||||
*
|
||||
* ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
|
||||
* scan for partitions from add_disk, and users can't add partitions manually.
|
||||
*
|
||||
*/
|
||||
enum {
|
||||
GENHD_FL_REMOVABLE = 1 << 0,
|
||||
GENHD_FL_HIDDEN = 1 << 1,
|
||||
GENHD_FL_NO_PART = 1 << 2,
|
||||
};
|
||||
|
||||
enum {
|
||||
DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
|
||||
DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
|
||||
};
|
||||
|
||||
enum {
|
||||
/* Poll even if events_poll_msecs is unset */
|
||||
DISK_EVENT_FLAG_POLL = 1 << 0,
|
||||
/* Forward events to udev */
|
||||
DISK_EVENT_FLAG_UEVENT = 1 << 1,
|
||||
/* Block event polling when open for exclusive write */
|
||||
DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 1 << 2,
|
||||
};
|
||||
|
||||
struct disk_events;
|
||||
struct badblocks;
|
||||
|
||||
struct blk_integrity {
|
||||
const struct blk_integrity_profile *profile;
|
||||
unsigned char flags;
|
||||
unsigned char tuple_size;
|
||||
unsigned char interval_exp;
|
||||
unsigned char tag_size;
|
||||
};
|
||||
|
||||
struct gendisk {
|
||||
/*
|
||||
* major/first_minor/minors should not be set by any new driver, the
|
||||
* block core will take care of allocating them automatically.
|
||||
*/
|
||||
int major;
|
||||
int first_minor;
|
||||
int minors;
|
||||
|
||||
char disk_name[DISK_NAME_LEN]; /* name of major driver */
|
||||
|
||||
unsigned short events; /* supported events */
|
||||
unsigned short event_flags; /* flags related to event processing */
|
||||
|
||||
struct xarray part_tbl;
|
||||
struct block_device *part0;
|
||||
|
||||
const struct block_device_operations *fops;
|
||||
struct request_queue *queue;
|
||||
void *private_data;
|
||||
|
||||
int flags;
|
||||
unsigned long state;
|
||||
#define GD_NEED_PART_SCAN 0
|
||||
#define GD_READ_ONLY 1
|
||||
#define GD_DEAD 2
|
||||
#define GD_NATIVE_CAPACITY 3
|
||||
|
||||
struct mutex open_mutex; /* open/close mutex */
|
||||
unsigned open_partitions; /* number of open partitions */
|
||||
|
||||
struct backing_dev_info *bdi;
|
||||
struct kobject *slave_dir;
|
||||
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
|
||||
struct list_head slave_bdevs;
|
||||
#endif
|
||||
struct timer_rand_state *random;
|
||||
atomic_t sync_io; /* RAID */
|
||||
struct disk_events *ev;
|
||||
#ifdef CONFIG_BLK_DEV_INTEGRITY
|
||||
struct kobject integrity_kobj;
|
||||
#endif /* CONFIG_BLK_DEV_INTEGRITY */
|
||||
#if IS_ENABLED(CONFIG_CDROM)
|
||||
struct cdrom_device_info *cdi;
|
||||
#endif
|
||||
int node_id;
|
||||
struct badblocks *bb;
|
||||
struct lockdep_map lockdep_map;
|
||||
u64 diskseq;
|
||||
};
|
||||
|
||||
static inline bool disk_live(struct gendisk *disk)
|
||||
{
|
||||
return !inode_unhashed(disk->part0->bd_inode);
|
||||
}
|
||||
|
||||
/*
|
||||
* The gendisk is refcounted by the part0 block_device, and the bd_device
|
||||
* therein is also used for device model presentation in sysfs.
|
||||
*/
|
||||
#define dev_to_disk(device) \
|
||||
(dev_to_bdev(device)->bd_disk)
|
||||
#define disk_to_dev(disk) \
|
||||
(&((disk)->part0->bd_device))
|
||||
|
||||
#if IS_REACHABLE(CONFIG_CDROM)
|
||||
#define disk_to_cdi(disk) ((disk)->cdi)
|
||||
#else
|
||||
#define disk_to_cdi(disk) NULL
|
||||
#endif
|
||||
|
||||
static inline dev_t disk_devt(struct gendisk *disk)
|
||||
{
|
||||
return MKDEV(disk->major, disk->first_minor);
|
||||
}
|
||||
|
||||
void disk_uevent(struct gendisk *disk, enum kobject_action action);
|
||||
|
||||
/* block/genhd.c */
|
||||
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
|
||||
const struct attribute_group **groups);
|
||||
static inline int __must_check add_disk(struct gendisk *disk)
|
||||
{
|
||||
return device_add_disk(NULL, disk, NULL);
|
||||
}
|
||||
extern void del_gendisk(struct gendisk *gp);
|
||||
|
||||
void invalidate_disk(struct gendisk *disk);
|
||||
|
||||
void set_disk_ro(struct gendisk *disk, bool read_only);
|
||||
|
||||
static inline int get_disk_ro(struct gendisk *disk)
|
||||
{
|
||||
return disk->part0->bd_read_only ||
|
||||
test_bit(GD_READ_ONLY, &disk->state);
|
||||
}
|
||||
|
||||
static inline int bdev_read_only(struct block_device *bdev)
|
||||
{
|
||||
return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
|
||||
}
|
||||
|
||||
extern void disk_block_events(struct gendisk *disk);
|
||||
extern void disk_unblock_events(struct gendisk *disk);
|
||||
extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
|
||||
bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
|
||||
bool disk_force_media_change(struct gendisk *disk, unsigned int events);
|
||||
|
||||
/* drivers/char/random.c */
|
||||
extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
|
||||
extern void rand_initialize_disk(struct gendisk *disk);
|
||||
|
||||
static inline sector_t get_start_sect(struct block_device *bdev)
|
||||
{
|
||||
return bdev->bd_start_sect;
|
||||
}
|
||||
|
||||
static inline sector_t bdev_nr_sectors(struct block_device *bdev)
|
||||
{
|
||||
return bdev->bd_nr_sectors;
|
||||
}
|
||||
|
||||
static inline loff_t bdev_nr_bytes(struct block_device *bdev)
|
||||
{
|
||||
return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
|
||||
}
|
||||
|
||||
static inline sector_t get_capacity(struct gendisk *disk)
|
||||
{
|
||||
return bdev_nr_sectors(disk->part0);
|
||||
}
|
||||
|
||||
static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
|
||||
{
|
||||
return bdev_nr_sectors(sb->s_bdev) >>
|
||||
(sb->s_blocksize_bits - SECTOR_SHIFT);
|
||||
}
|
||||
|
||||
int bdev_disk_changed(struct gendisk *disk, bool invalidate);
|
||||
void blk_drop_partitions(struct gendisk *disk);
|
||||
|
||||
struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
|
||||
struct lock_class_key *lkclass);
|
||||
extern void put_disk(struct gendisk *disk);
|
||||
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
|
||||
|
||||
/**
|
||||
* blk_alloc_disk - allocate a gendisk structure
|
||||
* @node_id: numa node to allocate on
|
||||
*
|
||||
* Allocate and pre-initialize a gendisk structure for use with BIO based
|
||||
* drivers.
|
||||
*
|
||||
* Context: can sleep
|
||||
*/
|
||||
#define blk_alloc_disk(node_id) \
|
||||
({ \
|
||||
static struct lock_class_key __key; \
|
||||
\
|
||||
__blk_alloc_disk(node_id, &__key); \
|
||||
})
|
||||
void blk_cleanup_disk(struct gendisk *disk);
int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool bdev_check_media_change(struct block_device *bdev);
int __invalidate_device(struct block_device *bdev, bool kill_dirty);
void set_capacity(struct gendisk *disk, sector_t size);

#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
int bd_register_pending_holders(struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
static inline int bd_register_pending_holders(struct gendisk *disk)
{
	return 0;
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */

dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
dev_t blk_lookup_devt(const char *name, int partno);
void blk_request_module(dev_t devt);
#ifdef CONFIG_BLOCK
void printk_all_partitions(void);
#else /* CONFIG_BLOCK */
static inline void printk_all_partitions(void)
{
}
#endif /* CONFIG_BLOCK */

#endif /* _LINUX_GENHD_H */
@@ -54,9 +54,17 @@ struct vm_area_struct;
#define ___GFP_THISNODE		0x200000u
#define ___GFP_ACCOUNT		0x400000u
#define ___GFP_ZEROTAGS		0x800000u
#define ___GFP_SKIP_KASAN_POISON	0x1000000u
#ifdef CONFIG_KASAN_HW_TAGS
#define ___GFP_SKIP_ZERO		0x1000000u
#define ___GFP_SKIP_KASAN_UNPOISON	0x2000000u
#define ___GFP_SKIP_KASAN_POISON	0x4000000u
#else
#define ___GFP_SKIP_ZERO		0
#define ___GFP_SKIP_KASAN_UNPOISON	0
#define ___GFP_SKIP_KASAN_POISON	0
#endif
#ifdef CONFIG_LOCKDEP
#define ___GFP_NOLOCKDEP	0x2000000u
#define ___GFP_NOLOCKDEP	0x8000000u
#else
#define ___GFP_NOLOCKDEP	0
#endif

@@ -79,7 +87,7 @@ struct vm_area_struct;
 * DOC: Page mobility and placement hints
 *
 * Page mobility and placement hints
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * ---------------------------------
 *
 * These flags provide hints about how mobile the page is. Pages with similar
 * mobility are placed within the same pageblocks to minimise problems due

@@ -112,7 +120,7 @@ struct vm_area_struct;
 * DOC: Watermark modifiers
 *
 * Watermark modifiers -- controls access to emergency reserves
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * ------------------------------------------------------------
 *
 * %__GFP_HIGH indicates that the caller is high-priority and that granting
 * the request is necessary before the system can make forward progress.

@@ -144,7 +152,7 @@ struct vm_area_struct;
 * DOC: Reclaim modifiers
 *
 * Reclaim modifiers
 * ~~~~~~~~~~~~~~~~~
 * -----------------
 * Please note that all the following flags are only applicable to sleepable
 * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
 *

@@ -224,7 +232,7 @@ struct vm_area_struct;
 * DOC: Action modifiers
 *
 * Action modifiers
 * ~~~~~~~~~~~~~~~~
 * ----------------
 *
 * %__GFP_NOWARN suppresses allocation failure reports.
 *

@@ -232,31 +240,38 @@ struct vm_area_struct;
 *
 * %__GFP_ZERO returns a zeroed page on success.
 *
 * %__GFP_ZEROTAGS returns a page with zeroed memory tags on success, if
 * __GFP_ZERO is set.
 * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
 * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
 * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
 * memory tags at the same time as zeroing memory has minimal additional
 * performance impact.
 *
 * %__GFP_SKIP_KASAN_POISON returns a page which does not need to be poisoned
 * on deallocation. Typically used for userspace pages. Currently only has an
 * effect in HW tags mode.
 * %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation.
 * Only effective in HW_TAGS mode.
 *
 * %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation.
 * Typically, used for userspace pages. Only effective in HW_TAGS mode.
 */
#define __GFP_NOWARN	((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP	((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO	((__force gfp_t)___GFP_ZERO)
#define __GFP_ZEROTAGS	((__force gfp_t)___GFP_ZEROTAGS)
#define __GFP_SKIP_KASAN_POISON	((__force gfp_t)___GFP_SKIP_KASAN_POISON)
#define __GFP_SKIP_ZERO		((__force gfp_t)___GFP_SKIP_ZERO)
#define __GFP_SKIP_KASAN_UNPOISON	((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
#define __GFP_SKIP_KASAN_POISON	((__force gfp_t)___GFP_SKIP_KASAN_POISON)

/* Disable lockdep for GFP context tracking */
#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)

/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
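The shift grows from 25 to 27 because the three HW_TAGS flags occupy bits 24-26 (0x1000000-0x4000000), pushing ___GFP_NOLOCKDEP up to bit 27. A hedged example of the documented flag interaction; the wrapper name is hypothetical, and __GFP_ZEROTAGS only has an effect with CONFIG_KASAN_HW_TAGS:

static struct page *sketch_alloc_tagged_zeroed_page(void)
{
	/* zero the page and its memory tags in the same pass */
	return alloc_pages(GFP_KERNEL | __GFP_ZERO | __GFP_ZEROTAGS, 0);
}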
/**
 * DOC: Useful GFP flag combinations
 *
 * Useful GFP flag combinations
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * ----------------------------
 *
 * Useful GFP flag combinations that are commonly used. It is recommended
 * that subsystems start with one of these combinations and then set/clear

@@ -598,9 +613,11 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
#ifdef CONFIG_NUMA
struct page *alloc_pages(gfp_t gfp, unsigned int order);
struct folio *folio_alloc(gfp_t gfp, unsigned order);
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
		struct vm_area_struct *vma, unsigned long addr,
		bool hugepage);
struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr, bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
	alloc_pages_vma(gfp_mask, order, vma, addr, true)
#else

@@ -612,8 +629,10 @@ static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
{
	return __folio_alloc_node(gfp, order, numa_node_id());
}
#define alloc_pages_vma(gfp_mask, order, vma, addr, false)\
#define alloc_pages_vma(gfp_mask, order, vma, addr, hugepage) \
	alloc_pages(gfp_mask, order)
#define vma_alloc_folio(gfp, order, vma, addr, hugepage) \
	folio_alloc(gfp, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
	alloc_pages(gfp_mask, order)
#endif
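A hedged caller-side sketch: on CONFIG_NUMA kernels vma_alloc_folio() is a real function that can honour the vma's memory policy, while on !CONFIG_NUMA builds the macro above discards the vma/addr/hugepage arguments and collapses to folio_alloc(). The fault-path wrapper name is hypothetical:

static struct folio *sketch_fault_alloc(struct vm_area_struct *vma,
					unsigned long addr)
{
	return vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
}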
1
include/linux/gfp_api.h
Normal file

@@ -0,0 +1 @@
#include <linux/gfp.h>

@@ -8,27 +8,16 @@
#include <linux/err.h>

struct device;

/**
 * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are
 * preferable to the old integer-based handles.
 *
 * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid
 * until the GPIO is released.
 */
struct gpio_desc;

/**
 * Opaque descriptor for a structure of GPIO array attributes. This structure
 * is attached to struct gpiod_descs obtained from gpiod_get_array() and can be
 * passed back to get/set array functions in order to activate fast processing
 * path if applicable.
 */
struct gpio_array;

/**
 * Struct containing an array of descriptors that can be obtained using
 * gpiod_get_array().
 * struct gpio_descs - Struct containing an array of descriptors that can be
 *                     obtained using gpiod_get_array()
 *
 * @info: Pointer to the opaque gpio_array structure
 * @ndescs: Number of held descriptors
 * @desc: Array of pointers to GPIO descriptors
 */
struct gpio_descs {
	struct gpio_array *info;

@@ -43,8 +32,16 @@ struct gpio_descs {
#define GPIOD_FLAGS_BIT_NONEXCLUSIVE	BIT(4)

/**
 * Optional flags that can be passed to one of gpiod_* to configure direction
 * and output value. These values cannot be OR'd.
 * enum gpiod_flags - Optional flags that can be passed to one of gpiod_* to
 *                    configure direction and output value. These values
 *                    cannot be OR'd.
 *
 * @GPIOD_ASIS: Don't change anything
 * @GPIOD_IN: Set lines to input mode
 * @GPIOD_OUT_LOW: Set lines to output and drive them low
 * @GPIOD_OUT_HIGH: Set lines to output and drive them high
 * @GPIOD_OUT_LOW_OPEN_DRAIN: Set lines to open-drain output and drive them low
 * @GPIOD_OUT_HIGH_OPEN_DRAIN: Set lines to open-drain output and drive them high
 */
enum gpiod_flags {
	GPIOD_ASIS	= 0,
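The hunk is truncated at this point in the diff. A hedged consumer sketch using the flags documented above; the "reset" con_id and the function name are assumptions, not taken from this diff:

static int sketch_pulse_reset(struct device *dev)
{
	struct gpio_desc *reset = gpiod_get(dev, "reset", GPIOD_OUT_LOW);

	if (IS_ERR(reset))
		return PTR_ERR(reset);
	gpiod_set_value(reset, 1);	/* drive the line high */
	gpiod_set_value(reset, 0);	/* and back low */
	gpiod_put(reset);
	return 0;
}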
@@ -168,13 +168,16 @@ struct gpio_irq_chip {

	/**
	 * @parent_handler_data:
	 *
	 * If @per_parent_data is false, @parent_handler_data is a single
	 * pointer used as the data associated with every parent interrupt.
	 *
	 * @parent_handler_data_array:
	 *
	 * Data associated, and passed to, the handler for the parent
	 * interrupt. Can either be a single pointer if @per_parent_data
	 * is false, or an array of @num_parents pointers otherwise. If
	 * @per_parent_data is true, @parent_handler_data_array cannot be
	 * NULL.
	 * If @per_parent_data is true, @parent_handler_data_array is
	 * an array of @num_parents pointers, and is used to associate
	 * different data for each parent. This cannot be NULL if
	 * @per_parent_data is true.
	 */
	union {
		void *parent_handler_data;
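A hedged sketch of the per-parent arrangement described above; the chip setup function and the data array are hypothetical, and the array length is assumed to match num_parents:

static void *sketch_parent_data[2];	/* one slot per parent IRQ */

static void sketch_setup_girq(struct gpio_chip *gc)
{
	struct gpio_irq_chip *girq = &gc->irq;

	girq->per_parent_data = true;
	girq->parent_handler_data_array = sketch_parent_data;
}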
@@ -218,6 +221,15 @@ struct gpio_irq_chip {
	 */
	bool per_parent_data;

	/**
	 * @initialized:
	 *
	 * Flag to track the GPIO chip irq members' initialization.
	 * This flag will make sure GPIO chip irq members are not used
	 * before they are initialized.
	 */
	bool initialized;

	/**
	 * @init_hw: optional routine to initialize hardware before
	 * an IRQ chip will be added. This is quite useful when
@@ -100,7 +100,7 @@ enum {
struct greybus_descriptor_string {
	__u8 length;
	__u8 id;
	__u8 string[0];
	__u8 string[];
} __packed;

/*

@@ -175,7 +175,7 @@ struct greybus_manifest_header {

struct greybus_manifest {
	struct greybus_manifest_header header;
	struct greybus_descriptor descriptors[0];
	struct greybus_descriptor descriptors[];
} __packed;
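A hedged sketch of what the [0]-to-[] conversion buys: with a true flexible array member, struct_size() from <linux/overflow.h> computes the allocation size with overflow checking, and the compiler can bounds-check accesses to the trailing array. The allocator helper below is illustrative only, not part of greybus:

static struct greybus_descriptor_string *sketch_alloc_string_desc(u8 len)
{
	struct greybus_descriptor_string *s;

	/* sizeof(*s) plus len bytes for the trailing string[] member */
	s = kzalloc(struct_size(s, string, len), GFP_KERNEL);
	return s;
}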
#endif /* __GREYBUS_MANIFEST_H */

@@ -58,7 +58,7 @@ struct gb_host_device {

	struct gb_svc *svc;
	/* Private data for the host driver */
	unsigned long hd_priv[0] __aligned(sizeof(s64));
	unsigned long hd_priv[] __aligned(sizeof(s64));
};
#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev)
Some files were not shown because too many files have changed in this diff.