Merge remote-tracking branches 'spi/topic/rockchip', 'spi/topic/rspi', 'spi/topic/s3c64xx', 'spi/topic/sh-msiof' and 'spi/topic/slave' into spi-next
commit e2a3b0df8d
11292 changed files with 729670 additions and 243351 deletions
@@ -56,6 +56,27 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
            acpi_fwnode_handle(adev) : NULL)

#define ACPI_HANDLE(dev)    acpi_device_handle(ACPI_COMPANION(dev))

static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
{
    struct fwnode_handle *fwnode;

    fwnode = kzalloc(sizeof(struct fwnode_handle), GFP_KERNEL);
    if (!fwnode)
        return NULL;

    fwnode->type = FWNODE_ACPI_STATIC;

    return fwnode;
}

static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode)
{
    if (WARN_ON(!fwnode || fwnode->type != FWNODE_ACPI_STATIC))
        return;

    kfree(fwnode);
}

/**
 * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
 * the PCI-defined class-code information
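The new pair above tags the kzalloc()'d node with FWNODE_ACPI_STATIC so the free side can sanity-check what it is handed. A minimal sketch of a caller, with the surrounding driver context invented for illustration:

#include <linux/acpi.h>

static struct fwnode_handle *demo_fwnode;	/* hypothetical driver state */

static int demo_setup(void)
{
    demo_fwnode = acpi_alloc_fwnode_static();
    if (!demo_fwnode)
        return -ENOMEM;	/* the kzalloc() inside the helper failed */
    /* ... associate demo_fwnode with a device here ... */
    return 0;
}

static void demo_teardown(void)
{
    /* WARNs and bails out if the node is not FWNODE_ACPI_STATIC */
    acpi_free_fwnode_static(demo_fwnode);
}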
@@ -220,10 +241,6 @@ int __init acpi_table_parse_entries(char *id, unsigned long table_size,
                  int entry_id,
                  acpi_tbl_entry_handler handler,
                  unsigned int max_entries);
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
                  int entry_id,
                  acpi_tbl_entry_handler handler,
                  unsigned int max_entries);
int __init acpi_table_parse_entries_array(char *id, unsigned long table_size,
                  struct acpi_subtable_proc *proc, int proc_num,
                  unsigned int max_entries);
@@ -420,6 +437,8 @@ static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares,
    return acpi_dev_filter_resource_type(ares, (unsigned long)arg);
}

struct acpi_device *acpi_resource_consumer(struct resource *res);

int acpi_check_resource_conflict(const struct resource *res);

int acpi_check_region(resource_size_t start, resource_size_t n,
@@ -469,6 +488,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_CPCV2_SUPPORT			0x00000040
#define OSC_SB_PCLPI_SUPPORT			0x00000080
#define OSC_SB_OSLPI_SUPPORT			0x00000100
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT		0x00001000

extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
@@ -744,6 +764,11 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
    return DEV_DMA_NOT_SUPPORTED;
}

static inline void acpi_dma_configure(struct device *dev,
                      enum dev_dma_attr attr) { }

static inline void acpi_dma_deconfigure(struct device *dev) { }

#define ACPI_PTR(_ptr)	(NULL)

static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -764,6 +789,11 @@ static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
    return -EINVAL;
}

static inline struct acpi_device *acpi_resource_consumer(struct resource *res)
{
    return NULL;
}

#endif	/* !CONFIG_ACPI */

#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
@@ -23,20 +23,36 @@
#include <linux/fwnode.h>
#include <linux/irqdomain.h>

#define IORT_IRQ_MASK(irq)		(irq & 0xffffffffULL)
#define IORT_IRQ_TRIGGER_MASK(irq)	((irq >> 32) & 0xffffffffULL)

int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
#ifdef CONFIG_ACPI_IORT
void acpi_iort_init(void);
bool iort_node_match(u8 type);
u32 iort_msi_map_rid(struct device *dev, u32 req_id);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
/* IOMMU interface */
void iort_set_dma_mask(struct device *dev);
const struct iommu_ops *iort_iommu_configure(struct device *dev);
#else
static inline void acpi_iort_init(void) { }
static inline bool iort_node_match(u8 type) { return false; }
static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{ return req_id; }
static inline struct irq_domain *iort_get_device_domain(struct device *dev,
                                                        u32 req_id)
{ return NULL; }
/* IOMMU interface */
static inline void iort_set_dma_mask(struct device *dev) { }
static inline
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{ return NULL; }
#endif

#define IORT_ACPI_DECLARE(name, table_id, fn)		\
    ACPI_DECLARE_PROBE_ENTRY(iort, name, table_id, 0, NULL, 0, fn)

#endif /* __ACPI_IORT_H__ */
include/linux/ahci-remap.h (new file, 28 lines)
@@ -0,0 +1,28 @@
#ifndef _LINUX_AHCI_REMAP_H
#define _LINUX_AHCI_REMAP_H

#include <linux/sizes.h>

#define AHCI_VSCAP		0xa4
#define AHCI_REMAP_CAP		0x800

/* device class code */
#define AHCI_REMAP_N_DCC	0x880

/* remap-device base relative to ahci-bar */
#define AHCI_REMAP_N_OFFSET	SZ_16K
#define AHCI_REMAP_N_SIZE	SZ_16K

#define AHCI_MAX_REMAP		3

static inline unsigned int ahci_remap_dcc(int i)
{
    return AHCI_REMAP_N_DCC + i * 0x80;
}

static inline unsigned int ahci_remap_base(int i)
{
    return AHCI_REMAP_N_OFFSET + i * AHCI_REMAP_N_SIZE;
}

#endif /* _LINUX_AHCI_REMAP_H */
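The two helpers are plain offset arithmetic: remapped device i gets a 16K window after the first 16K of the AHCI BAR, with class-code registers 0x80 apart. A standalone illustration (userspace C, with the header's constants inlined; SZ_16K is 0x4000 in the kernel):

#include <stdio.h>

#define AHCI_REMAP_N_DCC	0x880
#define AHCI_REMAP_N_OFFSET	0x4000	/* SZ_16K */
#define AHCI_REMAP_N_SIZE	0x4000	/* SZ_16K */
#define AHCI_MAX_REMAP		3

int main(void)
{
    /* Same arithmetic as ahci_remap_dcc()/ahci_remap_base() above. */
    for (int i = 0; i < AHCI_MAX_REMAP; i++)
        printf("remap %d: dcc reg 0x%x, base offset 0x%x\n", i,
               AHCI_REMAP_N_DCC + i * 0x80,
               AHCI_REMAP_N_OFFSET + i * AHCI_REMAP_N_SIZE);
    return 0;
}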
@@ -14,14 +14,9 @@ typedef int (kiocb_cancel_fn)(struct kiocb *);
/* prototypes */
#ifdef CONFIG_AIO
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
             struct iocb __user *__user *iocbpp, bool compat);
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
#else
static inline void exit_aio(struct mm_struct *mm) { }
static inline long do_io_submit(aio_context_t ctx_id, long nr,
                struct iocb __user * __user *iocbpp,
                bool compat) { return 0; }
static inline void kiocb_set_cancel_fn(struct kiocb *req,
                       kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
@@ -10,7 +10,12 @@ enum alarmtimer_type {
    ALARM_REALTIME,
    ALARM_BOOTTIME,

    /* Supported types end here */
    ALARM_NUMTYPE,

    /* Used for tracing information. No usable types. */
    ALARM_REALTIME_FREEZER,
    ALARM_BOOTTIME_FREEZER,
};

enum alarmtimer_restart {
@@ -1,16 +0,0 @@
#include <linux/types.h>

/* platform data for the PL061 GPIO driver */

struct pl061_platform_data {
    /* number of the first GPIO */
    unsigned	gpio_base;

    /* number of the first IRQ.
     * If the IRQ functionality in not desired this must be set to 0.
     */
    unsigned	irq_base;

    u8		directions;	/* startup directions, 1: out, 0: in */
    u8		values;		/* startup values */
};
@@ -84,6 +84,8 @@ struct pl08x_channel_data {
 * running any DMA transfer and multiplexing can be recycled
 * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2
 * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
 * @slave_map: DMA slave matching table
 * @slave_map_len: number of elements in @slave_map
 */
struct pl08x_platform_data {
    struct pl08x_channel_data *slave_channels;
@@ -93,6 +95,8 @@ struct pl08x_platform_data {
    void (*put_xfer_signal)(const struct pl08x_channel_data *, int);
    u8 lli_buses;
    u8 mem_buses;
    const struct dma_slave_map *slave_map;
    int slave_map_len;
};

#ifdef CONFIG_AMBA_PL08X
@@ -348,6 +348,7 @@ enum {
    ATA_LOG_DEVSLP_DETO	= 0x01,
    ATA_LOG_DEVSLP_VALID	= 0x07,
    ATA_LOG_DEVSLP_VALID_MASK = 0x80,
    ATA_LOG_NCQ_PRIO_OFFSET	= 0x09,

    /* NCQ send and receive log */
    ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET	= 0x00,
@@ -940,6 +941,11 @@ static inline bool ata_id_has_ncq_non_data(const u16 *id)
    return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5);
}

static inline bool ata_id_has_ncq_prio(const u16 *id)
{
    return id[ATA_ID_SATA_CAPABILITY] & BIT(12);
}

static inline bool ata_id_has_trim(const u16 *id)
{
    if (ata_id_major_version(id) >= 7 &&
@@ -147,7 +147,7 @@ extern void audit_log_d_path(struct audit_buffer *ab,
extern void	audit_log_key(struct audit_buffer *ab,
                  char *key);
extern void	audit_log_link_denied(const char *operation,
                      struct path *link);
                      const struct path *link);
extern void	audit_log_lost(const char *message);
#ifdef CONFIG_SECURITY
extern void	audit_log_secctx(struct audit_buffer *ab, u32 secid);
@@ -116,6 +116,8 @@ struct bdi_writeback {
    struct list_head work_list;
    struct delayed_work dwork;	/* work item used for writeback */

    unsigned long dirty_sleep;	/* last wait */

    struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
@@ -136,12 +138,13 @@ struct bdi_writeback {
struct backing_dev_info {
    struct list_head bdi_list;
    unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
    unsigned int capabilities; /* Device capabilities */
    unsigned long io_pages;	/* max allowed IO size */
    congested_fn *congested_fn; /* Function pointer if device is md/dm */
    void *congested_data;	/* Pointer to aux data for congested func */

    char *name;

    unsigned int capabilities; /* Device capabilities */
    unsigned int min_ratio;
    unsigned int max_ratio, max_prop_frac;
@@ -62,6 +62,12 @@
#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
    (op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
@@ -70,7 +76,8 @@ static inline bool bio_has_data(struct bio *bio)
    if (bio &&
        bio->bi_iter.bi_size &&
        bio_op(bio) != REQ_OP_DISCARD &&
        bio_op(bio) != REQ_OP_SECURE_ERASE)
        bio_op(bio) != REQ_OP_SECURE_ERASE &&
        bio_op(bio) != REQ_OP_WRITE_ZEROES)
        return true;

    return false;
@@ -80,18 +87,8 @@ static inline bool bio_no_advance_iter(struct bio *bio)
{
    return bio_op(bio) == REQ_OP_DISCARD ||
           bio_op(bio) == REQ_OP_SECURE_ERASE ||
           bio_op(bio) == REQ_OP_WRITE_SAME;
}

static inline bool bio_is_rw(struct bio *bio)
{
    if (!bio_has_data(bio))
        return false;

    if (bio_no_advance_iter(bio))
        return false;

    return true;
           bio_op(bio) == REQ_OP_WRITE_SAME ||
           bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline bool bio_mergeable(struct bio *bio)
@@ -193,18 +190,20 @@ static inline unsigned bio_segments(struct bio *bio)
    struct bvec_iter iter;

    /*
     * We special case discard/write same, because they interpret bi_size
     * differently:
     * We special case discard/write same/write zeroes, because they
     * interpret bi_size differently:
     */

    if (bio_op(bio) == REQ_OP_DISCARD)
        return 1;

    if (bio_op(bio) == REQ_OP_SECURE_ERASE)
        return 1;

    if (bio_op(bio) == REQ_OP_WRITE_SAME)
    switch (bio_op(bio)) {
    case REQ_OP_DISCARD:
    case REQ_OP_SECURE_ERASE:
    case REQ_OP_WRITE_ZEROES:
        return 0;
    case REQ_OP_WRITE_SAME:
        return 1;
    default:
        break;
    }

    bio_for_each_segment(bv, bio, iter)
        segs++;
@@ -409,6 +408,8 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)

}

extern blk_qc_t submit_bio(struct bio *);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
@@ -423,13 +424,15 @@ extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *);
extern void bio_init(struct bio *bio, struct bio_vec *table,
             unsigned short max_vecs);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
               unsigned int, unsigned int);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
                    const struct iov_iter *, gfp_t);
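bio_init() now takes the bio_vec table and its size, so an on-stack bio can carry its vectors without a bio_set. A hedged sketch of a caller (the function and its error handling are invented; bi_bdev is the 4.10-era device field):

#include <linux/bio.h>

static int demo_sync_read(struct block_device *bdev, struct page *page)
{
    struct bio bio;
    struct bio_vec bvec;

    bio_init(&bio, &bvec, 1);	/* caller now provides the vector table */
    bio.bi_bdev = bdev;
    bio.bi_iter.bi_sector = 0;
    bio_add_page(&bio, page, PAGE_SIZE, 0);
    bio_set_op_attrs(&bio, REQ_OP_READ, 0);

    return submit_bio_wait(&bio);
}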
@@ -581,15 +581,14 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP
 * @op_flags: rq_flag_bits
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
                   int op, int op_flags, uint64_t val)
                   unsigned int op, uint64_t val)
{
    struct percpu_counter *cnt;

@@ -600,7 +599,7 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,

    __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);

    if (op_flags & REQ_SYNC)
    if (op_is_sync(op))
        cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
    else
        cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
@@ -705,9 +704,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,

    if (!throtl) {
        blkg = blkg ?: q->root_blkg;
        blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf,
        blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
                bio->bi_iter.bi_size);
        blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1);
        blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
    }

    rcu_read_unlock();
@@ -3,6 +3,7 @@

#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_mq_tags;
struct blk_flush_queue;
@@ -35,6 +36,8 @@ struct blk_mq_hw_ctx {

    struct blk_mq_tags	*tags;

    struct srcu_struct	queue_rq_srcu;

    unsigned long		queued;
    unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	7
@@ -215,18 +218,20 @@ void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq, int error);

bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
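blk_mq_requeue_request() now takes an explicit kick flag, letting drivers batch several requeues before running the list. A hedged sketch of a driver error path (the function itself is invented):

#include <linux/blk-mq.h>

static void demo_requeue(struct request *rq)
{
    /* Park the request on the requeue list without running it yet... */
    blk_mq_requeue_request(rq, false);

    /* ...then kick the list once, after any further requeues. */
    blk_mq_kick_requeue_list(rq->q);
}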
@@ -237,6 +242,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);

int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);

/*
@@ -17,7 +17,6 @@ struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);

#ifdef CONFIG_BLOCK
/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
@@ -88,24 +87,6 @@ struct bio {
    struct bio_vec		bi_inline_vecs[0];
};

#define BIO_OP_SHIFT	(8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS)
#define bio_flags(bio)	((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1))
#define bio_op(bio)	((bio)->bi_opf >> BIO_OP_SHIFT)

#define bio_set_op_attrs(bio, op, op_flags) do {			\
    if (__builtin_constant_p(op))					\
        BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS));		\
    else								\
        WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS));		\
    if (__builtin_constant_p(op_flags))				\
        BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT));	\
    else								\
        WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT));	\
    (bio)->bi_opf = bio_flags(bio);					\
    (bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT);			\
    (bio)->bi_opf |= (op_flags);					\
} while (0)

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)

/*
@@ -119,6 +100,8 @@ struct bio {
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */

/*
 * Flags starting here get preserved by bio_reset() - this includes
@@ -142,53 +125,61 @@ struct bio {
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)

#endif /* CONFIG_BLOCK */

/*
 * Request flags.  For use in the cmd_flags field of struct request, and in
 * bi_opf of struct bio.  Note that some flags are only valid in either one.
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
enum rq_flag_bits {
    /* common flags */
    __REQ_FAILFAST_DEV,	/* no driver retries of device errors */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
    /* read sectors from the device */
    REQ_OP_READ		= 0,
    /* write sectors to the device */
    REQ_OP_WRITE		= 1,
    /* flush the volatile write cache */
    REQ_OP_FLUSH		= 2,
    /* discard sectors */
    REQ_OP_DISCARD		= 3,
    /* get zone information */
    REQ_OP_ZONE_REPORT	= 4,
    /* securely erase sectors */
    REQ_OP_SECURE_ERASE	= 5,
    /* reset a zone write pointer */
    REQ_OP_ZONE_RESET	= 6,
    /* write the same sector many times */
    REQ_OP_WRITE_SAME	= 7,
    /* write the zero filled sector many times */
    REQ_OP_WRITE_ZEROES	= 8,

    REQ_OP_LAST,
};

enum req_flag_bits {
    __REQ_FAILFAST_DEV =	/* no driver retries of device errors */
        REQ_OP_BITS,
    __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
    __REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */

    __REQ_SYNC,		/* request is sync (sync write or read) */
    __REQ_META,		/* metadata io request */
    __REQ_PRIO,		/* boost priority in cfq */

    __REQ_NOIDLE,		/* don't anticipate more IO after this one */
    __REQ_NOMERGE,		/* don't touch this for merging */
    __REQ_IDLE,		/* anticipate more IO after this one */
    __REQ_INTEGRITY,	/* I/O includes block integrity payload */
    __REQ_FUA,		/* forced unit access */
    __REQ_PREFLUSH,		/* request for cache flush */

    /* bio only flags */
    __REQ_RAHEAD,		/* read ahead, can fail anytime */
    __REQ_THROTTLED,	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */

    /* request only flags */
    __REQ_SORTED,		/* elevator knows about this request */
    __REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
    __REQ_NOMERGE,		/* don't touch this for merging */
    __REQ_STARTED,		/* drive already may have started this one */
    __REQ_DONTPREP,		/* don't call prep for this one */
    __REQ_QUEUED,		/* uses queueing */
    __REQ_ELVPRIV,		/* elevator private data attached */
    __REQ_FAILED,		/* set if the request failed */
    __REQ_QUIET,		/* don't worry about errors */
    __REQ_PREEMPT,		/* set for "ide_preempt" requests and also
				   for requests for which the SCSI "quiesce"
				   state must be ignored. */
    __REQ_ALLOCED,		/* request came from our alloc pool */
    __REQ_COPY_USER,	/* contains copies of user pages */
    __REQ_FLUSH_SEQ,	/* request for flush sequence */
    __REQ_IO_STAT,		/* account I/O stat */
    __REQ_MIXED_MERGE,	/* merge of different types, fail separately */
    __REQ_PM,		/* runtime pm request */
    __REQ_HASHED,		/* on IO scheduler merge hash */
    __REQ_MQ_INFLIGHT,	/* track inflight for MQ */
    __REQ_BACKGROUND,	/* background IO */
    __REQ_NR_BITS,		/* stops here */
};
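The key idea in the hunk above is the encoding: the low REQ_OP_BITS of bi_opf/cmd_flags hold the operation, flags live above, and the op number's least significant bit gives the data direction. A standalone demo of that layout (userspace C, constants copied from the hunk):

#include <stdio.h>

#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1u << REQ_OP_BITS) - 1)

enum { REQ_OP_READ = 0, REQ_OP_WRITE = 1, REQ_OP_FLUSH = 2, REQ_OP_DISCARD = 3 };

static int op_is_write(unsigned int op)
{
    return op & 1;	/* odd op numbers transfer TO the device */
}

int main(void)
{
    unsigned int opf = REQ_OP_WRITE | (1u << REQ_OP_BITS); /* op plus one flag bit */

    printf("op=%u write=%d\n", opf & REQ_OP_MASK,
           op_is_write(opf & REQ_OP_MASK));	/* prints: op=1 write=1 */
    return 0;
}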
@@ -198,54 +189,47 @@ enum rq_flag_bits {
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)

#define REQ_FAILFAST_MASK \
    (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
    (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
     REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE)
#define REQ_CLONE_MASK		REQ_COMMON_MASK

/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
    (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ)
    (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED		(1ULL << __REQ_THROTTLED)
#define bio_op(bio) \
    ((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
    ((req)->cmd_flags & REQ_OP_MASK)

#define REQ_SORTED		(1ULL << __REQ_SORTED)
#define REQ_SOFTBARRIER		(1ULL << __REQ_SOFTBARRIER)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_STARTED		(1ULL << __REQ_STARTED)
#define REQ_DONTPREP		(1ULL << __REQ_DONTPREP)
#define REQ_QUEUED		(1ULL << __REQ_QUEUED)
#define REQ_ELVPRIV		(1ULL << __REQ_ELVPRIV)
#define REQ_FAILED		(1ULL << __REQ_FAILED)
#define REQ_QUIET		(1ULL << __REQ_QUIET)
#define REQ_PREEMPT		(1ULL << __REQ_PREEMPT)
#define REQ_ALLOCED		(1ULL << __REQ_ALLOCED)
#define REQ_COPY_USER		(1ULL << __REQ_COPY_USER)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_FLUSH_SEQ		(1ULL << __REQ_FLUSH_SEQ)
#define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
#define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
#define REQ_PM			(1ULL << __REQ_PM)
#define REQ_HASHED		(1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
                    unsigned op_flags)
{
    bio->bi_opf = op | op_flags;
}

enum req_op {
    REQ_OP_READ,
    REQ_OP_WRITE,
    REQ_OP_DISCARD,		/* request to discard sectors */
    REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
    REQ_OP_WRITE_SAME,	/* write same block many times */
    REQ_OP_FLUSH,		/* request for cache flush */
};
static inline bool op_is_write(unsigned int op)
{
    return (op & 1);
}

#define REQ_OP_BITS 3
/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag.  Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
    return (op & REQ_OP_MASK) == REQ_OP_READ ||
        (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE	-1U
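op_is_sync() classifies any read, and any write carrying REQ_SYNC, REQ_FUA or REQ_PREFLUSH, as synchronous. A userspace re-creation, with the flag bit positions derived from enum req_flag_bits above (an assumption: only the constants needed for this check are reproduced):

#include <stdio.h>
#include <stdbool.h>

#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1u << REQ_OP_BITS) - 1)
#define REQ_OP_READ	0u
#define REQ_OP_WRITE	1u
#define REQ_SYNC	(1u << 11)	/* __REQ_SYNC lands on bit 11 */
#define REQ_FUA		(1u << 17)
#define REQ_PREFLUSH	(1u << 18)

static bool op_is_sync(unsigned int op)
{
    return (op & REQ_OP_MASK) == REQ_OP_READ ||
        (op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}

int main(void)
{
    printf("%d %d %d\n",
           op_is_sync(REQ_OP_READ),			/* 1: reads are always sync */
           op_is_sync(REQ_OP_WRITE),			/* 0: plain write is async  */
           op_is_sync(REQ_OP_WRITE | REQ_SYNC));	/* 1: explicitly sync write */
    return 0;
}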
@@ -271,4 +255,20 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
    return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

struct blk_issue_stat {
    u64 time;
};

#define BLK_RQ_STAT_BATCH	64

struct blk_rq_stat {
    s64 mean;
    u64 min;
    u64 max;
    s32 nr_samples;
    s32 nr_batch;
    u64 batch;
    s64 time;
};

#endif /* __LINUX_BLK_TYPES_H */
@@ -24,6 +24,7 @@
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>

struct module;
struct scsi_ioctl_command;
@@ -37,6 +38,7 @@ struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_wb;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -77,6 +79,55 @@ enum rq_cmd_type_bits {
    REQ_TYPE_DRV_PRIV,	/* driver defined types from here */
};

/*
 * request flags */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED		((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* uses tagged queueing */
#define RQF_QUEUED		((__force req_flags_t)(1 << 2))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* set for "ide_preempt" requests and also for requests for which the SCSI
   "quiesce" state must be ignored. */
#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
/* contains copies of user pages */
#define RQF_COPY_USER		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account I/O stat */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* IO stats tracking on */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
    (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

#define BLK_MAX_CDB	16

/*
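The effect of this split is that block-layer-internal state moves into the new rq_flags field while the op and common flags stay in cmd_flags. A hedged sketch of how a driver might consult both (the function itself is made up):

static bool demo_rq_needs_prep(struct request *rq)
{
    /* internal block-layer state now lives in rq->rq_flags */
    if (rq->rq_flags & RQF_DONTPREP)
        return false;

    /* the operation is still read from cmd_flags, via req_op() */
    return req_op(rq) == REQ_OP_READ || req_op(rq) == REQ_OP_WRITE;
}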
@@ -97,7 +148,8 @@ struct request {

    int cpu;
    unsigned cmd_type;
    u64 cmd_flags;
    unsigned int cmd_flags;		/* op and common flags */
    req_flags_t rq_flags;
    unsigned long atomic_flags;

    /* the following two fields are internal, NEVER access directly */
@@ -126,6 +178,7 @@ struct request {
     */
    union {
        struct rb_node rb_node;	/* sort/lookup */
        struct bio_vec special_vec;
        void *completion_data;
    };

@@ -151,6 +204,7 @@ struct request {
    struct gendisk *rq_disk;
    struct hd_struct *part;
    unsigned long start_time;
    struct blk_issue_stat issue_stat;
#ifdef CONFIG_BLK_CGROUP
    struct request_list *rl;		/* rl this rq is alloced from */
    unsigned long long start_time_ns;
@@ -198,20 +252,6 @@ struct request {
    struct request *next_rq;
};

#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
#define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)

#define req_set_op(req, op) do {				\
    WARN_ON(op >= (1 << REQ_OP_BITS));			\
    (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);	\
    (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT);	\
} while (0)

#define req_set_op_attrs(req, op, flags) do {	\
    req_set_op(req, op);			\
    (req)->cmd_flags |= flags;		\
} while (0)

static inline unsigned short req_get_ioprio(struct request *req)
{
    return req->ioprio;
@@ -248,7 +288,6 @@ enum blk_queue_state {
struct blk_queue_tag {
    struct request **tag_index;	/* map of busy tags */
    unsigned long *tag_map;		/* bit map of free/busy tags */
    int busy;			/* current depth */
    int max_depth;			/* what we will send to device */
    int real_max_depth;		/* what the array can hold */
    atomic_t refcnt;		/* map can be shared */
@@ -261,6 +300,15 @@ struct blk_queue_tag {
#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 */
enum blk_zoned_model {
    BLK_ZONED_NONE,	/* Regular block device */
    BLK_ZONED_HA,	/* Host-aware zoned block device */
    BLK_ZONED_HM,	/* Host-managed zoned block device */
};

struct queue_limits {
    unsigned long		bounce_pfn;
    unsigned long		seg_boundary_mask;
@@ -278,6 +326,7 @@ struct queue_limits {
    unsigned int		max_discard_sectors;
    unsigned int		max_hw_discard_sectors;
    unsigned int		max_write_same_sectors;
    unsigned int		max_write_zeroes_sectors;
    unsigned int		discard_granularity;
    unsigned int		discard_alignment;

@@ -290,8 +339,45 @@ struct queue_limits {
    unsigned char		cluster;
    unsigned char		discard_zeroes_data;
    unsigned char		raid_partial_stripes_expensive;
    enum blk_zoned_model	zoned;
};

#ifdef CONFIG_BLK_DEV_ZONED

struct blk_zone_report_hdr {
    unsigned int	nr_zones;
    u8		padding[60];
};

extern int blkdev_report_zones(struct block_device *bdev,
                   sector_t sector, struct blk_zone *zones,
                   unsigned int *nr_zones, gfp_t gfp_mask);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
                  sector_t nr_sectors, gfp_t gfp_mask);

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long arg);
extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
                    unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
                        fmode_t mode, unsigned int cmd,
                        unsigned long arg)
{
    return -ENOTTY;
}

static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
                       fmode_t mode, unsigned int cmd,
                       unsigned long arg)
{
    return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */

struct request_queue {
    /*
     * Together with queue_head for cacheline sharing
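A hedged sketch of the new zone API in use, not code from this commit: query a few zones starting at sector 0, then rewind the first one's write pointer (error handling kept minimal; struct blk_zone's start/len fields come from the uapi blkzoned.h this hunk starts including):

#include <linux/blkdev.h>

static int demo_dump_zones(struct block_device *bdev)
{
    struct blk_zone zones[4];
    unsigned int nr_zones = ARRAY_SIZE(zones);
    int ret;

    /* fills at most nr_zones entries and writes back the count reported */
    ret = blkdev_report_zones(bdev, 0, zones, &nr_zones, GFP_KERNEL);
    if (ret)
        return ret;

    /* reset the write pointer of the first reported zone */
    return blkdev_reset_zones(bdev, zones[0].start, zones[0].len,
                  GFP_KERNEL);
}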
@@ -302,6 +388,8 @@ struct request_queue {
    int			nr_rqs[2];	/* # allocated [a]sync rqs */
    int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

    struct rq_wb		*rq_wb;

    /*
     * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
     * is used, root blkg allocates from @q->root_rl and all other
@@ -327,6 +415,8 @@ struct request_queue {
    struct blk_mq_ctx __percpu	*queue_ctx;
    unsigned int		nr_queues;

    unsigned int		queue_depth;

    /* hw dispatch queues */
    struct blk_mq_hw_ctx	**queue_hw_ctx;
    unsigned int		nr_hw_queues;
@@ -412,6 +502,9 @@ struct request_queue {

    unsigned int		nr_sorted;
    unsigned int		in_flight[2];

    struct blk_rq_stat	rq_stats[2];

    /*
     * Number of active block driver functions for which blk_drain_queue()
     * must wait. Must be incremented around functions that unlock the
@@ -420,6 +513,7 @@ struct request_queue {
    unsigned int		request_fn_active;

    unsigned int		rq_timeout;
    int			poll_nsec;
    struct timer_list	timeout;
    struct work_struct	timeout_work;
    struct list_head	timeout_list;
@@ -505,6 +599,7 @@ struct request_queue {
#define QUEUE_FLAG_FUA	       24	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueable */
#define QUEUE_FLAG_DAX         26	/* device supports DAX */
#define QUEUE_FLAG_STATS       27	/* track rq completion times */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -601,7 +696,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
                 REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
    (((rq)->cmd_flags & REQ_STARTED) && \
    (((rq)->rq_flags & RQF_STARTED) && \
     ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
@@ -627,17 +722,31 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q)
    return q->limits.cluster;
}

/*
 * We regard a request as sync, if either a read or a sync write
 */
static inline bool rw_is_sync(int op, unsigned int rw_flags)
static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
    return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
    return q->limits.zoned;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
    switch (blk_queue_zoned_model(q)) {
    case BLK_ZONED_HA:
    case BLK_ZONED_HM:
        return true;
    default:
        return false;
    }
}

static inline unsigned int blk_queue_zone_size(struct request_queue *q)
{
    return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}

static inline bool rq_is_sync(struct request *rq)
{
    return rw_is_sync(req_op(rq), rq->cmd_flags);
    return op_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
@@ -669,8 +778,13 @@ static inline bool rq_mergeable(struct request *rq)
    if (req_op(rq) == REQ_OP_FLUSH)
        return false;

    if (req_op(rq) == REQ_OP_WRITE_ZEROES)
        return false;

    if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
        return false;
    if (rq->rq_flags & RQF_NOMERGE_FLAGS)
        return false;

    return true;
}
@@ -683,6 +797,14 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
    return false;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
    if (q->queue_depth)
        return q->queue_depth;

    return q->nr_requests;
}

/*
 * q->prep_rq_fn return values
 */
@@ -790,8 +912,6 @@ extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
        int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                 struct bio_set *bs, gfp_t gfp_mask,
@@ -824,6 +944,7 @@ extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern void blk_mq_quiesce_queue(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
               struct rq_map_data *, void __user *, unsigned long,
               gfp_t);
@@ -837,7 +958,7 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
                  struct request *, int, rq_end_io_fn *);

bool blk_poll(struct request_queue *q, blk_qc_t cookie);
bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
@@ -888,6 +1009,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
    if (unlikely(op == REQ_OP_WRITE_SAME))
        return q->limits.max_write_same_sectors;

    if (unlikely(op == REQ_OP_WRITE_ZEROES))
        return q->limits.max_write_zeroes_sectors;

    return q->limits.max_sectors;
}

@@ -933,6 +1057,20 @@ static inline unsigned int blk_rq_count_bios(struct request *rq)
    return nr_bios;
}

/*
 * blk_rq_set_prio - associate a request with prio from ioc
 * @rq: request of interest
 * @ioc: target iocontext
 *
 * Associate request prio with ioc prio so request based drivers
 * can leverage priority information.
 */
static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc)
{
    if (ioc)
        rq->ioprio = ioc->ioprio;
}

/*
 * Request issue related functions.
 */
@@ -991,6 +1129,8 @@ extern void blk_queue_max_discard_sectors(struct request_queue *q,
        unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
        unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
        unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
@@ -999,6 +1139,7 @@ extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
@@ -1027,6 +1168,13 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
    if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
        return 1;
    return rq->nr_phys_segments;
}

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);
@@ -1057,7 +1205,7 @@ static inline int blk_pre_runtime_suspend(struct request_queue *q)
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
extern inline void blk_set_runtime_active(struct request_queue *q) {}
static inline void blk_set_runtime_active(struct request_queue *q) {}
#endif

/*
@@ -1078,6 +1226,7 @@ struct blk_plug {
    struct list_head cb_list; /* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
@@ -1151,6 +1300,9 @@ extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
        sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
        bool discard);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
@@ -1354,6 +1506,46 @@ static inline unsigned int bdev_write_same(struct block_device *bdev)
    return 0;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
    struct request_queue *q = bdev_get_queue(bdev);

    if (q)
        return q->limits.max_write_zeroes_sectors;

    return 0;
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
    struct request_queue *q = bdev_get_queue(bdev);

    if (q)
        return blk_queue_zoned_model(q);

    return BLK_ZONED_NONE;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
    struct request_queue *q = bdev_get_queue(bdev);

    if (q)
        return blk_queue_is_zoned(q);

    return false;
}

static inline unsigned int bdev_zone_size(struct block_device *bdev)
{
    struct request_queue *q = bdev_get_queue(bdev);

    if (q)
        return blk_queue_zone_size(q);

    return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
    return q ? q->dma_alignment : 511;
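A hedged sketch of a consumer of the new bdev helpers, say a filesystem probe path deciding what the device supports (the surrounding function is invented for illustration):

static int demo_probe_bdev(struct block_device *bdev)
{
    if (bdev_is_zoned(bdev))
        pr_info("zoned (%s), zone size %u sectors\n",
            bdev_zoned_model(bdev) == BLK_ZONED_HM ?
                "host-managed" : "host-aware",
            bdev_zone_size(bdev));

    if (!bdev_write_zeroes_sectors(bdev))
        pr_info("no hardware write-zeroes support\n");

    return 0;
}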
@@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq)
}

extern void blk_dump_cmd(char *buf, struct request *rq);
extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes);
extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);

#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
include/linux/bpf-cgroup.h (new file, 92 lines)
@@ -0,0 +1,92 @@
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct cgroup;
struct sk_buff;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct cgroup_bpf {
    /*
     * Store two sets of bpf_prog pointers, one for programs that are
     * pinned directly to this cgroup, and one for those that are effective
     * when this cgroup is accessed.
     */
    struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
    struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
};

void cgroup_bpf_put(struct cgroup *cgrp);
void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);

void __cgroup_bpf_update(struct cgroup *cgrp,
             struct cgroup *parent,
             struct bpf_prog *prog,
             enum bpf_attach_type type);

/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
void cgroup_bpf_update(struct cgroup *cgrp,
               struct bpf_prog *prog,
               enum bpf_attach_type type);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                struct sk_buff *skb,
                enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                   enum bpf_attach_type type);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			\
({									\
    int __ret = 0;							\
    if (cgroup_bpf_enabled)						\
        __ret = __cgroup_bpf_run_filter_skb(sk, skb,		\
                            BPF_CGROUP_INET_INGRESS); \
                                    \
    __ret;								\
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			\
({									\
    int __ret = 0;							\
    if (cgroup_bpf_enabled && sk && sk == skb->sk) {		\
        typeof(sk) __sk = sk_to_full_sk(sk);			\
        if (sk_fullsock(__sk))					\
            __ret = __cgroup_bpf_run_filter_skb(__sk, skb,	\
                          BPF_CGROUP_INET_EGRESS); \
    }								\
    __ret;								\
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				\
({									\
    int __ret = 0;							\
    if (cgroup_bpf_enabled && sk) {					\
        __ret = __cgroup_bpf_run_filter_sk(sk,			\
                         BPF_CGROUP_INET_SOCK_CREATE); \
    }								\
    __ret;								\
})

#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
                      struct cgroup *parent) {}

#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */
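The RUN_PROG macros compile to a static-branch check, so call sites cost nothing unless CONFIG_CGROUP_BPF is set and a program is attached. A hedged sketch of a call site (the delivery function is invented, modeled on how the network stack would use the ingress hook):

#include <linux/bpf-cgroup.h>

static int demo_deliver(struct sock *sk, struct sk_buff *skb)
{
    int ret;

    /* nonzero means the attached cgroup program rejected the skb */
    ret = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
    if (ret)
        return ret;	/* typically surfaces as -EPERM / packet drop */

    /* ...normal delivery path... */
    return 0;
}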
@@ -216,6 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_digest(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

@@ -233,13 +234,16 @@ void bpf_register_map_type(struct bpf_map_type_list *tl);

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i);
struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);

struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
@@ -298,18 +302,33 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
{
    return ERR_PTR(-EOPNOTSUPP);
}
static inline struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
                               int i)
{
    return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
static inline struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)

static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
{
    return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
    return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}
#endif /* CONFIG_BPF_SYSCALL */

/* verifier prototypes for helper functions called from eBPF programs */
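With bpf_prog_inc() now __must_check, callers are expected to consume the returned pointer instead of assuming the reference was taken. A hedged sketch of the intended pattern (the wrapper function is illustrative, not kernel code):

static struct bpf_prog *demo_take_ref(struct bpf_prog *prog)
{
    prog = bpf_prog_inc(prog);
    if (IS_ERR(prog))
        return NULL;	/* no reference taken, e.g. refcount saturated */

    return prog;		/* drop later with bpf_prog_put() */
}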
@@ -319,6 +338,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
@@ -18,19 +18,12 @@

struct bpf_reg_state {
    enum bpf_reg_type type;
    /*
     * Used to determine if any memory access using this register will
     * result in a bad access.
     */
    s64 min_value;
    u64 max_value;
    union {
        /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
        s64 imm;

        /* valid when type == PTR_TO_PACKET* */
        struct {
            u32 id;
            u16 off;
            u16 range;
        };
@@ -40,6 +33,13 @@ struct bpf_reg_state {
         */
        struct bpf_map *map_ptr;
    };
    u32 id;
    /* Used to determine if any memory access using this register will
     * result in a bad access. These two fields must be last.
     * See states_equal()
     */
    s64 min_value;
    u64 max_value;
};

enum bpf_stack_slot_type {
@@ -13,11 +13,13 @@
#define PHY_ID_BCM5241			0x0143bc30
#define PHY_ID_BCMAC131			0x0143bc70
#define PHY_ID_BCM5481			0x0143bca0
#define PHY_ID_BCM54810			0x03625d00
#define PHY_ID_BCM5482			0x0143bcb0
#define PHY_ID_BCM5411			0x00206070
#define PHY_ID_BCM5421			0x002060e0
#define PHY_ID_BCM5464			0x002060b0
#define PHY_ID_BCM5461			0x002060c0
#define PHY_ID_BCM54612E		0x03625e60
#define PHY_ID_BCM54616S		0x03625d10
#define PHY_ID_BCM57780			0x03625d90

@@ -55,6 +57,7 @@
#define PHY_BRCM_EXT_IBND_TX_ENABLE	0x00002000
#define PHY_BRCM_CLEAR_RGMII_MODE	0x00004000
#define PHY_BRCM_DIS_TXCRXC_NOENRGY	0x00008000

/* Broadcom BCM7xxx specific workarounds */
#define PHY_BRCM_7XXX_REV(x)		(((x) >> 8) & 0xff)
#define PHY_BRCM_7XXX_PATCH(x)		((x) & 0xff)
@@ -105,11 +108,15 @@
#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA	0x0800

#define MII_BCM54XX_AUXCTL_MISC_WREN	0x8000
#define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW	0x0100
#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX	0x0200
#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC	0x7000
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC	0x0007
#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT	12
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN	(1 << 8)
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN	(1 << 4)

#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL	0x0000
#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK	0x0007

/*
 * Broadcom LED source encodings.  These are used in BCM5461, BCM5481,
@@ -124,6 +131,7 @@
#define BCM_LED_SRC_INTR	0x6
#define BCM_LED_SRC_QUALITY	0x7
#define BCM_LED_SRC_RCVLED	0x8
#define BCM_LED_SRC_WIRESPEED	0x9
#define BCM_LED_SRC_MULTICOLOR1	0xa
#define BCM_LED_SRC_OPENSHORT	0xb
#define BCM_LED_SRC_OFF		0xe	/* Tied high */
@@ -135,6 +143,14 @@
 * Shadow values go into bits [14:10] of register 0x1c to select a shadow
 * register to access.
 */

/* 00100: Reserved control register 2 */
#define BCM54XX_SHD_SCR2		0x04
#define BCM54XX_SHD_SCR2_WSPD_RTRY_DIS	0x100
#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT	2
#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET	2
#define BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK	0x7

/* 00101: Spare Control Register 3 */
#define BCM54XX_SHD_SCR3		0x05
#define BCM54XX_SHD_SCR3_DEF_CLK125	0x0001
@@ -189,6 +205,12 @@
#define BCM5482_SSD_SGMII_SLAVE_EN	0x0002	/* Slave mode enable */
#define BCM5482_SSD_SGMII_SLAVE_AD	0x0001	/* Slave auto-detection */

/* BCM54810 Registers */
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL	(MII_BCM54XX_EXP_SEL_ER + 0x90)
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN	(1 << 0)
#define BCM54810_SHD_CLK_CTL			0x3
#define BCM54810_SHD_CLK_CTL_GTXCLK_EN		(1 << 9)


/*****************************************************************************/
/* Fast Ethernet Transceiver definitions. */
@ -222,6 +244,9 @@
|
|||
#define LPI_FEATURE_EN_DIG1000X 0x4000
|
||||
|
||||
/* Core register definitions*/
|
||||
#define MII_BRCM_CORE_BASE12 0x12
|
||||
#define MII_BRCM_CORE_BASE13 0x13
|
||||
#define MII_BRCM_CORE_BASE14 0x14
|
||||
#define MII_BRCM_CORE_BASE1E 0x1E
|
||||
#define MII_BRCM_CORE_EXPB0 0xB0
|
||||
#define MII_BRCM_CORE_EXPB1 0xB1
|
||||
|
|
|
|||
|
|
@@ -40,6 +40,8 @@ struct bsg_job {
	struct device *dev;
	struct request *req;

+	struct kref kref;
+
	/* Transport/driver specific request/reply structs */
	void *request;
	void *reply;

@@ -67,5 +69,7 @@ void bsg_job_done(struct bsg_job *job, int result,
int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
		    bsg_job_fn *job_fn, int dd_job_size);
void bsg_request_fn(struct request_queue *q);
+void bsg_job_put(struct bsg_job *job);
+int __must_check bsg_job_get(struct bsg_job *job);

#endif
@@ -168,7 +168,12 @@ int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
-void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
+void clean_bdev_aliases(struct block_device *bdev, sector_t block,
+			sector_t len);
+static inline void clean_bdev_bh_alias(struct buffer_head *bh)
+{
+	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
+}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
@@ -121,4 +121,21 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
}

#endif	/* CONFIG_GENERIC_BUG */

+/*
+ * Since detected data corruption should stop operation on the affected
+ * structures, this returns false if the corruption condition is found.
+ */
+#define CHECK_DATA_CORRUPTION(condition, fmt, ...)			 \
+	do {								 \
+		if (unlikely(condition)) {				 \
+			if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
+				pr_err(fmt, ##__VA_ARGS__);		 \
+				BUG();					 \
+			} else						 \
+				WARN(1, fmt, ##__VA_ARGS__);		 \
+			return false;					 \
+		}							 \
+	} while (0)
+
#endif	/* _LINUX_BUG_H */
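
Note that CHECK_DATA_CORRUPTION() expands to a bare "return false;", so it can only be used inside functions returning bool. A minimal sketch of a caller, loosely modeled on the list-debug style of check; the function name and message text below are invented for illustration:

/* Illustrative sketch only, not part of this header. */
static bool example_list_add_valid(struct list_head *new,
				   struct list_head *prev,
				   struct list_head *next)
{
	CHECK_DATA_CORRUPTION(next->prev != prev,
			      "list_add corruption: next->prev should be prev (%p), was %p\n",
			      prev, next->prev);
	return true;
}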
@@ -18,6 +18,7 @@ enum cache_type {

/**
 * struct cacheinfo - represent a cache leaf node
+ * @id: This cache's id. It is unique among caches with the same (type, level).
 * @type: type of the cache - data, inst or unified
 * @level: represents the hierarchy in the multi-level cache
 * @coherency_line_size: size of each cache line usually representing

@@ -44,6 +45,7 @@ enum cache_type {
 * keeping, the remaining members form the core properties of the cache
 */
struct cacheinfo {
+	unsigned int id;
	enum cache_type type;
	unsigned int level;
	unsigned int coherency_line_size;

@@ -61,6 +63,7 @@ struct cacheinfo {
#define CACHE_WRITE_ALLOCATE	BIT(3)
#define CACHE_ALLOCATE_POLICY_MASK	\
	(CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
+#define CACHE_ID		BIT(4)

	struct device_node *of_node;
	bool disable_sysfs;

@@ -71,6 +74,7 @@ struct cpu_cacheinfo {
	struct cacheinfo *info_list;
	unsigned int num_levels;
	unsigned int num_leaves;
+	bool cpu_map_populated;
};

/*
@@ -240,8 +240,10 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap)
	return true;
}
#endif /* CONFIG_MULTIUSER */
+extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode);
extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
+extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);

/* audit system wants to get cap info from files as well */
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
@@ -11,8 +11,8 @@
 * published by the Free Software Foundation.
 */

-#ifndef __CPP_H__
-#define __CPP_H__
+#ifndef __CCP_H__
+#define __CCP_H__

#include <linux/scatterlist.h>
#include <linux/workqueue.h>

@@ -553,7 +553,7 @@ enum ccp_engine {
#define CCP_CMD_PASSTHRU_NO_DMA_MAP 0x00000002

/**
- * struct ccp_cmd - CPP operation request
+ * struct ccp_cmd - CCP operation request
 * @entry: list element (ccp driver use only)
 * @work: work element used for callbacks (ccp driver use only)
 * @ccp: CCP device to be run on (ccp driver use only)
include/linux/cec.h (1014 lines changed): diff suppressed because the file is too large.
@@ -64,7 +64,7 @@ struct ceph_auth_client_ops {
	int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type,
				 struct ceph_auth_handshake *auth);
	int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
-				       struct ceph_authorizer *a, size_t len);
+				       struct ceph_authorizer *a);
	void (*invalidate_authorizer)(struct ceph_auth_client *ac,
				      int peer_type);

@@ -118,8 +118,7 @@ extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
				       int peer_type,
				       struct ceph_auth_handshake *a);
extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
-					     struct ceph_authorizer *a,
-					     size_t len);
+					     struct ceph_authorizer *a);
extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
					    int peer_type);
@@ -653,6 +653,9 @@ enum {

extern const char *ceph_cap_op_name(int op);

+/* flags field in client cap messages (version >= 10) */
+#define CEPH_CLIENT_CAPS_SYNC		(0x1)
+
/*
 * caps message, used for capability callbacks, acks, requests, etc.
 */
@@ -31,6 +31,10 @@ struct ceph_mdsmap {
	int m_num_data_pg_pools;
	u64 *m_data_pg_pools;
	u64 m_cas_pg_pool;
+
+	bool m_enabled;
+	bool m_damaged;
+	int m_num_laggy;
};

static inline struct ceph_entity_addr *

@@ -59,5 +63,6 @@ static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
+extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);

#endif
@@ -1,7 +1,7 @@
#ifndef __FS_CEPH_MESSENGER_H
#define __FS_CEPH_MESSENGER_H

-#include <linux/blk_types.h>
+#include <linux/bvec.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/net.h>

@@ -30,7 +30,7 @@ struct ceph_connection_operations {
	struct ceph_auth_handshake *(*get_authorizer) (
				struct ceph_connection *con,
			       int *proto, int force_new);
-	int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
+	int (*verify_authorizer_reply) (struct ceph_connection *con);
	int (*invalidate_authorizer)(struct ceph_connection *con);

	/* there was some error on the socket (disconnect, whatever) */
@@ -176,7 +176,7 @@ struct ceph_osd_request {
	struct kref r_kref;
	bool r_mempool;
	struct completion r_completion;
-	struct completion r_safe_completion;  /* fsync waiter */
+	struct completion r_done_completion;  /* fsync waiter */
	ceph_osdc_callback_t r_callback;
	ceph_osdc_unsafe_callback_t r_unsafe_callback;
	struct list_head r_unsafe_item;
@@ -16,6 +16,7 @@
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
+#include <linux/bpf-cgroup.h>

#ifdef CONFIG_CGROUPS

@@ -300,6 +301,9 @@ struct cgroup {
	/* used to schedule release agent */
	struct work_struct release_agent_work;

+	/* used to store eBPF programs */
+	struct cgroup_bpf bpf;
+
	/* ids of the ancestors at each level including self */
	int ancestor_ids[];
};
@@ -17,8 +17,9 @@
#include <linux/notifier.h>

struct device;

struct clk;
+struct device_node;
+struct of_phandle_args;

/**
 * DOC: clk notifier callback types

@@ -248,6 +249,23 @@ struct clk *clk_get(struct device *dev, const char *id);
 */
struct clk *devm_clk_get(struct device *dev, const char *id);

+/**
+ * devm_get_clk_from_child - lookup and obtain a managed reference to a
+ *			     clock producer from child node.
+ * @dev: device for clock "consumer"
+ * @np: pointer to clock consumer node
+ * @con_id: clock consumer ID
+ *
+ * This function parses the clocks, and uses them to look up the
+ * struct clk from the registered list of clock providers by using
+ * @np and @con_id
+ *
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
+ */
+struct clk *devm_get_clk_from_child(struct device *dev,
+				    struct device_node *np, const char *con_id);
+
/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source

@@ -432,6 +450,12 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
	return NULL;
}

+static inline struct clk *devm_get_clk_from_child(struct device *dev,
+				struct device_node *np, const char *con_id)
+{
+	return NULL;
+}
+
static inline void clk_put(struct clk *clk) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}

@@ -501,9 +525,6 @@ static inline void clk_disable_unprepare(struct clk *clk)
	clk_unprepare(clk);
}

-struct device_node;
-struct of_phandle_args;
-
#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
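
For illustration, a probe() that pulls a clock from a child node might use the new helper roughly as below. The child-node name "codec", the clock ID "mclk", and the error handling are assumptions made for this sketch, not part of the API above:

static int example_probe(struct platform_device *pdev)
{
	struct device_node *child;
	struct clk *mclk;

	/* "codec" is a hypothetical child node of this device. */
	child = of_get_child_by_name(pdev->dev.of_node, "codec");
	if (!child)
		return -ENODEV;

	/* Managed lookup: the reference is dropped automatically on unbind. */
	mclk = devm_get_clk_from_child(&pdev->dev, child, "mclk");
	of_node_put(child);
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	return clk_prepare_enable(mclk);
}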
@@ -20,10 +20,6 @@ struct device;
struct device_node;
struct generic_pm_domain;

-void r8a7778_clocks_init(u32 mode);
-void r8a7779_clocks_init(u32 mode);
-void rcar_gen2_clocks_init(u32 mode);
-
void cpg_mstp_add_clk_domain(struct device_node *np);
#ifdef CONFIG_CLK_RENESAS_CPG_MSTP
int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev);
@@ -75,8 +75,8 @@ struct module;
 * structure.
 */
struct clocksource {
-	cycle_t (*read)(struct clocksource *cs);
-	cycle_t mask;
+	u64 (*read)(struct clocksource *cs);
+	u64 mask;
	u32 mult;
	u32 shift;
	u64 max_idle_ns;

@@ -98,8 +98,8 @@ struct clocksource {
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head wd_list;
-	cycle_t cs_last;
-	cycle_t wd_last;
+	u64 cs_last;
+	u64 wd_last;
#endif
	struct module *owner;
};

@@ -117,7 +117,7 @@ struct clocksource {
#define CLOCK_SOURCE_RESELECT			0x100

/* simplify initialization of mask field */
-#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+#define CLOCKSOURCE_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)

static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
{

@@ -169,11 +169,14 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 *
- * Converts cycles to nanoseconds, using the given mult and shift.
+ * Converts clocksource cycles to nanoseconds, using the given @mult and @shift.
+ * The code is optimized for performance and is not intended to work
+ * with absolute clocksource cycles (as those will easily overflow),
+ * but is only intended to be used with relative (delta) clocksource cycles.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization
 */
-static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
+static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift)
{
	return ((u64) cycles * mult) >> shift;
}

@@ -233,13 +236,13 @@ static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz)

extern int timekeeping_notify(struct clocksource *clock);

-extern cycle_t clocksource_mmio_readl_up(struct clocksource *);
-extern cycle_t clocksource_mmio_readl_down(struct clocksource *);
-extern cycle_t clocksource_mmio_readw_up(struct clocksource *);
-extern cycle_t clocksource_mmio_readw_down(struct clocksource *);
+extern u64 clocksource_mmio_readl_up(struct clocksource *);
+extern u64 clocksource_mmio_readl_down(struct clocksource *);
+extern u64 clocksource_mmio_readw_up(struct clocksource *);
+extern u64 clocksource_mmio_readw_down(struct clocksource *);

extern int clocksource_mmio_init(void __iomem *, const char *,
-	unsigned long, int, unsigned, cycle_t (*)(struct clocksource *));
+	unsigned long, int, unsigned, u64 (*)(struct clocksource *));

extern int clocksource_i8253_init(void);
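
As the reworked kernel-doc above stresses, clocksource_cyc2ns() is only meant for relative (delta) cycle values. A short sketch of the intended pattern; the helper name is invented for illustration:

/* Illustrative only: convert a cycle delta, not an absolute counter value. */
static s64 example_elapsed_ns(struct clocksource *cs, u64 start, u64 end)
{
	u64 delta = (end - start) & cs->mask;	/* mask handles counter wrap */

	return clocksource_cyc2ns(delta, cs->mult, cs->shift);
}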
@@ -1,6 +1,9 @@
#ifndef __CMA_H__
#define __CMA_H__

+#include <linux/init.h>
+#include <linux/types.h>
+
/*
 * There is always at least global CMA area and a few optional
 * areas configured in kernel .config.
@@ -21,7 +21,7 @@
 * clobbered. The issue is as follows: while the inline asm might
 * access any memory it wants, the compiler could have fit all of
 * @ptr into memory registers instead, and since @ptr never escaped
- * from that, it proofed that the inline asm wasn't touching any of
+ * from that, it proved that the inline asm wasn't touching any of
 * it. This version works well with both compilers, i.e. we're telling
 * the compiler that the inline asm absolutely may see the contents
 * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
@@ -35,14 +35,11 @@
#ifndef _CONFIGFS_H_
#define _CONFIGFS_H_

-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/kref.h>
-#include <linux/mutex.h>
-#include <linux/err.h>
-
-#include <linux/atomic.h>
+#include <linux/stat.h>   /* S_IRUGO */
+#include <linux/types.h>  /* ssize_t */
+#include <linux/list.h>   /* struct list_head */
+#include <linux/kref.h>   /* struct kref */
+#include <linux/mutex.h>  /* struct mutex */

#define CONFIGFS_ITEM_NAME_LEN	20

@@ -228,7 +225,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
struct configfs_item_operations {
	void (*release)(struct config_item *);
	int (*allow_link)(struct config_item *src, struct config_item *target);
-	int (*drop_link)(struct config_item *src, struct config_item *target);
+	void (*drop_link)(struct config_item *src, struct config_item *target);
};

struct configfs_group_operations {
@@ -28,9 +28,17 @@ struct tty_struct;
#define VT100ID "\033[?1;2c"
#define VT102ID "\033[?6c"

+enum con_scroll {
+	SM_UP,
+	SM_DOWN,
+};
+
/**
 * struct consw - callbacks for consoles
 *
+ * @con_scroll: move lines from @top to @bottom in direction @dir by @lines.
+ *		Return true if no generic handling should be done.
+ *		Invoked by csi_M and printing to the console.
 * @con_set_palette: sets the palette of the console to @table (optional)
 * @con_scrolldelta: the contents of the console should be scrolled by @lines.
 *		     Invoked by user. (optional)

@@ -44,7 +52,9 @@ struct consw {
	void	(*con_putc)(struct vc_data *, int, int, int);
	void	(*con_putcs)(struct vc_data *, const unsigned short *, int, int, int);
	void	(*con_cursor)(struct vc_data *, int);
-	int	(*con_scroll)(struct vc_data *, int, int, int, int);
+	bool	(*con_scroll)(struct vc_data *, unsigned int top,
+			unsigned int bottom, enum con_scroll dir,
+			unsigned int lines);
	int	(*con_switch)(struct vc_data *);
	int	(*con_blank)(struct vc_data *, int, int);
	int	(*con_font_set)(struct vc_data *, struct console_font *, unsigned);

@@ -99,10 +109,6 @@ static inline int con_debug_leave(void)
}
#endif

-/* scroll */
-#define SM_UP       (1)
-#define SM_DOWN     (2)
-
/* cursor */
#define CM_DRAW     (1)
#define CM_ERASE    (2)

@@ -57,9 +57,6 @@ struct notifier_block;

#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */

@@ -80,87 +77,14 @@ struct notifier_block;

#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
-/* Need to know about CPUs going up/down? */
-#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
-#define cpu_notifier(fn, pri) {					\
-	static struct notifier_block fn##_nb =			\
-		{ .notifier_call = fn, .priority = pri };	\
-	register_cpu_notifier(&fn##_nb);			\
-}
-
-#define __cpu_notifier(fn, pri) {				\
-	static struct notifier_block fn##_nb =			\
-		{ .notifier_call = fn, .priority = pri };	\
-	__register_cpu_notifier(&fn##_nb);			\
-}
-#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
-#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
-#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
-#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
-
-#ifdef CONFIG_HOTPLUG_CPU
-extern int register_cpu_notifier(struct notifier_block *nb);
-extern int __register_cpu_notifier(struct notifier_block *nb);
-extern void unregister_cpu_notifier(struct notifier_block *nb);
-extern void __unregister_cpu_notifier(struct notifier_block *nb);
-#else
-
-#ifndef MODULE
-extern int register_cpu_notifier(struct notifier_block *nb);
-extern int __register_cpu_notifier(struct notifier_block *nb);
-#else
-static inline int register_cpu_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-
-static inline int __register_cpu_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-#endif
-
-static inline void unregister_cpu_notifier(struct notifier_block *nb)
-{
-}
-
-static inline void __unregister_cpu_notifier(struct notifier_block *nb)
-{
-}
-#endif
-
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);

-#define cpu_notifier_register_begin	cpu_maps_update_begin
-#define cpu_notifier_register_done	cpu_maps_update_done
-
#else	/* CONFIG_SMP */
#define cpuhp_tasks_frozen	0

-#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
-#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
-
-static inline int register_cpu_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-
-static inline int __register_cpu_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-
-static inline void unregister_cpu_notifier(struct notifier_block *nb)
-{
-}
-
-static inline void __unregister_cpu_notifier(struct notifier_block *nb)
-{
-}
-
static inline void cpu_maps_update_begin(void)
{
}

@@ -169,14 +93,6 @@ static inline void cpu_maps_update_done(void)
{
}

-static inline void cpu_notifier_register_begin(void)
-{
-}
-
-static inline void cpu_notifier_register_done(void)
-{
-}
-
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;

@@ -189,12 +105,6 @@ extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
-#define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
-#define __hotcpu_notifier(fn, pri)	__cpu_notifier(fn, pri)
-#define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
-#define __register_hotcpu_notifier(nb)	__register_cpu_notifier(nb)
-#define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
-#define __unregister_hotcpu_notifier(nb)	__unregister_cpu_notifier(nb)
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);

@@ -206,13 +116,6 @@ static inline void cpu_hotplug_done(void) {}
#define put_online_cpus()	do { } while (0)
#define cpu_hotplug_disable()	do { } while (0)
#define cpu_hotplug_enable()	do { } while (0)
-#define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
-#define __hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
-/* These aren't inline functions due to a GCC bug. */
-#define register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
-#define __register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
-#define unregister_hotcpu_notifier(nb)	({ (void)(nb); })
-#define __unregister_hotcpu_notifier(nb)	({ (void)(nb); })
#endif	/* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM_SLEEP_SMP

@@ -245,6 +148,8 @@ void arch_cpu_idle_dead(void);
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
+void play_idle(unsigned long duration_ms);

#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);

@@ -175,7 +175,7 @@ void disable_cpufreq(void);

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
-int cpufreq_update_policy(unsigned int cpu);
+void cpufreq_update_policy(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);

@@ -234,6 +234,10 @@ __ATTR(_name, _perm, show_##_name, NULL)
static struct freq_attr _name =			\
__ATTR(_name, 0644, show_##_name, store_##_name)

+#define cpufreq_freq_attr_wo(_name)		\
+static struct freq_attr _name =			\
+__ATTR(_name, 0200, NULL, store_##_name)
+
struct global_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,

@@ -16,9 +16,11 @@ enum cpuhp_state {
	CPUHP_PERF_SUPERH,
	CPUHP_X86_HPET_DEAD,
	CPUHP_X86_APB_DEAD,
+	CPUHP_X86_MCE_DEAD,
+	CPUHP_VIRT_NET_DEAD,
	CPUHP_SLUB_DEAD,
	CPUHP_MM_WRITEBACK_DEAD,
	CPUHP_MM_VMSTAT_DEAD,
	CPUHP_SOFTIRQ_DEAD,
	CPUHP_NET_MVNETA_DEAD,
	CPUHP_CPUIDLE_DEAD,

@@ -30,6 +32,18 @@ enum cpuhp_state {
	CPUHP_ACPI_CPUDRV_DEAD,
	CPUHP_S390_PFAULT_DEAD,
	CPUHP_BLK_MQ_DEAD,
+	CPUHP_FS_BUFF_DEAD,
+	CPUHP_PRINTK_DEAD,
+	CPUHP_MM_MEMCQ_DEAD,
+	CPUHP_PERCPU_CNT_DEAD,
+	CPUHP_RADIX_DEAD,
+	CPUHP_PAGE_ALLOC_DEAD,
+	CPUHP_NET_DEV_DEAD,
+	CPUHP_PCI_XGENE_DEAD,
+	CPUHP_IOMMU_INTEL_DEAD,
+	CPUHP_LUSTRE_CFS_DEAD,
+	CPUHP_SCSI_BNX2FC_DEAD,
+	CPUHP_SCSI_BNX2I_DEAD,
	CPUHP_WORKQUEUE_PREP,
	CPUHP_POWER_NUMA_PREPARE,
	CPUHP_HRTIMERS_PREPARE,

@@ -45,12 +59,20 @@ enum cpuhp_state {
	CPUHP_POWERPC_MMU_CTX_PREPARE,
	CPUHP_XEN_PREPARE,
	CPUHP_XEN_EVTCHN_PREPARE,
-	CPUHP_NOTIFY_PREPARE,
	CPUHP_ARM_SHMOBILE_SCU_PREPARE,
	CPUHP_SH_SH3X_PREPARE,
	CPUHP_BLK_MQ_PREPARE,
	CPUHP_NET_FLOW_PREPARE,
	CPUHP_TOPOLOGY_PREPARE,
	CPUHP_NET_IUCV_PREPARE,
	CPUHP_ARM_BL_PREPARE,
	CPUHP_TRACE_RB_PREPARE,
	CPUHP_MM_ZS_PREPARE,
	CPUHP_MM_ZSWP_MEM_PREPARE,
	CPUHP_MM_ZSWP_POOL_PREPARE,
	CPUHP_KVM_PPC_BOOK3S_PREPARE,
	CPUHP_ZCOMP_PREPARE,
	CPUHP_TIMERS_DEAD,
	CPUHP_NOTF_ERR_INJ_PREPARE,
	CPUHP_MIPS_SOC_PREPARE,
	CPUHP_BRINGUP_CPU,
	CPUHP_AP_IDLE_DEAD,

@@ -58,10 +80,8 @@ enum cpuhp_state {
	CPUHP_AP_SCHED_STARTING,
	CPUHP_AP_RCUTREE_DYING,
	CPUHP_AP_IRQ_GIC_STARTING,
-	CPUHP_AP_IRQ_GICV3_STARTING,
	CPUHP_AP_IRQ_HIP04_STARTING,
	CPUHP_AP_IRQ_ARMADA_XP_STARTING,
-	CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
	CPUHP_AP_IRQ_BCM2836_STARTING,
	CPUHP_AP_ARM_MVEBU_COHERENCY,
	CPUHP_AP_PERF_X86_UNCORE_STARTING,

@@ -80,7 +100,6 @@ enum cpuhp_state {
	CPUHP_AP_ARM_L2X0_STARTING,
	CPUHP_AP_ARM_ARCH_TIMER_STARTING,
	CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
-	CPUHP_AP_DUMMY_TIMER_STARTING,
	CPUHP_AP_JCORE_TIMER_STARTING,
	CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
	CPUHP_AP_ARM_TWD_STARTING,

@@ -94,9 +113,10 @@ enum cpuhp_state {
-	CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
	CPUHP_AP_KVM_ARM_VGIC_STARTING,
	CPUHP_AP_KVM_ARM_TIMER_STARTING,
+	/* Must be the last timer callback */
+	CPUHP_AP_DUMMY_TIMER_STARTING,
	CPUHP_AP_ARM_XEN_STARTING,
	CPUHP_AP_ARM_CORESIGHT_STARTING,
	CPUHP_AP_ARM_CORESIGHT4_STARTING,
	CPUHP_AP_ARM64_ISNDEP_STARTING,
	CPUHP_AP_SMPCFD_DYING,
	CPUHP_AP_X86_TBOOT_DYING,

@@ -120,7 +140,6 @@ enum cpuhp_state {
	CPUHP_AP_PERF_ARM_L2X0_ONLINE,
	CPUHP_AP_WORKQUEUE_ONLINE,
	CPUHP_AP_RCUTREE_ONLINE,
-	CPUHP_AP_NOTIFY_ONLINE,
	CPUHP_AP_ONLINE_DYN,
	CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
	CPUHP_AP_X86_HPET_ONLINE,

@@ -74,6 +74,7 @@ struct cpuidle_driver_kobj;
struct cpuidle_device {
	unsigned int		registered:1;
	unsigned int		enabled:1;
+	unsigned int		use_deepest_state:1;
	unsigned int		cpu;

	int			last_residency;

@@ -192,11 +193,12 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif

-#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
+#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
				      struct cpuidle_device *dev);
extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
				struct cpuidle_device *dev);
+extern void cpuidle_use_deepest_state(bool enable);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
					     struct cpuidle_device *dev)

@@ -204,6 +206,9 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
				       struct cpuidle_device *dev)
{return -ENODEV; }
+static inline void cpuidle_use_deepest_state(bool enable)
+{
+}
#endif

/* kernel/sched/idle.c */

@@ -235,8 +240,6 @@ struct cpuidle_governor {
	int  (*select)	(struct cpuidle_driver *drv,
			 struct cpuidle_device *dev);
	void (*reflect)	(struct cpuidle_device *dev, int index);

	struct module *owner;
};

#ifdef CONFIG_CPU_IDLE

@@ -722,6 +722,11 @@ void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);

+static inline void reset_cpu_possible_mask(void)
+{
+	bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);
+}
+
static inline void
set_cpu_possible(unsigned int cpu, bool possible)
{

@@ -50,6 +50,8 @@
#define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER	0x00000006
#define CRYPTO_ALG_TYPE_KPP		0x00000008
+#define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
+#define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER	0x0000000d
#define CRYPTO_ALG_TYPE_DIGEST		0x0000000e

@@ -60,6 +62,7 @@
#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c
+#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020

@@ -87,7 +90,7 @@
#define CRYPTO_ALG_TESTED		0x00000400

/*
- * Set if the algorithm is an instance that is build from templates.
+ * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

@@ -960,7 +963,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
 * ablkcipher_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
- *	    CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 *	   increase the wait queue beyond the initial maximum size;
 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle

@@ -977,7 +980,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
 * cipher operation completes.
 *
 * The callback function is registered with the ablkcipher_request handle and
- * must comply with the following template
+ * must comply with the following template::
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
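
Wired up, the documented template looks roughly like the fragment below. The completion-based callback is a common pattern but an assumption made for this sketch, not something the header mandates:

/* Illustrative fragment matching the callback template above. */
static void example_cipher_done(struct crypto_async_request *req, int error)
{
	if (error == -EINPROGRESS)
		return;		/* backlogged request has now started */
	complete(req->data);	/* assumes a struct completion was passed as data */
}

/* ablkcipher_request_set_callback(req,
 *		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
 *		example_cipher_done, &done);
 */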

@@ -8,25 +8,44 @@
struct iomap_ops;

-/* We use lowest available exceptional entry bit for locking */
+/*
+ * We use lowest available bit in exceptional entry for locking, one bit for
+ * the entry size (PMD) and two more to tell us if the entry is a huge zero
+ * page (HZP) or an empty entry that is just used for locking. In total four
+ * special bits.
+ *
+ * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and
+ * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
+ * block allocation.
+ */
+#define RADIX_DAX_SHIFT	(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
+#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
+#define RADIX_DAX_HZP (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
+#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

+static inline unsigned long dax_radix_sector(void *entry)
+{
+	return (unsigned long)entry >> RADIX_DAX_SHIFT;
+}
+
+static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
+{
+	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
+			((unsigned long)sector << RADIX_DAX_SHIFT) |
+			RADIX_DAX_ENTRY_LOCK);
+}
+
-ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops);
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
		  get_block_t, dio_iodone_t, int flags);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
		struct iomap_ops *ops);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
-		pgoff_t index, bool wake_all);
+		pgoff_t index, void *entry, bool wake_all);

#ifdef CONFIG_FS_DAX
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index);
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length);
#else

@@ -35,12 +54,6 @@ static inline struct page *read_dax_sector(struct block_device *bdev,
{
	return ERR_PTR(-ENXIO);
}
-/* Shouldn't ever be called when dax is disabled. */
-static inline void dax_unlock_mapping_entry(struct address_space *mapping,
-					    pgoff_t index)
-{
-	BUG();
-}
static inline int __dax_zero_page_range(struct block_device *bdev,
		sector_t sector, unsigned int offset, unsigned int length)
{

@@ -48,18 +61,28 @@ static inline int __dax_zero_page_range(struct block_device *bdev,
}
#endif

-#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
-int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
-		unsigned int flags, get_block_t);
+#ifdef CONFIG_FS_DAX_PMD
+static inline unsigned int dax_radix_order(void *entry)
+{
+	if ((unsigned long)entry & RADIX_DAX_PMD)
+		return PMD_SHIFT - PAGE_SHIFT;
+	return 0;
+}
+int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
+		pmd_t *pmd, unsigned int flags, struct iomap_ops *ops);
#else
-static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
-		pmd_t *pmd, unsigned int flags, get_block_t gb)
+static inline unsigned int dax_radix_order(void *entry)
+{
+	return 0;
+}
+static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmd, unsigned int flags,
+		struct iomap_ops *ops)
+{
+	return VM_FAULT_FALLBACK;
+}
#endif
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
#define dax_mkwrite(vma, vmf, gb)	dax_fault(vma, vmf, gb)

static inline bool vma_is_dax(struct vm_area_struct *vma)
{

@@ -139,7 +139,7 @@ struct dentry_operations {
	void (*d_iput)(struct dentry *, struct inode *);
	char *(*d_dname)(struct dentry *, char *, int);
	struct vfsmount *(*d_automount)(struct path *);
-	int (*d_manage)(struct dentry *, bool);
+	int (*d_manage)(const struct path *, bool);
	struct dentry *(*d_real)(struct dentry *, const struct inode *,
				 unsigned int);
} ____cacheline_aligned;

@@ -254,7 +254,7 @@ extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);

/* test whether we have any submounts in a subdir tree */
-extern int have_submounts(struct dentry *);
+extern int path_has_submounts(const struct path *);

/*
 * This adds the entry to the hash queues.

@@ -44,7 +44,7 @@ void dcookie_unregister(struct dcookie_user * user);
 *
 * Returns 0 on success, with *cookie filled in
 */
-int get_dcookie(struct path *path, unsigned long *cookie);
+int get_dcookie(const struct path *path, unsigned long *cookie);

#else

@@ -58,7 +58,7 @@ static inline void dcookie_unregister(struct dcookie_user * user)
	return;
}

-static inline int get_dcookie(struct path *path, unsigned long *cookie)
+static inline int get_dcookie(const struct path *path, unsigned long *cookie)
{
	return -ENOSYS;
}
@@ -52,7 +52,8 @@ extern struct srcu_struct debugfs_srcu;
 * Must only be called under the protection established by
 * debugfs_use_file_start().
 */
-static inline const struct file_operations *debugfs_real_fops(struct file *filp)
+static inline const struct file_operations *
+debugfs_real_fops(const struct file *filp)
	__must_hold(&debugfs_srcu)
{
	/*

@@ -62,6 +63,21 @@ static inline const struct file_operations *debugfs_real_fops(struct file *filp)
	return filp->f_path.dentry->d_fsdata;
}

+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt)		\
+static int __fops ## _open(struct inode *inode, struct file *file)	\
+{									\
+	__simple_attr_check_format(__fmt, 0ull);			\
+	return simple_attr_open(inode, file, __get, __set, __fmt);	\
+}									\
+static const struct file_operations __fops = {				\
+	.owner	 = THIS_MODULE,						\
+	.open	 = __fops ## _open,					\
+	.release = simple_attr_release,					\
+	.read	 = debugfs_attr_read,					\
+	.write	 = debugfs_attr_write,					\
+	.llseek  = generic_file_llseek,					\
+}
+
#if defined(CONFIG_DEBUG_FS)

struct dentry *debugfs_create_file(const char *name, umode_t mode,

@@ -99,21 +115,6 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
			size_t len, loff_t *ppos);

-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt)		\
-static int __fops ## _open(struct inode *inode, struct file *file)	\
-{									\
-	__simple_attr_check_format(__fmt, 0ull);			\
-	return simple_attr_open(inode, file, __get, __set, __fmt);	\
-}									\
-static const struct file_operations __fops = {				\
-	.owner	 = THIS_MODULE,						\
-	.open	 = __fops ## _open,					\
-	.release = simple_attr_release,					\
-	.read	 = debugfs_attr_read,					\
-	.write	 = debugfs_attr_write,					\
-	.llseek  = generic_file_llseek,					\
-}
-
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
		struct dentry *new_dir, const char *new_name);

@@ -233,8 +234,18 @@ static inline void debugfs_use_file_finish(int srcu_idx)
	__releases(&debugfs_srcu)
{ }

-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt)	\
-	static const struct file_operations __fops = { 0 }
+static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf,
+					size_t len, loff_t *ppos)
+{
+	return -ENODEV;
+}
+
+static inline ssize_t debugfs_attr_write(struct file *file,
+					const char __user *buf,
+					size_t len, loff_t *ppos)
+{
+	return -ENODEV;
+}
+
static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
		struct dentry *new_dir, char *new_name)
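
DEFINE_DEBUGFS_ATTRIBUTE() is intended to pair with debugfs_create_file_unsafe(), with reads and writes going through debugfs_attr_read()/debugfs_attr_write(). A hedged sketch; the attribute name and the exported variable are invented for illustration:

/* Illustrative only: expose a u32 as <debugfs>/.../threshold. */
static u32 example_threshold;

static int threshold_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}

static int threshold_set(void *data, u64 val)
{
	*(u32 *)data = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(threshold_fops, threshold_get, threshold_set, "%llu\n");

/* debugfs_create_file_unsafe("threshold", 0600, parent, &example_threshold,
 *			      &threshold_fops); */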

@@ -20,7 +20,6 @@
#include <linux/devfreq.h>
#include <linux/thermal.h>

-#ifdef CONFIG_DEVFREQ_THERMAL
-
/**
 * struct devfreq_cooling_power - Devfreq cooling power ops

@@ -37,12 +36,16 @@
 *	@dyn_power_coeff * frequency * voltage^2
 */
struct devfreq_cooling_power {
-	unsigned long (*get_static_power)(unsigned long voltage);
-	unsigned long (*get_dynamic_power)(unsigned long freq,
+	unsigned long (*get_static_power)(struct devfreq *devfreq,
+					  unsigned long voltage);
+	unsigned long (*get_dynamic_power)(struct devfreq *devfreq,
+					   unsigned long freq,
					   unsigned long voltage);
	unsigned long dyn_power_coeff;
};

+#ifdef CONFIG_DEVFREQ_THERMAL
+
struct thermal_cooling_device *
of_devfreq_cooling_register_power(struct device_node *np, struct devfreq *df,
				  struct devfreq_cooling_power *dfc_power);
@@ -362,6 +362,7 @@ int subsys_virtual_register(struct bus_type *subsys,
 * @name:	Name of the class.
 * @owner:	The module owner.
 * @class_attrs: Default attributes of this class.
+ * @class_groups: Default attributes of this class.
 * @dev_groups:	Default attributes of the devices that belong to the class.
 * @dev_kobj:	The kobject that represents this class and links it into the hierarchy.
 * @dev_uevent:	Called when a device is added, removed from this class, or a

@@ -390,6 +391,7 @@ struct class {
	struct module		*owner;

	struct class_attribute		*class_attrs;
+	const struct attribute_group	**class_groups;
	const struct attribute_group	**dev_groups;
	struct kobject			*dev_kobj;

@@ -465,6 +467,8 @@ struct class_attribute {
	struct class_attribute class_attr_##_name = __ATTR_RW(_name)
#define CLASS_ATTR_RO(_name) \
	struct class_attribute class_attr_##_name = __ATTR_RO(_name)
+#define CLASS_ATTR_WO(_name) \
+	struct class_attribute class_attr_##_name = __ATTR_WO(_name)

extern int __must_check class_create_file_ns(struct class *class,
					     const struct class_attribute *attr,

@@ -698,6 +702,25 @@ static inline int devm_add_action_or_reset(struct device *dev,
	return ret;
}

+/**
+ * devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @type: Type to allocate per-cpu memory for
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+#define devm_alloc_percpu(dev, type) \
+	((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
+						      __alignof__(type)))
+
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+				   size_t align);
+void devm_free_percpu(struct device *dev, void __percpu *pdata);

struct device_dma_parameters {
	/*
	 * a low level driver may set these to teach IOMMU code about

@@ -707,6 +730,87 @@ struct device_dma_parameters {
	unsigned long segment_boundary_mask;
};

+/**
+ * enum device_link_state - Device link states.
+ * @DL_STATE_NONE: The presence of the drivers is not being tracked.
+ * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
+ * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
+ * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
+ * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
+ * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
+ */
+enum device_link_state {
+	DL_STATE_NONE = -1,
+	DL_STATE_DORMANT = 0,
+	DL_STATE_AVAILABLE,
+	DL_STATE_CONSUMER_PROBE,
+	DL_STATE_ACTIVE,
+	DL_STATE_SUPPLIER_UNBIND,
+};
+
+/*
+ * Device link flags.
+ *
+ * STATELESS: The core won't track the presence of supplier/consumer drivers.
+ * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
+ * PM_RUNTIME: If set, the runtime PM framework will use this link.
+ * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
+ */
+#define DL_FLAG_STATELESS	BIT(0)
+#define DL_FLAG_AUTOREMOVE	BIT(1)
+#define DL_FLAG_PM_RUNTIME	BIT(2)
+#define DL_FLAG_RPM_ACTIVE	BIT(3)
+
+/**
+ * struct device_link - Device link representation.
+ * @supplier: The device on the supplier end of the link.
+ * @s_node: Hook to the supplier device's list of links to consumers.
+ * @consumer: The device on the consumer end of the link.
+ * @c_node: Hook to the consumer device's list of links to suppliers.
+ * @status: The state of the link (with respect to the presence of drivers).
+ * @flags: Link flags.
+ * @rpm_active: Whether or not the consumer device is runtime-PM-active.
+ * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ */
+struct device_link {
+	struct device *supplier;
+	struct list_head s_node;
+	struct device *consumer;
+	struct list_head c_node;
+	enum device_link_state status;
+	u32 flags;
+	bool rpm_active;
+#ifdef CONFIG_SRCU
+	struct rcu_head rcu_head;
+#endif
+};
+
+/**
+ * enum dl_dev_state - Device driver presence tracking information.
+ * @DL_DEV_NO_DRIVER: There is no driver attached to the device.
+ * @DL_DEV_PROBING: A driver is probing.
+ * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
+ * @DL_DEV_UNBINDING: The driver is unbinding from the device.
+ */
+enum dl_dev_state {
+	DL_DEV_NO_DRIVER = 0,
+	DL_DEV_PROBING,
+	DL_DEV_DRIVER_BOUND,
+	DL_DEV_UNBINDING,
+};
+
+/**
+ * struct dev_links_info - Device data related to device links.
+ * @suppliers: List of links to supplier devices.
+ * @consumers: List of links to consumer devices.
+ * @status: Driver status information.
+ */
+struct dev_links_info {
+	struct list_head suppliers;
+	struct list_head consumers;
+	enum dl_dev_state status;
+};
+
/**
 * struct device - The basic device structure
 * @parent:	The device's "parent" device, the device to which it is attached.

@@ -732,8 +836,9 @@ struct device_dma_parameters {
 *		on. This shrinks the "Board Support Packages" (BSPs) and
 *		minimizes board-specific #ifdefs in drivers.
 * @driver_data: Private pointer for driver specific info.
+ * @links:	Links to suppliers and consumers of this device.
 * @power:	For device power management.
- *		See Documentation/power/devices.txt for details.
+ *		See Documentation/power/admin-guide/devices.rst for details.
 * @pm_domain:	Provide callbacks that are executed during system suspend,
 *		hibernation, system resume and during runtime PM transitions
 *		along with subsystem-level and driver-level callbacks.

@@ -799,6 +904,7 @@ struct device {
					   core doesn't touch it */
	void		*driver_data;	/* Driver data, set and get with
					   dev_set/get_drvdata */
+	struct dev_links_info	links;
	struct dev_pm_info	power;
	struct dev_pm_domain	*pm_domain;

@@ -1116,6 +1222,10 @@ extern void device_shutdown(void);
/* debugging and troubleshooting/diagnostic helpers. */
extern const char *dev_driver_string(const struct device *dev);

+/* Device links interface. */
+struct device_link *device_link_add(struct device *consumer,
+				    struct device *supplier, u32 flags);
+void device_link_del(struct device_link *link);
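
A hedged sketch of consumer-side usage; how the supplier device is found (for example via a phandle lookup) is outside this API and simply assumed here:

static int example_link_to_supplier(struct device *consumer,
				    struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
	if (!link)
		return -EINVAL;

	/* With DL_FLAG_AUTOREMOVE the core drops the link when the consumer
	 * driver unbinds, so no matching device_link_del() is needed. */
	return 0;
}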

#ifdef CONFIG_PRINTK

@@ -58,7 +58,7 @@ struct dm_io_notify {
struct dm_io_client;
struct dm_io_request {
	int bi_op;		/* REQ_OP */
-	int bi_op_flags;	/* rq_flag_bits */
+	int bi_op_flags;	/* req_flag_bits */
	struct dm_io_memory mem;	/* Memory to use for io */
	struct dm_io_notify notify;	/* Synchronous if notify.fn is NULL */
	struct dm_io_client *client;	/* Client memory handler */

@@ -30,7 +30,7 @@
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
-#include <linux/fence.h>
+#include <linux/dma-fence.h>
#include <linux/wait.h>

struct device;

@@ -143,7 +143,7 @@ struct dma_buf {
	wait_queue_head_t poll;

	struct dma_buf_poll_cb_t {
-		struct fence_cb cb;
+		struct dma_fence_cb cb;
		wait_queue_head_t *poll;

		unsigned long active;

include/linux/dma-fence-array.h (new file, 86 lines):
@@ -0,0 +1,86 @@
/*
 * fence-array: aggregates fence to be waited together
 *
 * Copyright (C) 2016 Collabora Ltd
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 * Authors:
 *	Gustavo Padovan <gustavo@padovan.org>
 *	Christian König <christian.koenig@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef __LINUX_DMA_FENCE_ARRAY_H
#define __LINUX_DMA_FENCE_ARRAY_H

#include <linux/dma-fence.h>

/**
 * struct dma_fence_array_cb - callback helper for fence array
 * @cb: fence callback structure for signaling
 * @array: reference to the parent fence array object
 */
struct dma_fence_array_cb {
	struct dma_fence_cb cb;
	struct dma_fence_array *array;
};

/**
 * struct dma_fence_array - fence to represent an array of fences
 * @base: fence base class
 * @lock: spinlock for fence handling
 * @num_fences: number of fences in the array
 * @num_pending: fences in the array still pending
 * @fences: array of the fences
 */
struct dma_fence_array {
	struct dma_fence base;

	spinlock_t lock;
	unsigned num_fences;
	atomic_t num_pending;
	struct dma_fence **fences;
};

extern const struct dma_fence_ops dma_fence_array_ops;

/**
 * dma_fence_is_array - check if a fence is from the array subclass
 * @fence: fence to test
 *
 * Return true if it is a dma_fence_array and false otherwise.
 */
static inline bool dma_fence_is_array(struct dma_fence *fence)
{
	return fence->ops == &dma_fence_array_ops;
}

/**
 * to_dma_fence_array - cast a fence to a dma_fence_array
 * @fence: fence to cast to a dma_fence_array
 *
 * Returns NULL if the fence is not a dma_fence_array,
 * or the dma_fence_array otherwise.
 */
static inline struct dma_fence_array *
to_dma_fence_array(struct dma_fence *fence)
{
	if (fence->ops != &dma_fence_array_ops)
		return NULL;

	return container_of(fence, struct dma_fence_array, base);
}

struct dma_fence_array *dma_fence_array_create(int num_fences,
					       struct dma_fence **fences,
					       u64 context, unsigned seqno,
					       bool signal_on_any);

#endif /* __LINUX_DMA_FENCE_ARRAY_H */
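
A hedged example of aggregating two fences with dma_fence_array_create(). The create call is assumed to take ownership of both the fences and the array of pointers, as its in-kernel users do, and dma_fence_context_alloc() is declared further down in dma-fence.h; the wrapper name is invented:

/* Illustrative only: build a fence that signals when BOTH inputs signal. */
static struct dma_fence *example_merge(struct dma_fence *f1,
				       struct dma_fence *f2)
{
	struct dma_fence **fences;
	struct dma_fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;
	fences[0] = f1;
	fences[1] = f2;

	array = dma_fence_array_create(2, fences,
				       dma_fence_context_alloc(1), 1,
				       false /* signal_on_any */);
	if (!array) {
		kfree(fences);
		return NULL;
	}
	return &array->base;
}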

include/linux/dma-fence.h (new file, 438 lines):
@@ -0,0 +1,438 @@
/*
 * Fence mechanism for dma-buf to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef __LINUX_DMA_FENCE_H
#define __LINUX_DMA_FENCE_H

#include <linux/err.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

struct dma_fence;
struct dma_fence_ops;
struct dma_fence_cb;

/**
 * struct dma_fence - software synchronization primitive
 * @refcount: refcount for this fence
 * @ops: dma_fence_ops associated with this fence
 * @rcu: used for releasing fence with kfree_rcu
 * @cb_list: list of all callbacks to call
 * @lock: spin_lock_irqsave used for locking
 * @context: execution context this fence belongs to, returned by
 *           dma_fence_context_alloc()
 * @seqno: the sequence number of this fence inside the execution context,
 * can be compared to decide which fence would be signaled later.
 * @flags: A mask of DMA_FENCE_FLAG_* defined below
 * @timestamp: Timestamp when the fence was signaled.
 * @status: Optional, only valid if < 0, must be set before calling
 * dma_fence_signal, indicates that the fence has completed with an error.
 *
 * the flags member must be manipulated and read using the appropriate
 * atomic ops (bit_*), so taking the spinlock will not be needed most
 * of the time.
 *
 * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
 * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
 * implementer of the fence for its own purposes. Can be used in different
 * ways by different fence implementers, so do not rely on this.
 *
 * Since atomic bitops are used, this is not guaranteed to be the case.
 * Particularly, if the bit was set, but dma_fence_signal was called right
 * before this bit was set, it would have been able to set the
 * DMA_FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
 * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
 * after dma_fence_signal was called, any enable_signaling call will have either
 * been completed, or never called at all.
 */
struct dma_fence {
	struct kref refcount;
	const struct dma_fence_ops *ops;
	struct rcu_head rcu;
	struct list_head cb_list;
	spinlock_t *lock;
	u64 context;
	unsigned seqno;
	unsigned long flags;
	ktime_t timestamp;
	int status;
};

enum dma_fence_flag_bits {
	DMA_FENCE_FLAG_SIGNALED_BIT,
	DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
};

typedef void (*dma_fence_func_t)(struct dma_fence *fence,
				 struct dma_fence_cb *cb);

/**
 * struct dma_fence_cb - callback for dma_fence_add_callback
 * @node: used by dma_fence_add_callback to append this struct to fence::cb_list
 * @func: dma_fence_func_t to call
 *
 * This struct will be initialized by dma_fence_add_callback, additional
 * data can be passed along by embedding dma_fence_cb in another struct.
 */
struct dma_fence_cb {
	struct list_head node;
	dma_fence_func_t func;
};

/**
 * struct dma_fence_ops - operations implemented for fence
 * @get_driver_name: returns the driver name.
 * @get_timeline_name: return the name of the context this fence belongs to.
 * @enable_signaling: enable software signaling of fence.
 * @signaled: [optional] peek whether the fence is signaled, can be null.
 * @wait: custom wait implementation, or dma_fence_default_wait.
 * @release: [optional] called on destruction of fence, can be null
 * @fill_driver_data: [optional] callback to fill in free-form debug info
 * Returns amount of bytes filled, or -errno.
 * @fence_value_str: [optional] fills in the value of the fence as a string
 * @timeline_value_str: [optional] fills in the current value of the timeline
 * as a string
 *
 * Notes on enable_signaling:
 * For fence implementations that have the capability for hw->hw
 * signaling, they can implement this op to enable the necessary
 * irqs, or insert commands into cmdstream, etc. This is called
 * in the first wait() or add_callback() path to let the fence
 * implementation know that there is another driver waiting on
 * the signal (ie. hw->sw case).
 *
 * This function can be called from atomic context, but not
 * from irq context, so normal spinlocks can be used.
 *
 * A return value of false indicates the fence already passed,
 * or some failure occurred that made it impossible to enable
 * signaling. True indicates successful enabling.
 *
 * fence->status may be set in enable_signaling, but only when false is
 * returned.
 *
 * Calling dma_fence_signal before enable_signaling is called allows
 * for a tiny race window in which enable_signaling is called during,
 * before, or after dma_fence_signal. To fight this, it is recommended
 * that before enable_signaling returns true an extra reference is
 * taken on the fence, to be released when the fence is signaled.
 * This will mean dma_fence_signal will still be called twice, but
 * the second time will be a noop since it was already signaled.
 *
 * Notes on signaled:
 * May set fence->status if returning true.
 *
 * Notes on wait:
 * Must not be NULL, set to dma_fence_default_wait for default implementation.
 * the dma_fence_default_wait implementation should work for any fence, as long
 * as enable_signaling works correctly.
 *
 * Must return -ERESTARTSYS if the wait is intr = true and the wait was
 * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
 * timed out. Can also return other error values on custom implementations,
 * which should be treated as if the fence is signaled. For example a hardware
|
||||
* lockup could be reported like that.
|
||||
*
|
||||
* Notes on release:
|
||||
* Can be NULL, this function allows additional commands to run on
|
||||
* destruction of the fence. Can be called from irq context.
|
||||
* If pointer is set to NULL, kfree will get called instead.
|
||||
*/
|
||||
|
||||
struct dma_fence_ops {
|
||||
const char * (*get_driver_name)(struct dma_fence *fence);
|
||||
const char * (*get_timeline_name)(struct dma_fence *fence);
|
||||
bool (*enable_signaling)(struct dma_fence *fence);
|
||||
bool (*signaled)(struct dma_fence *fence);
|
||||
signed long (*wait)(struct dma_fence *fence,
|
||||
bool intr, signed long timeout);
|
||||
void (*release)(struct dma_fence *fence);
|
||||
|
||||
int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
|
||||
void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
|
||||
void (*timeline_value_str)(struct dma_fence *fence,
|
||||
char *str, int size);
|
||||
};
|
||||
|
||||
void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
|
||||
spinlock_t *lock, u64 context, unsigned seqno);
|
||||
|
||||
void dma_fence_release(struct kref *kref);
|
||||
void dma_fence_free(struct dma_fence *fence);
|
||||
|
||||
/**
|
||||
* dma_fence_put - decreases refcount of the fence
|
||||
* @fence: [in] fence to reduce refcount of
|
||||
*/
|
||||
static inline void dma_fence_put(struct dma_fence *fence)
|
||||
{
|
||||
if (fence)
|
||||
kref_put(&fence->refcount, dma_fence_release);
|
||||
}
|
||||
|
||||
/**
|
||||
* dma_fence_get - increases refcount of the fence
|
||||
* @fence: [in] fence to increase refcount of
|
||||
*
|
||||
* Returns the same fence, with refcount increased by 1.
|
||||
*/
|
||||
static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
|
||||
{
|
||||
if (fence)
|
||||
kref_get(&fence->refcount);
|
||||
return fence;
|
||||
}
|
||||
|
||||
/**
|
||||
* dma_fence_get_rcu - get a fence from a reservation_object_list with
|
||||
* rcu read lock
|
||||
* @fence: [in] fence to increase refcount of
|
||||
*
|
||||
* Function returns NULL if no refcount could be obtained, or the fence.
|
||||
*/
|
||||
static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
|
||||
{
|
||||
if (kref_get_unless_zero(&fence->refcount))
|
||||
return fence;
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence
|
||||
* @fencep: [in] pointer to fence to increase refcount of
|
||||
*
|
||||
* Function returns NULL if no refcount could be obtained, or the fence.
|
||||
* This function handles acquiring a reference to a fence that may be
|
||||
* reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
|
||||
* so long as the caller is using RCU on the pointer to the fence.
|
||||
*
|
||||
* An alternative mechanism is to employ a seqlock to protect a bunch of
|
||||
* fences, such as used by struct reservation_object. When using a seqlock,
|
||||
* the seqlock must be taken before and checked after a reference to the
|
||||
* fence is acquired (as shown here).
|
||||
*
|
||||
* The caller is required to hold the RCU read lock.
|
||||
*/
|
||||
static inline struct dma_fence *
|
||||
dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
|
||||
{
|
||||
do {
|
||||
struct dma_fence *fence;
|
||||
|
||||
fence = rcu_dereference(*fencep);
|
||||
if (!fence || !dma_fence_get_rcu(fence))
|
||||
return NULL;
|
||||
|
||||
/* The atomic_inc_not_zero() inside dma_fence_get_rcu()
|
||||
* provides a full memory barrier upon success (such as now).
|
||||
* This is paired with the write barrier from assigning
|
||||
* to the __rcu protected fence pointer so that if that
|
||||
* pointer still matches the current fence, we know we
|
||||
* have successfully acquire a reference to it. If it no
|
||||
* longer matches, we are holding a reference to some other
|
||||
* reallocated pointer. This is possible if the allocator
|
||||
* is using a freelist like SLAB_DESTROY_BY_RCU where the
|
||||
* fence remains valid for the RCU grace period, but it
|
||||
* may be reallocated. When using such allocators, we are
|
||||
* responsible for ensuring the reference we get is to
|
||||
* the right fence, as below.
|
||||
*/
|
||||
if (fence == rcu_access_pointer(*fencep))
|
||||
return rcu_pointer_handoff(fence);
|
||||
|
||||
dma_fence_put(fence);
|
||||
} while (1);
|
||||
}
|
||||
|
||||
int dma_fence_signal(struct dma_fence *fence);
|
||||
int dma_fence_signal_locked(struct dma_fence *fence);
|
||||
signed long dma_fence_default_wait(struct dma_fence *fence,
|
||||
bool intr, signed long timeout);
|
||||
int dma_fence_add_callback(struct dma_fence *fence,
|
||||
struct dma_fence_cb *cb,
|
||||
dma_fence_func_t func);
|
||||
bool dma_fence_remove_callback(struct dma_fence *fence,
|
||||
struct dma_fence_cb *cb);
|
||||
void dma_fence_enable_sw_signaling(struct dma_fence *fence);
|
||||
|
||||
/**
|
||||
* dma_fence_is_signaled_locked - Return an indication if the fence
|
||||
* is signaled yet.
|
||||
* @fence: [in] the fence to check
|
||||
*
|
||||
* Returns true if the fence was already signaled, false if not. Since this
|
||||
* function doesn't enable signaling, it is not guaranteed to ever return
|
||||
* true if dma_fence_add_callback, dma_fence_wait or
|
||||
* dma_fence_enable_sw_signaling haven't been called before.
|
||||
*
|
||||
* This function requires fence->lock to be held.
|
||||
*/
|
||||
static inline bool
|
||||
dma_fence_is_signaled_locked(struct dma_fence *fence)
|
||||
{
|
||||
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
|
||||
return true;
|
||||
|
||||
if (fence->ops->signaled && fence->ops->signaled(fence)) {
|
||||
dma_fence_signal_locked(fence);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* dma_fence_is_signaled - Return an indication if the fence is signaled yet.
|
||||
* @fence: [in] the fence to check
|
||||
*
|
||||
* Returns true if the fence was already signaled, false if not. Since this
|
||||
* function doesn't enable signaling, it is not guaranteed to ever return
|
||||
* true if dma_fence_add_callback, dma_fence_wait or
|
||||
* dma_fence_enable_sw_signaling haven't been called before.
|
||||
*
|
||||
* It's recommended for seqno fences to call dma_fence_signal when the
|
||||
* operation is complete, it makes it possible to prevent issues from
|
||||
* wraparound between time of issue and time of use by checking the return
|
||||
* value of this function before calling hardware-specific wait instructions.
|
||||
*/
|
||||
static inline bool
|
||||
dma_fence_is_signaled(struct dma_fence *fence)
|
||||
{
|
||||
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
|
||||
return true;
|
||||
|
||||
if (fence->ops->signaled && fence->ops->signaled(fence)) {
|
||||
dma_fence_signal(fence);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* dma_fence_is_later - return if f1 is chronologically later than f2
|
||||
* @f1: [in] the first fence from the same context
|
||||
* @f2: [in] the second fence from the same context
|
||||
*
|
||||
* Returns true if f1 is chronologically later than f2. Both fences must be
|
||||
* from the same context, since a seqno is not re-used across contexts.
|
||||
*/
|
||||
static inline bool dma_fence_is_later(struct dma_fence *f1,
|
||||
struct dma_fence *f2)
|
||||
{
|
||||
if (WARN_ON(f1->context != f2->context))
|
||||
return false;
|
||||
|
||||
return (int)(f1->seqno - f2->seqno) > 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dma_fence_later - return the chronologically later fence
|
||||
* @f1: [in] the first fence from the same context
|
||||
* @f2: [in] the second fence from the same context
|
||||
*
|
||||
* Returns NULL if both fences are signaled, otherwise the fence that would be
|
||||
* signaled last. Both fences must be from the same context, since a seqno is
|
||||
* not re-used across contexts.
|
||||
*/
|
||||
static inline struct dma_fence *dma_fence_later(struct dma_fence *f1,
|
||||
struct dma_fence *f2)
|
||||
{
|
||||
if (WARN_ON(f1->context != f2->context))
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never
|
||||
* have been set if enable_signaling wasn't called, and enabling that
|
||||
* here is overkill.
|
||||
*/
|
||||
if (dma_fence_is_later(f1, f2))
|
||||
return dma_fence_is_signaled(f1) ? NULL : f1;
|
||||
else
|
||||
return dma_fence_is_signaled(f2) ? NULL : f2;
|
||||
}
|
||||
|
||||
signed long dma_fence_wait_timeout(struct dma_fence *,
|
||||
bool intr, signed long timeout);
|
||||
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
|
||||
uint32_t count,
|
||||
bool intr, signed long timeout,
|
||||
uint32_t *idx);
|
||||
|
||||
/**
|
||||
* dma_fence_wait - sleep until the fence gets signaled
|
||||
* @fence: [in] the fence to wait on
|
||||
* @intr: [in] if true, do an interruptible wait
|
||||
*
|
||||
* This function will return -ERESTARTSYS if interrupted by a signal,
|
||||
* or 0 if the fence was signaled. Other error values may be
|
||||
* returned on custom implementations.
|
||||
*
|
||||
* Performs a synchronous wait on this fence. It is assumed the caller
|
||||
* directly or indirectly holds a reference to the fence, otherwise the
|
||||
* fence might be freed before return, resulting in undefined behavior.
|
||||
*/
|
||||
static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
|
||||
{
|
||||
signed long ret;
|
||||
|
||||
/* Since dma_fence_wait_timeout cannot timeout with
|
||||
* MAX_SCHEDULE_TIMEOUT, only valid return values are
|
||||
* -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
|
||||
*/
|
||||
ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
|
||||
|
||||
return ret < 0 ? ret : 0;
|
||||
}
|
||||
|
||||
u64 dma_fence_context_alloc(unsigned num);
|
||||
|
||||
#define DMA_FENCE_TRACE(f, fmt, args...) \
|
||||
do { \
|
||||
struct dma_fence *__ff = (f); \
|
||||
if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
|
||||
pr_info("f %llu#%u: " fmt, \
|
||||
__ff->context, __ff->seqno, ##args); \
|
||||
} while (0)
|
||||
|
||||
#define DMA_FENCE_WARN(f, fmt, args...) \
|
||||
do { \
|
||||
struct dma_fence *__ff = (f); \
|
||||
pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
|
||||
##args); \
|
||||
} while (0)
|
||||
|
||||
#define DMA_FENCE_ERR(f, fmt, args...) \
|
||||
do { \
|
||||
struct dma_fence *__ff = (f); \
|
||||
pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
|
||||
##args); \
|
||||
} while (0)
|
||||
|
||||
#endif /* __LINUX_DMA_FENCE_H */
|
||||
|
|
@ -61,6 +61,10 @@ void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
|
|||
enum dma_data_direction dir, unsigned long attrs);
|
||||
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||
enum dma_data_direction dir, unsigned long attrs);
|
||||
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs);
|
||||
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs);
|
||||
int iommu_dma_supported(struct device *dev, u64 mask);
|
||||
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
|
||||
|
||||
|
|
|
|||
|
|
@ -243,29 +243,33 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
|
|||
ops->unmap_sg(dev, sg, nents, dir, attrs);
|
||||
}
|
||||
|
||||
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
|
||||
size_t offset, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
|
||||
struct page *page,
|
||||
size_t offset, size_t size,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
dma_addr_t addr;
|
||||
|
||||
kmemcheck_mark_initialized(page_address(page) + offset, size);
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
addr = ops->map_page(dev, page, offset, size, dir, 0);
|
||||
addr = ops->map_page(dev, page, offset, size, dir, attrs);
|
||||
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
static inline void dma_unmap_page_attrs(struct device *dev,
|
||||
dma_addr_t addr, size_t size,
|
||||
enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
if (ops->unmap_page)
|
||||
ops->unmap_page(dev, addr, size, dir, 0);
|
||||
ops->unmap_page(dev, addr, size, dir, attrs);
|
||||
debug_dma_unmap_page(dev, addr, size, dir, false);
|
||||
}
|
||||
|
||||
|
|
@ -385,6 +389,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
|||
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
|
||||
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
|
||||
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
|
||||
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
|
||||
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
|
||||
|
||||
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
|
||||
void *cpu_addr, dma_addr_t dma_addr, size_t size);
|
||||
|
|
|
|||
|
|
@ -336,6 +336,12 @@ enum dma_slave_buswidth {
|
|||
* may or may not be applicable on memory sources.
|
||||
* @dst_maxburst: same as src_maxburst but for destination target
|
||||
* mutatis mutandis.
|
||||
* @src_port_window_size: The length of the register area in words the data need
|
||||
* to be accessed on the device side. It is only used for devices which is using
|
||||
* an area instead of a single register to receive the data. Typically the DMA
|
||||
* loops in this area in order to transfer the data.
|
||||
* @dst_port_window_size: same as src_port_window_size but for the destination
|
||||
* port.
|
||||
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
|
||||
* with 'true' if peripheral should be flow controller. Direction will be
|
||||
* selected at Runtime.
|
||||
|
|
@ -363,6 +369,8 @@ struct dma_slave_config {
|
|||
enum dma_slave_buswidth dst_addr_width;
|
||||
u32 src_maxburst;
|
||||
u32 dst_maxburst;
|
||||
u32 src_port_window_size;
|
||||
u32 dst_port_window_size;
|
||||
bool device_fc;
|
||||
unsigned int slave_id;
|
||||
};
|
||||
|
|
|
|||
|
|
@ -67,7 +67,7 @@
|
|||
* genl_magic_func.h
|
||||
* generates an entry in the static genl_ops array,
|
||||
* and static register/unregister functions to
|
||||
* genl_register_family_with_ops().
|
||||
* genl_register_family().
|
||||
*
|
||||
* flags and handler:
|
||||
* GENL_op_init( .doit = x, .dumpit = y, .flags = something)
|
||||
|
|
|
|||
|
|
@ -50,6 +50,6 @@ dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base,
|
|||
unsigned long freq);
|
||||
void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs);
|
||||
void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs);
|
||||
cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs);
|
||||
u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs);
|
||||
|
||||
#endif /* __DW_APB_TIMER_H__ */
|
||||
|
|
|
|||
|
|
@ -18,6 +18,8 @@
|
|||
#include <linux/workqueue.h>
|
||||
#include <linux/debugfs.h>
|
||||
|
||||
#define EDAC_DEVICE_NAME_LEN 31
|
||||
|
||||
struct device;
|
||||
|
||||
#define EDAC_OPSTATE_INVAL -1
|
||||
|
|
@ -128,12 +130,21 @@ enum dev_type {
|
|||
* fatal (maybe it is on an unused memory area,
|
||||
* or the memory controller could recover from
|
||||
* it for example, by re-trying the operation).
|
||||
* @HW_EVENT_ERR_DEFERRED: Deferred Error - Indicates an uncorrectable
|
||||
* error whose handling is not urgent. This could
|
||||
* be due to hardware data poisoning where the
|
||||
* system can continue operation until the poisoned
|
||||
* data is consumed. Preemptive measures may also
|
||||
* be taken, e.g. offlining pages, etc.
|
||||
* @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not
|
||||
* be recovered.
|
||||
* @HW_EVENT_ERR_INFO: Informational - The CPER spec defines a forth
|
||||
* type of error: informational logs.
|
||||
*/
|
||||
enum hw_event_mc_err_type {
|
||||
HW_EVENT_ERR_CORRECTED,
|
||||
HW_EVENT_ERR_UNCORRECTED,
|
||||
HW_EVENT_ERR_DEFERRED,
|
||||
HW_EVENT_ERR_FATAL,
|
||||
HW_EVENT_ERR_INFO,
|
||||
};
|
||||
|
|
@ -145,6 +156,8 @@ static inline char *mc_event_error_type(const unsigned int err_type)
|
|||
return "Corrected";
|
||||
case HW_EVENT_ERR_UNCORRECTED:
|
||||
return "Uncorrected";
|
||||
case HW_EVENT_ERR_DEFERRED:
|
||||
return "Deferred";
|
||||
case HW_EVENT_ERR_FATAL:
|
||||
return "Fatal";
|
||||
default:
|
||||
|
|
@ -157,7 +170,7 @@ static inline char *mc_event_error_type(const unsigned int err_type)
|
|||
* enum mem_type - memory types. For a more detailed reference, please see
|
||||
* http://en.wikipedia.org/wiki/DRAM
|
||||
*
|
||||
* @MEM_EMPTY Empty csrow
|
||||
* @MEM_EMPTY: Empty csrow
|
||||
* @MEM_RESERVED: Reserved csrow type
|
||||
* @MEM_UNKNOWN: Unknown csrow type
|
||||
* @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995.
|
||||
|
|
@ -192,10 +205,11 @@ static inline char *mc_event_error_type(const unsigned int err_type)
|
|||
* @MEM_DDR3: DDR3 RAM
|
||||
* @MEM_RDDR3: Registered DDR3 RAM
|
||||
* This is a variant of the DDR3 memories.
|
||||
* @MEM_LRDDR3 Load-Reduced DDR3 memory.
|
||||
* @MEM_LRDDR3: Load-Reduced DDR3 memory.
|
||||
* @MEM_DDR4: Unbuffered DDR4 RAM
|
||||
* @MEM_RDDR4: Registered DDR4 RAM
|
||||
* This is a variant of the DDR4 memories.
|
||||
* @MEM_LRDDR4: Load-Reduced DDR4 memory.
|
||||
*/
|
||||
enum mem_type {
|
||||
MEM_EMPTY = 0,
|
||||
|
|
@ -218,6 +232,7 @@ enum mem_type {
|
|||
MEM_LRDDR3,
|
||||
MEM_DDR4,
|
||||
MEM_RDDR4,
|
||||
MEM_LRDDR4,
|
||||
};
|
||||
|
||||
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
|
||||
|
|
@ -239,6 +254,7 @@ enum mem_type {
|
|||
#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
|
||||
#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
|
||||
#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
|
||||
#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
|
||||
|
||||
/**
|
||||
* enum edac-type - Error Detection and Correction capabilities and mode
|
||||
|
|
@ -278,7 +294,7 @@ enum edac_type {
|
|||
|
||||
/**
|
||||
* enum scrub_type - scrubbing capabilities
|
||||
* @SCRUB_UNKNOWN Unknown if scrubber is available
|
||||
* @SCRUB_UNKNOWN: Unknown if scrubber is available
|
||||
* @SCRUB_NONE: No scrubber
|
||||
* @SCRUB_SW_PROG: SW progressive (sequential) scrubbing
|
||||
* @SCRUB_SW_SRC: Software scrub only errors
|
||||
|
|
@ -287,7 +303,7 @@ enum edac_type {
|
|||
* @SCRUB_HW_PROG: HW progressive (sequential) scrubbing
|
||||
* @SCRUB_HW_SRC: Hardware scrub only errors
|
||||
* @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error
|
||||
* SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable
|
||||
* @SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable
|
||||
*/
|
||||
enum scrub_type {
|
||||
SCRUB_UNKNOWN = 0,
|
||||
|
|
@ -320,114 +336,6 @@ enum scrub_type {
|
|||
#define OP_RUNNING_POLL_INTR 0x203
|
||||
#define OP_OFFLINE 0x300
|
||||
|
||||
/*
|
||||
* Concepts used at the EDAC subsystem
|
||||
*
|
||||
* There are several things to be aware of that aren't at all obvious:
|
||||
*
|
||||
* SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
|
||||
*
|
||||
* These are some of the many terms that are thrown about that don't always
|
||||
* mean what people think they mean (Inconceivable!). In the interest of
|
||||
* creating a common ground for discussion, terms and their definitions
|
||||
* will be established.
|
||||
*
|
||||
* Memory devices: The individual DRAM chips on a memory stick. These
|
||||
* devices commonly output 4 and 8 bits each (x4, x8).
|
||||
* Grouping several of these in parallel provides the
|
||||
* number of bits that the memory controller expects:
|
||||
* typically 72 bits, in order to provide 64 bits +
|
||||
* 8 bits of ECC data.
|
||||
*
|
||||
* Memory Stick: A printed circuit board that aggregates multiple
|
||||
* memory devices in parallel. In general, this is the
|
||||
* Field Replaceable Unit (FRU) which gets replaced, in
|
||||
* the case of excessive errors. Most often it is also
|
||||
* called DIMM (Dual Inline Memory Module).
|
||||
*
|
||||
* Memory Socket: A physical connector on the motherboard that accepts
|
||||
* a single memory stick. Also called as "slot" on several
|
||||
* datasheets.
|
||||
*
|
||||
* Channel: A memory controller channel, responsible to communicate
|
||||
* with a group of DIMMs. Each channel has its own
|
||||
* independent control (command) and data bus, and can
|
||||
* be used independently or grouped with other channels.
|
||||
*
|
||||
* Branch: It is typically the highest hierarchy on a
|
||||
* Fully-Buffered DIMM memory controller.
|
||||
* Typically, it contains two channels.
|
||||
* Two channels at the same branch can be used in single
|
||||
* mode or in lockstep mode.
|
||||
* When lockstep is enabled, the cacheline is doubled,
|
||||
* but it generally brings some performance penalty.
|
||||
* Also, it is generally not possible to point to just one
|
||||
* memory stick when an error occurs, as the error
|
||||
* correction code is calculated using two DIMMs instead
|
||||
* of one. Due to that, it is capable of correcting more
|
||||
* errors than on single mode.
|
||||
*
|
||||
* Single-channel: The data accessed by the memory controller is contained
|
||||
* into one dimm only. E. g. if the data is 64 bits-wide,
|
||||
* the data flows to the CPU using one 64 bits parallel
|
||||
* access.
|
||||
* Typically used with SDR, DDR, DDR2 and DDR3 memories.
|
||||
* FB-DIMM and RAMBUS use a different concept for channel,
|
||||
* so this concept doesn't apply there.
|
||||
*
|
||||
* Double-channel: The data size accessed by the memory controller is
|
||||
* interlaced into two dimms, accessed at the same time.
|
||||
* E. g. if the DIMM is 64 bits-wide (72 bits with ECC),
|
||||
* the data flows to the CPU using a 128 bits parallel
|
||||
* access.
|
||||
*
|
||||
* Chip-select row: This is the name of the DRAM signal used to select the
|
||||
* DRAM ranks to be accessed. Common chip-select rows for
|
||||
* single channel are 64 bits, for dual channel 128 bits.
|
||||
* It may not be visible by the memory controller, as some
|
||||
* DIMM types have a memory buffer that can hide direct
|
||||
* access to it from the Memory Controller.
|
||||
*
|
||||
* Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory.
|
||||
* Motherboards commonly drive two chip-select pins to
|
||||
* a memory stick. A single-ranked stick, will occupy
|
||||
* only one of those rows. The other will be unused.
|
||||
*
|
||||
* Double-Ranked stick: A double-ranked stick has two chip-select rows which
|
||||
* access different sets of memory devices. The two
|
||||
* rows cannot be accessed concurrently.
|
||||
*
|
||||
* Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
|
||||
* A double-sided stick has two chip-select rows which
|
||||
* access different sets of memory devices. The two
|
||||
* rows cannot be accessed concurrently. "Double-sided"
|
||||
* is irrespective of the memory devices being mounted
|
||||
* on both sides of the memory stick.
|
||||
*
|
||||
* Socket set: All of the memory sticks that are required for
|
||||
* a single memory access or all of the memory sticks
|
||||
* spanned by a chip-select row. A single socket set
|
||||
* has two chip-select rows and if double-sided sticks
|
||||
* are used these will occupy those chip-select rows.
|
||||
*
|
||||
* Bank: This term is avoided because it is unclear when
|
||||
* needing to distinguish between chip-select rows and
|
||||
* socket sets.
|
||||
*
|
||||
* Controller pages:
|
||||
*
|
||||
* Physical pages:
|
||||
*
|
||||
* Virtual pages:
|
||||
*
|
||||
*
|
||||
* STRUCTURE ORGANIZATION AND CHOICES
|
||||
*
|
||||
*
|
||||
*
|
||||
* PS - I enjoyed writing all that about as much as you enjoyed reading it.
|
||||
*/
|
||||
|
||||
/**
|
||||
* enum edac_mc_layer - memory controller hierarchy layer
|
||||
*
|
||||
|
|
@ -452,7 +360,7 @@ enum edac_mc_layer_type {
|
|||
|
||||
/**
|
||||
* struct edac_mc_layer - describes the memory controller hierarchy
|
||||
* @layer: layer type
|
||||
* @type: layer type
|
||||
* @size: number of components per layer. For example,
|
||||
* if the channel layer has two channels, size = 2
|
||||
* @is_virt_csrow: This layer is part of the "csrow" when old API
|
||||
|
|
@ -475,24 +383,28 @@ struct edac_mc_layer {
|
|||
#define EDAC_MAX_LAYERS 3
|
||||
|
||||
/**
|
||||
* EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer array
|
||||
* for the element given by [layer0,layer1,layer2] position
|
||||
* EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer
|
||||
* array for the element given by [layer0,layer1,layer2]
|
||||
* position
|
||||
*
|
||||
* @layers: a struct edac_mc_layer array, describing how many elements
|
||||
* were allocated for each layer
|
||||
* @n_layers: Number of layers at the @layers array
|
||||
* @nlayers: Number of layers at the @layers array
|
||||
* @layer0: layer0 position
|
||||
* @layer1: layer1 position. Unused if n_layers < 2
|
||||
* @layer2: layer2 position. Unused if n_layers < 3
|
||||
*
|
||||
* For 1 layer, this macro returns &var[layer0] - &var
|
||||
* For 1 layer, this macro returns "var[layer0] - var";
|
||||
*
|
||||
* For 2 layers, this macro is similar to allocate a bi-dimensional array
|
||||
* and to return "&var[layer0][layer1] - &var"
|
||||
* and to return "var[layer0][layer1] - var";
|
||||
*
|
||||
* For 3 layers, this macro is similar to allocate a tri-dimensional array
|
||||
* and to return "&var[layer0][layer1][layer2] - &var"
|
||||
* and to return "var[layer0][layer1][layer2] - var".
|
||||
*
|
||||
* A loop could be used here to make it more generic, but, as we only have
|
||||
* 3 layers, this is a little faster.
|
||||
*
|
||||
* By design, layers can never be 0 or more than 3. If that ever happens,
|
||||
* a NULL is returned, causing an OOPS during the memory allocation routine,
|
||||
* with would point to the developer that he's doing something wrong.
|
||||
|
|
@ -519,16 +431,18 @@ struct edac_mc_layer {
|
|||
* were allocated for each layer
|
||||
* @var: name of the var where we want to get the pointer
|
||||
* (like mci->dimms)
|
||||
* @n_layers: Number of layers at the @layers array
|
||||
* @nlayers: Number of layers at the @layers array
|
||||
* @layer0: layer0 position
|
||||
* @layer1: layer1 position. Unused if n_layers < 2
|
||||
* @layer2: layer2 position. Unused if n_layers < 3
|
||||
*
|
||||
* For 1 layer, this macro returns &var[layer0]
|
||||
* For 1 layer, this macro returns "var[layer0]";
|
||||
*
|
||||
* For 2 layers, this macro is similar to allocate a bi-dimensional array
|
||||
* and to return "&var[layer0][layer1]"
|
||||
* and to return "var[layer0][layer1]";
|
||||
*
|
||||
* For 3 layers, this macro is similar to allocate a tri-dimensional array
|
||||
* and to return "&var[layer0][layer1][layer2]"
|
||||
* and to return "var[layer0][layer1][layer2]";
|
||||
*/
|
||||
#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
|
||||
typeof(*var) __p; \
|
||||
|
|
@ -614,7 +528,7 @@ struct errcount_attribute_data {
|
|||
};
|
||||
|
||||
/**
|
||||
* edac_raw_error_desc - Raw error report structure
|
||||
* struct edac_raw_error_desc - Raw error report structure
|
||||
* @grain: minimum granularity for an error report, in bytes
|
||||
* @error_count: number of errors of the same type
|
||||
* @top_layer: top layer of the error (layer[0])
|
||||
|
|
|
|||
|
|
@ -443,6 +443,22 @@ typedef struct {
|
|||
#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
|
||||
#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
|
||||
|
||||
typedef struct {
|
||||
u32 version;
|
||||
u32 get;
|
||||
u32 set;
|
||||
u32 del;
|
||||
u32 get_all;
|
||||
} apple_properties_protocol_32_t;
|
||||
|
||||
typedef struct {
|
||||
u64 version;
|
||||
u64 get;
|
||||
u64 set;
|
||||
u64 del;
|
||||
u64 get_all;
|
||||
} apple_properties_protocol_64_t;
|
||||
|
||||
/*
|
||||
* Types and defines for EFI ResetSystem
|
||||
*/
|
||||
|
|
@ -589,8 +605,10 @@ void efi_native_runtime_setup(void);
|
|||
#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
|
||||
#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
|
||||
#define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
|
||||
#define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61)
|
||||
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
|
||||
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
|
||||
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
|
||||
|
||||
/*
|
||||
* This GUID is used to pass to the kernel proper the struct screen_info
|
||||
|
|
@ -599,6 +617,7 @@ void efi_native_runtime_setup(void);
|
|||
*/
|
||||
#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
|
||||
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
|
||||
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
|
||||
|
||||
typedef struct {
|
||||
efi_guid_t guid;
|
||||
|
|
@ -872,6 +891,7 @@ extern struct efi {
|
|||
unsigned long esrt; /* ESRT table */
|
||||
unsigned long properties_table; /* properties table */
|
||||
unsigned long mem_attr_table; /* memory attributes table */
|
||||
unsigned long rng_seed; /* UEFI firmware random seed */
|
||||
efi_get_time_t *get_time;
|
||||
efi_set_time_t *set_time;
|
||||
efi_get_wakeup_time_t *get_wakeup_time;
|
||||
|
|
@ -1145,6 +1165,26 @@ struct efi_generic_dev_path {
|
|||
u16 length;
|
||||
} __attribute ((packed));
|
||||
|
||||
struct efi_dev_path {
|
||||
u8 type; /* can be replaced with unnamed */
|
||||
u8 sub_type; /* struct efi_generic_dev_path; */
|
||||
u16 length; /* once we've moved to -std=c11 */
|
||||
union {
|
||||
struct {
|
||||
u32 hid;
|
||||
u32 uid;
|
||||
} acpi;
|
||||
struct {
|
||||
u8 fn;
|
||||
u8 dev;
|
||||
} pci;
|
||||
};
|
||||
} __attribute ((packed));
|
||||
|
||||
#if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER)
|
||||
struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len);
|
||||
#endif
|
||||
|
||||
static inline void memrange_efi_to_native(u64 *addr, u64 *npages)
|
||||
{
|
||||
*npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr);
|
||||
|
|
@ -1493,4 +1533,10 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
|
|||
struct efi_boot_memmap *map,
|
||||
void *priv,
|
||||
efi_exit_boot_map_processing priv_func);
|
||||
|
||||
struct linux_efi_random_seed {
|
||||
u32 size;
|
||||
u8 bits[];
|
||||
};
|
||||
|
||||
#endif /* _LINUX_EFI_H */
|
||||
|
|
|
|||
|
|
@ -30,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int);
|
|||
typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
|
||||
typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
|
||||
typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
|
||||
typedef int (elevator_may_queue_fn) (struct request_queue *, int, int);
|
||||
typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int);
|
||||
|
||||
typedef void (elevator_init_icq_fn) (struct io_cq *);
|
||||
typedef void (elevator_exit_icq_fn) (struct io_cq *);
|
||||
|
|
@ -108,6 +108,11 @@ struct elevator_type
|
|||
|
||||
#define ELV_HASH_BITS 6
|
||||
|
||||
void elv_rqhash_del(struct request_queue *q, struct request *rq);
|
||||
void elv_rqhash_add(struct request_queue *q, struct request *rq);
|
||||
void elv_rqhash_reposition(struct request_queue *q, struct request *rq);
|
||||
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
|
||||
|
||||
/*
|
||||
* each queue has an elevator_queue associated with it
|
||||
*/
|
||||
|
|
@ -139,7 +144,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request
|
|||
extern struct request *elv_latter_request(struct request_queue *, struct request *);
|
||||
extern int elv_register_queue(struct request_queue *q);
|
||||
extern void elv_unregister_queue(struct request_queue *q);
|
||||
extern int elv_may_queue(struct request_queue *, int, int);
|
||||
extern int elv_may_queue(struct request_queue *, unsigned int);
|
||||
extern void elv_completed_request(struct request_queue *, struct request *);
|
||||
extern int elv_set_request(struct request_queue *q, struct request *rq,
|
||||
struct bio *bio, gfp_t gfp_mask);
|
||||
|
|
|
|||
|
|
@ -52,10 +52,17 @@
|
|||
|
||||
#define VERSION_LEN 256
|
||||
#define MAX_VOLUME_NAME 512
|
||||
#define MAX_PATH_LEN 64
|
||||
#define MAX_DEVICES 8
|
||||
|
||||
/*
|
||||
* For superblock
|
||||
*/
|
||||
struct f2fs_device {
|
||||
__u8 path[MAX_PATH_LEN];
|
||||
__le32 total_segments;
|
||||
} __packed;
|
||||
|
||||
struct f2fs_super_block {
|
||||
__le32 magic; /* Magic Number */
|
||||
__le16 major_ver; /* Major Version */
|
||||
|
|
@ -94,7 +101,8 @@ struct f2fs_super_block {
|
|||
__le32 feature; /* defined features */
|
||||
__u8 encryption_level; /* versioning level for encryption */
|
||||
__u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
|
||||
__u8 reserved[871]; /* valid reserved region */
|
||||
struct f2fs_device devs[MAX_DEVICES]; /* device list */
|
||||
__u8 reserved[327]; /* valid reserved region */
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -26,7 +26,6 @@
|
|||
|
||||
#ifdef __KERNEL__
|
||||
__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev);
|
||||
int fddi_change_mtu(struct net_device *dev, int new_mtu);
|
||||
struct net_device *alloc_fddidev(int sizeof_priv);
|
||||
#endif
|
||||
|
||||
|
|
|
|||
|
|
@ -1,83 +0,0 @@
|
|||
/*
|
||||
* fence-array: aggregates fence to be waited together
|
||||
*
|
||||
* Copyright (C) 2016 Collabora Ltd
|
||||
* Copyright (C) 2016 Advanced Micro Devices, Inc.
|
||||
* Authors:
|
||||
* Gustavo Padovan <gustavo@padovan.org>
|
||||
* Christian König <christian.koenig@amd.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_FENCE_ARRAY_H
|
||||
#define __LINUX_FENCE_ARRAY_H
|
||||
|
||||
#include <linux/fence.h>
|
||||
|
||||
/**
|
||||
* struct fence_array_cb - callback helper for fence array
|
||||
* @cb: fence callback structure for signaling
|
||||
* @array: reference to the parent fence array object
|
||||
*/
|
||||
struct fence_array_cb {
|
||||
struct fence_cb cb;
|
||||
struct fence_array *array;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct fence_array - fence to represent an array of fences
|
||||
* @base: fence base class
|
||||
* @lock: spinlock for fence handling
|
||||
* @num_fences: number of fences in the array
|
||||
* @num_pending: fences in the array still pending
|
||||
* @fences: array of the fences
|
||||
*/
|
||||
struct fence_array {
|
||||
struct fence base;
|
||||
|
||||
spinlock_t lock;
|
||||
unsigned num_fences;
|
||||
atomic_t num_pending;
|
||||
struct fence **fences;
|
||||
};
|
||||
|
||||
extern const struct fence_ops fence_array_ops;
|
||||
|
||||
/**
|
||||
* fence_is_array - check if a fence is from the array subsclass
|
||||
*
|
||||
* Return true if it is a fence_array and false otherwise.
|
||||
*/
|
||||
static inline bool fence_is_array(struct fence *fence)
|
||||
{
|
||||
return fence->ops == &fence_array_ops;
|
||||
}
|
||||
|
||||
/**
|
||||
* to_fence_array - cast a fence to a fence_array
|
||||
* @fence: fence to cast to a fence_array
|
||||
*
|
||||
* Returns NULL if the fence is not a fence_array,
|
||||
* or the fence_array otherwise.
|
||||
*/
|
||||
static inline struct fence_array *to_fence_array(struct fence *fence)
|
||||
{
|
||||
if (fence->ops != &fence_array_ops)
|
||||
return NULL;
|
||||
|
||||
return container_of(fence, struct fence_array, base);
|
||||
}
|
||||
|
||||
struct fence_array *fence_array_create(int num_fences, struct fence **fences,
|
||||
u64 context, unsigned seqno,
|
||||
bool signal_on_any);
|
||||
|
||||
#endif /* __LINUX_FENCE_ARRAY_H */
|
||||
|
|
@ -1,378 +0,0 @@
|
|||
/*
|
||||
* Fence mechanism for dma-buf to allow for asynchronous dma access
|
||||
*
|
||||
* Copyright (C) 2012 Canonical Ltd
|
||||
* Copyright (C) 2012 Texas Instruments
|
||||
*
|
||||
* Authors:
|
||||
* Rob Clark <robdclark@gmail.com>
|
||||
* Maarten Lankhorst <maarten.lankhorst@canonical.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef __LINUX_FENCE_H
|
||||
#define __LINUX_FENCE_H
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/printk.h>
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
struct fence;
|
||||
struct fence_ops;
|
||||
struct fence_cb;
|
||||
|
||||
/**
|
||||
* struct fence - software synchronization primitive
|
||||
* @refcount: refcount for this fence
|
||||
* @ops: fence_ops associated with this fence
|
||||
* @rcu: used for releasing fence with kfree_rcu
|
||||
* @cb_list: list of all callbacks to call
|
||||
* @lock: spin_lock_irqsave used for locking
|
||||
* @context: execution context this fence belongs to, returned by
|
||||
* fence_context_alloc()
|
||||
* @seqno: the sequence number of this fence inside the execution context,
|
||||
* can be compared to decide which fence would be signaled later.
|
||||
* @flags: A mask of FENCE_FLAG_* defined below
|
||||
* @timestamp: Timestamp when the fence was signaled.
|
||||
* @status: Optional, only valid if < 0, must be set before calling
|
||||
* fence_signal, indicates that the fence has completed with an error.
|
||||
*
|
||||
* the flags member must be manipulated and read using the appropriate
|
||||
* atomic ops (bit_*), so taking the spinlock will not be needed most
|
||||
* of the time.
|
||||
*
|
||||
* FENCE_FLAG_SIGNALED_BIT - fence is already signaled
|
||||
* FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called*
|
||||
* FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
|
||||
* implementer of the fence for its own purposes. Can be used in different
|
||||
* ways by different fence implementers, so do not rely on this.
|
||||
*
|
||||
* Since atomic bitops are used, this is not guaranteed to be the case.
|
||||
* Particularly, if the bit was set, but fence_signal was called right
|
||||
* before this bit was set, it would have been able to set the
|
||||
* FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
|
||||
* Adding a check for FENCE_FLAG_SIGNALED_BIT after setting
|
||||
* FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
|
||||
* after fence_signal was called, any enable_signaling call will have either
|
||||
* been completed, or never called at all.
|
||||
*/
|
||||
struct fence {
|
||||
struct kref refcount;
|
||||
const struct fence_ops *ops;
|
||||
struct rcu_head rcu;
|
||||
struct list_head cb_list;
|
||||
spinlock_t *lock;
|
||||
u64 context;
|
||||
unsigned seqno;
|
||||
unsigned long flags;
|
||||
ktime_t timestamp;
|
||||
int status;
|
||||
};
|
||||
|
||||
enum fence_flag_bits {
|
||||
FENCE_FLAG_SIGNALED_BIT,
|
||||
FENCE_FLAG_ENABLE_SIGNAL_BIT,
|
||||
FENCE_FLAG_USER_BITS, /* must always be last member */
|
||||
};
|
||||
|
||||
typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb);
|
||||
|
||||
/**
|
||||
* struct fence_cb - callback for fence_add_callback
|
||||
* @node: used by fence_add_callback to append this struct to fence::cb_list
|
||||
* @func: fence_func_t to call
|
||||
*
|
||||
* This struct will be initialized by fence_add_callback, additional
|
||||
* data can be passed along by embedding fence_cb in another struct.
|
||||
*/
|
||||
struct fence_cb {
|
||||
struct list_head node;
|
||||
fence_func_t func;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct fence_ops - operations implemented for fence
|
||||
* @get_driver_name: returns the driver name.
|
||||
* @get_timeline_name: return the name of the context this fence belongs to.
|
||||
* @enable_signaling: enable software signaling of fence.
|
||||
* @signaled: [optional] peek whether the fence is signaled, can be null.
|
||||
* @wait: custom wait implementation, or fence_default_wait.
|
||||
* @release: [optional] called on destruction of fence, can be null
|
||||
* @fill_driver_data: [optional] callback to fill in free-form debug info
|
||||
* Returns amount of bytes filled, or -errno.
|
||||
* @fence_value_str: [optional] fills in the value of the fence as a string
|
||||
* @timeline_value_str: [optional] fills in the current value of the timeline
|
||||
* as a string
|
||||
*
|
||||
* Notes on enable_signaling:
|
||||
* For fence implementations that have the capability for hw->hw
|
||||
* signaling, they can implement this op to enable the necessary
|
||||
* irqs, or insert commands into cmdstream, etc. This is called
|
||||
* in the first wait() or add_callback() path to let the fence
|
||||
* implementation know that there is another driver waiting on
|
||||
* the signal (ie. hw->sw case).
|
||||
*
|
||||
* This function can be called called from atomic context, but not
|
||||
* from irq context, so normal spinlocks can be used.
|
||||
*
|
||||
* A return value of false indicates the fence already passed,
|
||||
* or some failure occurred that made it impossible to enable
|
||||
* signaling. True indicates successful enabling.
|
||||
*
|
||||
* fence->status may be set in enable_signaling, but only when false is
|
||||
* returned.
|
||||
*
|
||||
* Calling fence_signal before enable_signaling is called allows
|
||||
* for a tiny race window in which enable_signaling is called during,
|
||||
* before, or after fence_signal. To fight this, it is recommended
|
||||
* that before enable_signaling returns true an extra reference is
|
||||
* taken on the fence, to be released when the fence is signaled.
|
||||
* This will mean fence_signal will still be called twice, but
|
||||
* the second time will be a noop since it was already signaled.
|
||||
*
|
||||
* Notes on signaled:
|
||||
* May set fence->status if returning true.
|
||||
*
|
||||
* Notes on wait:
|
||||
* Must not be NULL, set to fence_default_wait for default implementation.
|
||||
* the fence_default_wait implementation should work for any fence, as long
|
||||
* as enable_signaling works correctly.
|
||||
*
|
||||
* Must return -ERESTARTSYS if the wait is intr = true and the wait was
|
||||
* interrupted, and remaining jiffies if fence has signaled, or 0 if wait
|
||||
* timed out. Can also return other error values on custom implementations,
|
||||
* which should be treated as if the fence is signaled. For example a hardware
|
||||
* lockup could be reported like that.
|
||||
*
|
||||
* Notes on release:
|
||||
* Can be NULL, this function allows additional commands to run on
|
||||
* destruction of the fence. Can be called from irq context.
|
||||
* If pointer is set to NULL, kfree will get called instead.
|
||||
*/
|
||||
|
||||
struct fence_ops {
|
||||
const char * (*get_driver_name)(struct fence *fence);
|
||||
const char * (*get_timeline_name)(struct fence *fence);
|
||||
bool (*enable_signaling)(struct fence *fence);
|
||||
bool (*signaled)(struct fence *fence);
|
||||
signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
|
||||
void (*release)(struct fence *fence);
|
||||
|
||||
int (*fill_driver_data)(struct fence *fence, void *data, int size);
|
||||
void (*fence_value_str)(struct fence *fence, char *str, int size);
|
||||
void (*timeline_value_str)(struct fence *fence, char *str, int size);
|
||||
};
|
||||
|
||||
void fence_init(struct fence *fence, const struct fence_ops *ops,
|
||||
spinlock_t *lock, u64 context, unsigned seqno);
|
||||
|
||||
void fence_release(struct kref *kref);
|
||||
void fence_free(struct fence *fence);
|
||||
|
||||
/**
|
||||
* fence_get - increases refcount of the fence
|
||||
* @fence: [in] fence to increase refcount of
|
||||
*
|
||||
* Returns the same fence, with refcount increased by 1.
|
||||
*/
|
||||
static inline struct fence *fence_get(struct fence *fence)
|
||||
{
|
||||
if (fence)
|
||||
kref_get(&fence->refcount);
|
||||
return fence;
|
||||
}
|
||||
|
||||
/**
|
||||
* fence_get_rcu - get a fence from a reservation_object_list with rcu read lock
|
||||
* @fence: [in] fence to increase refcount of
|
||||
*
|
||||
* Function returns NULL if no refcount could be obtained, or the fence.
|
||||
*/
|
||||
static inline struct fence *fence_get_rcu(struct fence *fence)
|
||||
{
|
||||
if (kref_get_unless_zero(&fence->refcount))
|
||||
return fence;
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* fence_put - decreases refcount of the fence
|
||||
* @fence: [in] fence to reduce refcount of
|
||||
*/
|
||||
static inline void fence_put(struct fence *fence)
|
||||
{
|
||||
if (fence)
|
||||
kref_put(&fence->refcount, fence_release);
|
||||
}
|
||||
|
||||
int fence_signal(struct fence *fence);
|
||||
int fence_signal_locked(struct fence *fence);
|
||||
signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout);
|
||||
int fence_add_callback(struct fence *fence, struct fence_cb *cb,
|
||||
fence_func_t func);
|
||||
bool fence_remove_callback(struct fence *fence, struct fence_cb *cb);
|
||||
void fence_enable_sw_signaling(struct fence *fence);
|
||||
|
||||
/**
|
||||
* fence_is_signaled_locked - Return an indication if the fence is signaled yet.
|
||||
* @fence: [in] the fence to check
|
||||
*
|
||||
* Returns true if the fence was already signaled, false if not. Since this
|
||||
* function doesn't enable signaling, it is not guaranteed to ever return
|
||||
* true if fence_add_callback, fence_wait or fence_enable_sw_signaling
|
||||
* haven't been called before.
|
||||
*
|
||||
* This function requires fence->lock to be held.
|
||||
*/
|
||||
static inline bool
|
||||
fence_is_signaled_locked(struct fence *fence)
|
||||
{
|
||||
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
|
||||
return true;
|
||||
|
||||
if (fence->ops->signaled && fence->ops->signaled(fence)) {
|
||||
fence_signal_locked(fence);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* fence_is_signaled - Return an indication if the fence is signaled yet.
|
||||
* @fence: [in] the fence to check
|
||||
*
|
||||
* Returns true if the fence was already signaled, false if not. Since this
|
||||
* function doesn't enable signaling, it is not guaranteed to ever return
|
||||
* true if fence_add_callback, fence_wait or fence_enable_sw_signaling
|
||||
* haven't been called before.
|
||||
*
|
||||
* It's recommended for seqno fences to call fence_signal when the
|
||||
* operation is complete, it makes it possible to prevent issues from
|
||||
* wraparound between time of issue and time of use by checking the return
|
||||
* value of this function before calling hardware-specific wait instructions.
|
||||
*/
|
||||
static inline bool
|
||||
fence_is_signaled(struct fence *fence)
|
||||
{
|
||||
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
|
||||
return true;
|
||||
|
||||
if (fence->ops->signaled && fence->ops->signaled(fence)) {
|
||||
fence_signal(fence);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* fence_is_later - return if f1 is chronologically later than f2
|
||||
* @f1: [in] the first fence from the same context
|
||||
* @f2: [in] the second fence from the same context
|
||||
*
|
||||
* Returns true if f1 is chronologically later than f2. Both fences must be
|
||||
* from the same context, since a seqno is not re-used across contexts.
|
||||
*/
|
||||
static inline bool fence_is_later(struct fence *f1, struct fence *f2)
|
||||
{
|
||||
if (WARN_ON(f1->context != f2->context))
|
||||
return false;
|
||||
|
||||
return (int)(f1->seqno - f2->seqno) > 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* fence_later - return the chronologically later fence
|
||||
* @f1: [in] the first fence from the same context
|
||||
* @f2: [in] the second fence from the same context
|
||||
*
|
||||
* Returns NULL if both fences are signaled, otherwise the fence that would be
|
||||
* signaled last. Both fences must be from the same context, since a seqno is
|
||||
* not re-used across contexts.
|
||||
*/
|
||||
static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
|
||||
{
|
||||
if (WARN_ON(f1->context != f2->context))
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been
|
||||
* set if enable_signaling wasn't called, and enabling that here is
|
||||
* overkill.
|
||||
*/
|
||||
if (fence_is_later(f1, f2))
|
||||
return fence_is_signaled(f1) ? NULL : f1;
|
||||
else
|
||||
return fence_is_signaled(f2) ? NULL : f2;
|
||||
}
|
||||
|
||||
signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
|
||||
signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
|
||||
bool intr, signed long timeout);
|
||||
|
||||
/**
|
||||
* fence_wait - sleep until the fence gets signaled
|
||||
* @fence: [in] the fence to wait on
|
||||
* @intr: [in] if true, do an interruptible wait
|
||||
*
|
||||
* This function will return -ERESTARTSYS if interrupted by a signal,
|
||||
* or 0 if the fence was signaled. Other error values may be
|
||||
* returned on custom implementations.
|
||||
*
|
||||
* Performs a synchronous wait on this fence. It is assumed the caller
|
||||
* directly or indirectly holds a reference to the fence, otherwise the
|
||||
* fence might be freed before return, resulting in undefined behavior.
|
||||
*/
|
||||
static inline signed long fence_wait(struct fence *fence, bool intr)
|
||||
{
|
||||
signed long ret;
|
||||
|
||||
/* Since fence_wait_timeout cannot timeout with
|
||||
* MAX_SCHEDULE_TIMEOUT, only valid return values are
|
||||
* -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
|
||||
*/
|
||||
ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
|
||||
|
||||
return ret < 0 ? ret : 0;
|
||||
}
|
||||
|
||||
u64 fence_context_alloc(unsigned num);
|
||||
|
||||
#define FENCE_TRACE(f, fmt, args...) \
|
||||
do { \
|
||||
struct fence *__ff = (f); \
|
||||
if (IS_ENABLED(CONFIG_FENCE_TRACE)) \
|
||||
pr_info("f %llu#%u: " fmt, \
|
||||
__ff->context, __ff->seqno, ##args); \
|
||||
} while (0)
|
||||
|
||||
#define FENCE_WARN(f, fmt, args...) \
|
||||
do { \
|
||||
struct fence *__ff = (f); \
|
||||
pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
|
||||
##args); \
|
||||
} while (0)
|
||||
|
||||
#define FENCE_ERR(f, fmt, args...) \
|
||||
do { \
|
||||
struct fence *__ff = (f); \
|
||||
pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
|
||||
##args); \
|
||||
} while (0)
|
||||
|
||||
#endif /* __LINUX_FENCE_H */

@@ -17,7 +17,7 @@ struct file_operations;
struct vfsmount;
struct dentry;
struct path;
extern struct file *alloc_file(struct path *, fmode_t mode,
extern struct file *alloc_file(const struct path *, fmode_t mode,
        const struct file_operations *fop);

static inline void fput_light(struct file *file, int fput_needed)

@@ -14,6 +14,7 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/cryptohash.h>

#include <net/sch_generic.h>

@@ -402,14 +403,16 @@ struct bpf_prog {
    u16 jited:1,          /* Is our filter JIT'ed? */
        gpl_compatible:1, /* Is filter GPL compatible? */
        cb_access:1,      /* Is control block accessed? */
        dst_needed:1;     /* Do we need dst entry? */
        dst_needed:1,     /* Do we need dst entry? */
        xdp_adjust_head:1; /* Adjusting pkt head? */
    kmemcheck_bitfield_end(meta);
    u32 len;                    /* Number of filter blocks */
    enum bpf_prog_type type;    /* Type of BPF program */
    u32 len;                    /* Number of filter blocks */
    u32 digest[SHA_DIGEST_WORDS]; /* Program digest */
    struct bpf_prog_aux *aux;   /* Auxiliary fields */
    struct sock_fprog_kern *orig_prog; /* Original BPF program */
    unsigned int (*bpf_func)(const struct sk_buff *skb,
                             const struct bpf_insn *filter);
    unsigned int (*bpf_func)(const void *ctx,
                             const struct bpf_insn *insn);
    /* Instructions for interpreter */
    union {
        struct sock_filter insns[0];

@@ -435,10 +438,11 @@ struct bpf_skb_data_end {
struct xdp_buff {
    void *data;
    void *data_end;
    void *data_hard_start;
};

/* compute the linear packet data range [data, data_end) which
 * will be accessed by cls_bpf and act_bpf programs
 * will be accessed by cls_bpf, act_bpf and lwt programs
 */
static inline void bpf_compute_data_end(struct sk_buff *skb)
{

@@ -498,16 +502,27 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
    return BPF_PROG_RUN(prog, skb);
}

static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
                                   struct xdp_buff *xdp)
{
    u32 ret;

    rcu_read_lock();
    ret = BPF_PROG_RUN(prog, (void *)xdp);
    rcu_read_unlock();

    return ret;
}
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
                                            struct xdp_buff *xdp)
{
    /* Caller needs to hold rcu_read_lock() (!), otherwise program
     * can be released while still running, or map elements could be
     * freed early while still having concurrent users. XDP fastpath
     * already takes rcu_read_lock() when fetching the program, so
     * it's not necessary here anymore.
     */
    return BPF_PROG_RUN(prog, xdp);
}

static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
    return prog->len * sizeof(struct bpf_insn);
}

static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
{
    return round_up(bpf_prog_insn_size(prog) +
                    sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
}
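The comment in the new bpf_prog_run_xdp() above shifts the RCU responsibility to the caller. A hedged sketch of the resulting driver-side calling convention; struct my_rx_ring and its xdp_prog field are illustrative stand-ins for a real driver's receive ring:

struct my_rx_ring {
    struct bpf_prog __rcu *xdp_prog;
    /* ... ring state ... */
};

static u32 my_run_xdp(struct my_rx_ring *ring, struct xdp_buff *xdp)
{
    struct bpf_prog *prog;
    u32 act = XDP_PASS;

    rcu_read_lock();    /* pins the program and its maps */
    prog = rcu_dereference(ring->xdp_prog);
    if (prog)
        act = bpf_prog_run_xdp(prog, xdp);
    rcu_read_unlock();

    return act;
}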

static inline unsigned int bpf_prog_size(unsigned int proglen)

@@ -590,11 +605,12 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_skb_data(void *func);
bool bpf_helper_changes_pkt_data(void *func);

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len);
void bpf_warn_invalid_xdp_action(u32 act);
void bpf_warn_invalid_xdp_buffer(void);

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;

include/linux/fpga/fpga-bridge.h (new file, 60 lines)

@@ -0,0 +1,60 @@
#include <linux/device.h>
#include <linux/fpga/fpga-mgr.h>

#ifndef _LINUX_FPGA_BRIDGE_H
#define _LINUX_FPGA_BRIDGE_H

struct fpga_bridge;

/**
 * struct fpga_bridge_ops - ops for low level FPGA bridge drivers
 * @enable_show: returns the FPGA bridge's status
 * @enable_set: set a FPGA bridge as enabled or disabled
 * @fpga_bridge_remove: set FPGA into a specific state during driver remove
 */
struct fpga_bridge_ops {
    int (*enable_show)(struct fpga_bridge *bridge);
    int (*enable_set)(struct fpga_bridge *bridge, bool enable);
    void (*fpga_bridge_remove)(struct fpga_bridge *bridge);
};

/**
 * struct fpga_bridge - FPGA bridge structure
 * @name: name of low level FPGA bridge
 * @dev: FPGA bridge device
 * @mutex: enforces exclusive reference to bridge
 * @br_ops: pointer to struct of FPGA bridge ops
 * @info: fpga image specific information
 * @node: FPGA bridge list node
 * @priv: low level driver private date
 */
struct fpga_bridge {
    const char *name;
    struct device dev;
    struct mutex mutex; /* for exclusive reference to bridge */
    const struct fpga_bridge_ops *br_ops;
    struct fpga_image_info *info;
    struct list_head node;
    void *priv;
};

#define to_fpga_bridge(d) container_of(d, struct fpga_bridge, dev)

struct fpga_bridge *of_fpga_bridge_get(struct device_node *node,
                                       struct fpga_image_info *info);
void fpga_bridge_put(struct fpga_bridge *bridge);
int fpga_bridge_enable(struct fpga_bridge *bridge);
int fpga_bridge_disable(struct fpga_bridge *bridge);

int fpga_bridges_enable(struct list_head *bridge_list);
int fpga_bridges_disable(struct list_head *bridge_list);
void fpga_bridges_put(struct list_head *bridge_list);
int fpga_bridge_get_to_list(struct device_node *np,
                            struct fpga_image_info *info,
                            struct list_head *bridge_list);

int fpga_bridge_register(struct device *dev, const char *name,
                         const struct fpga_bridge_ops *br_ops, void *priv);
void fpga_bridge_unregister(struct device *dev);

#endif /* _LINUX_FPGA_BRIDGE_H */
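A hedged sketch of the call pattern these list helpers are built for, roughly what an FPGA region driver would do around reprogramming; error paths are trimmed and all my_ names are illustrative:

static int my_program_region(struct fpga_manager *mgr,
                             struct device_node *np,
                             struct fpga_image_info *info,
                             const char *image_name)
{
    LIST_HEAD(bridge_list);
    int ret;

    /* collect every bridge that gates this region onto one list */
    ret = fpga_bridge_get_to_list(np, info, &bridge_list);
    if (ret)
        return ret;

    ret = fpga_bridges_disable(&bridge_list);
    if (ret)
        goto out;

    ret = fpga_mgr_firmware_load(mgr, info, image_name);
    if (ret)
        goto out;

    ret = fpga_bridges_enable(&bridge_list);
out:
    fpga_bridges_put(&bridge_list);
    return ret;
}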

@@ -65,11 +65,26 @@ enum fpga_mgr_states {
/*
 * FPGA Manager flags
 * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
 * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
 */
#define FPGA_MGR_PARTIAL_RECONFIG   BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG    BIT(1)

/**
 * struct fpga_image_info - information specific to a FPGA image
 * @flags: boolean flags as defined above
 * @enable_timeout_us: maximum time to enable traffic through bridge (uSec)
 * @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
 */
struct fpga_image_info {
    u32 flags;
    u32 enable_timeout_us;
    u32 disable_timeout_us;
};

/**
 * struct fpga_manager_ops - ops for low level fpga manager drivers
 * @initial_header_size: Maximum number of bytes that should be passed into write_init
 * @state: returns an enum value of the FPGA's state
 * @write_init: prepare the FPGA to receive confuration data
 * @write: write count bytes of configuration data to the FPGA

@@ -81,11 +96,14 @@ enum fpga_mgr_states {
 * called, so leaving them out is fine.
 */
struct fpga_manager_ops {
    size_t initial_header_size;
    enum fpga_mgr_states (*state)(struct fpga_manager *mgr);
    int (*write_init)(struct fpga_manager *mgr, u32 flags,
    int (*write_init)(struct fpga_manager *mgr,
                      struct fpga_image_info *info,
                      const char *buf, size_t count);
    int (*write)(struct fpga_manager *mgr, const char *buf, size_t count);
    int (*write_complete)(struct fpga_manager *mgr, u32 flags);
    int (*write_complete)(struct fpga_manager *mgr,
                          struct fpga_image_info *info);
    void (*fpga_remove)(struct fpga_manager *mgr);
};
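A hedged sketch of a low-level driver adapting to the new signatures, where write_init() and write_complete() now receive the struct fpga_image_info rather than a raw flags word; the my_ names are illustrative:

static int my_write_init(struct fpga_manager *mgr,
                         struct fpga_image_info *info,
                         const char *buf, size_t count)
{
    /* flags now arrive via info instead of a u32 parameter */
    if (info->flags & FPGA_MGR_PARTIAL_RECONFIG)
        return -EINVAL; /* this sketch: full reconfiguration only */
    return 0;
}

static int my_write_complete(struct fpga_manager *mgr,
                             struct fpga_image_info *info)
{
    return 0;
}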

@@ -109,14 +127,17 @@ struct fpga_manager {

#define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)

int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags,
int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
                      const char *buf, size_t count);

int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
int fpga_mgr_firmware_load(struct fpga_manager *mgr,
                           struct fpga_image_info *info,
                           const char *image_name);

struct fpga_manager *of_fpga_mgr_get(struct device_node *node);

struct fpga_manager *fpga_mgr_get(struct device *dev);

void fpga_mgr_put(struct fpga_manager *mgr);

int fpga_mgr_register(struct device *dev, const char *name,

@@ -28,7 +28,6 @@
#include <linux/uidgid.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/blk_types.h>
#include <linux/workqueue.h>
#include <linux/percpu-rwsem.h>
#include <linux/delayed_call.h>

@@ -38,6 +37,7 @@

struct backing_dev_info;
struct bdi_writeback;
struct bio;
struct export_operations;
struct hd_geometry;
struct iovec;

@@ -151,58 +151,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 */
#define CHECK_IOVEC_ONLY -1

/*
 * The below are the various read and write flags that we support. Some of
 * them include behavioral modifiers that send information down to the
 * block layer and IO scheduler. They should be used along with a req_op.
 * Terminology:
 *
 *  The block layer uses device plugging to defer IO a little bit, in
 *  the hope that we will see more IO very shortly. This increases
 *  coalescing of adjacent IO and thus reduces the number of IOs we
 *  have to send to the device. It also allows for better queuing,
 *  if the IO isn't mergeable. If the caller is going to be waiting
 *  for the IO, then he must ensure that the device is unplugged so
 *  that the IO is dispatched to the driver.
 *
 *  All IO is handled async in Linux. This is fine for background
 *  writes, but for reads or writes that someone waits for completion
 *  on, we want to notify the block layer and IO scheduler so that they
 *  know about it. That allows them to make better scheduling
 *  decisions. So when the below references 'sync' and 'async', it
 *  is referencing this priority hint.
 *
 * With that in mind, the available types are:
 *
 * READ             A normal read operation. Device will be plugged.
 * READ_SYNC        A synchronous read. Device is not plugged, caller can
 *                  immediately wait on this read without caring about
 *                  unplugging.
 * WRITE            A normal async write. Device will be plugged.
 * WRITE_SYNC       Synchronous write. Identical to WRITE, but passes down
 *                  the hint that someone will be waiting on this IO
 *                  shortly. The write equivalent of READ_SYNC.
 * WRITE_ODIRECT    Special case write for O_DIRECT only.
 * WRITE_FLUSH      Like WRITE_SYNC but with preceding cache flush.
 * WRITE_FUA        Like WRITE_SYNC but data is guaranteed to be on
 *                  non-volatile media on completion.
 * WRITE_FLUSH_FUA  Combination of WRITE_FLUSH and FUA. The IO is preceded
 *                  by a cache flush and data is guaranteed to be on
 *                  non-volatile media on completion.
 *
 */
#define RW_MASK          REQ_OP_WRITE

#define READ             REQ_OP_READ
#define WRITE            REQ_OP_WRITE

#define READ_SYNC        REQ_SYNC
#define WRITE_SYNC       (REQ_SYNC | REQ_NOIDLE)
#define WRITE_ODIRECT    REQ_SYNC
#define WRITE_FLUSH      (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH)
#define WRITE_FUA        (REQ_SYNC | REQ_NOIDLE | REQ_FUA)
#define WRITE_FLUSH_FUA  (REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA)

/*
 * Attribute flags. These should be or-ed together to figure out what
 * has been changed!

@@ -595,6 +543,7 @@ is_uncached_acl(struct posix_acl *acl)
#define IOP_LOOKUP  0x0002
#define IOP_NOFOLLOW    0x0004
#define IOP_XATTR   0x0008
#define IOP_DEFAULT_READLINK    0x0010

/*
 * Keep mostly read-only and often accessed (especially for

@@ -1778,11 +1727,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
        unsigned long, loff_t *, int);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
                                   loff_t, size_t, unsigned int);
extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
                                      struct inode *inode_out, loff_t pos_out,
                                      u64 *len, bool is_dedupe);
extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
                                struct file *file_out, loff_t pos_out, u64 len);
extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
                                         struct inode *dest, loff_t destoff,
                                         loff_t len, bool *is_same);
extern int vfs_dedupe_file_range(struct file *file,
                                 struct file_dedupe_range *same);

static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
                                      struct file *file_out, loff_t pos_out,
                                      u64 len)
{
    int ret;

    sb_start_write(file_inode(file_out)->i_sb);
    ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
    sb_end_write(file_inode(file_out)->i_sb);

    return ret;
}
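do_clone_file_range() takes the destination superblock's write/freeze protection itself, so a caller only validates and forwards. A hedged sketch of an ioctl-style caller; the function name and the FMODE_WRITE check are illustrative, not from this header:

static long my_clone_ioctl(struct file *src_file, struct file *dst_file,
                           loff_t pos_in, loff_t pos_out, u64 len)
{
    if (!(dst_file->f_mode & FMODE_WRITE))
        return -EBADF;

    return do_clone_file_range(src_file, pos_in, dst_file, pos_out, len);
}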

struct super_operations {
    struct inode *(*alloc_inode)(struct super_block *sb);
    void (*destroy_inode)(struct inode *);

@@ -2123,11 +2091,11 @@ extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
extern long do_mount(const char *, const char __user *,
                     const char *, unsigned long, void *);
extern struct vfsmount *collect_mounts(struct path *);
extern struct vfsmount *collect_mounts(const struct path *);
extern void drop_collected_mounts(struct vfsmount *);
extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
                          struct vfsmount *);
extern int vfs_statfs(struct path *, struct kstatfs *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
extern int vfs_ustat(dev_t, struct kstatfs *);

@@ -2499,19 +2467,6 @@ extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);

#ifdef CONFIG_BLOCK
static inline bool op_is_write(unsigned int op)
{
    return op == REQ_OP_READ ? false : true;
}

/*
 * return data direction, READ or WRITE
 */
static inline int bio_data_dir(struct bio *bio)
{
    return op_is_write(bio_op(bio)) ? WRITE : READ;
}

extern void check_disk_size_change(struct gendisk *disk,
                                   struct block_device *bdev);
extern int revalidate_disk(struct gendisk *);

@@ -2709,7 +2664,7 @@ extern struct file * open_exec(const char *);

/* fs/dcache.c -- generic fs support functions */
extern bool is_subdir(struct dentry *, struct dentry *);
extern bool path_is_under(struct path *, struct path *);
extern bool path_is_under(const struct path *, const struct path *);

extern char *file_path(struct file *, char *, int);

@@ -2782,7 +2737,6 @@ static inline void remove_inode_hash(struct inode *inode)
extern void inode_sb_list_add(struct inode *inode);

#ifdef CONFIG_BLOCK
extern blk_qc_t submit_bio(struct bio *);
extern int bdev_read_only(struct block_device *);
#endif
extern int set_blocksize(struct block_device *, int);

@@ -2914,7 +2868,6 @@ extern int __page_symlink(struct inode *inode, const char *symname, int len,
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
extern int generic_readlink(struct dentry *, char __user *, int);
extern void generic_fillattr(struct inode *, struct kstat *);
int vfs_getattr_nosec(struct path *path, struct kstat *stat);
extern int vfs_getattr(struct path *, struct kstat *);

@@ -2935,6 +2888,7 @@ extern int vfs_lstat(const char __user *, struct kstat *);
extern int vfs_fstat(unsigned int, struct kstat *);
extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
extern int vfs_readlink(struct dentry *, char __user *, int);

extern int __generic_block_fiemap(struct inode *inode,
                                  struct fiemap_extent_info *fieinfo,

@@ -2949,8 +2903,10 @@ extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(struct block_device *);
extern struct super_block *get_super_thawed(struct block_device *);
extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev);
extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
extern void iterate_supers(void (*)(struct super_block *, void *), void *);
extern void iterate_supers_type(struct file_system_type *,
                                void (*)(struct super_block *, void *), void *);

@@ -18,73 +18,9 @@
#include <crypto/skcipher.h>
#include <uapi/linux/fs.h>

#define FS_KEY_DERIVATION_NONCE_SIZE    16
#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
#define FS_CRYPTO_BLOCK_SIZE            16

#define FS_POLICY_FLAGS_PAD_4       0x00
#define FS_POLICY_FLAGS_PAD_8       0x01
#define FS_POLICY_FLAGS_PAD_16      0x02
#define FS_POLICY_FLAGS_PAD_32      0x03
#define FS_POLICY_FLAGS_PAD_MASK    0x03
#define FS_POLICY_FLAGS_VALID       0x03

/* Encryption algorithms */
#define FS_ENCRYPTION_MODE_INVALID      0
#define FS_ENCRYPTION_MODE_AES_256_XTS  1
#define FS_ENCRYPTION_MODE_AES_256_GCM  2
#define FS_ENCRYPTION_MODE_AES_256_CBC  3
#define FS_ENCRYPTION_MODE_AES_256_CTS  4

/**
 * Encryption context for inode
 *
 * Protector format:
 *  1 byte: Protector format (1 = this version)
 *  1 byte: File contents encryption mode
 *  1 byte: File names encryption mode
 *  1 byte: Flags
 *  8 bytes: Master Key descriptor
 *  16 bytes: Encryption Key derivation nonce
 */
struct fscrypt_context {
    u8 format;
    u8 contents_encryption_mode;
    u8 filenames_encryption_mode;
    u8 flags;
    u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
    u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
} __packed;
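The documented layout adds up to 28 bytes on disk. A standalone check of that arithmetic (userspace sketch; FS_KEY_DESCRIPTOR_SIZE comes from uapi/linux/fs.h and is restated as 8 here only to keep the snippet self-contained):

#include <stdint.h>

#define DEMO_KEY_DESCRIPTOR_SIZE        8
#define DEMO_KEY_DERIVATION_NONCE_SIZE  16

struct demo_fscrypt_context {
    uint8_t format;
    uint8_t contents_encryption_mode;
    uint8_t filenames_encryption_mode;
    uint8_t flags;
    uint8_t master_key_descriptor[DEMO_KEY_DESCRIPTOR_SIZE];
    uint8_t nonce[DEMO_KEY_DERIVATION_NONCE_SIZE];
} __attribute__((packed));

/* 1 + 1 + 1 + 1 + 8 + 16 = 28 bytes */
_Static_assert(sizeof(struct demo_fscrypt_context) == 28,
               "context layout mismatch");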

/* Encryption parameters */
#define FS_XTS_TWEAK_SIZE       16
#define FS_AES_128_ECB_KEY_SIZE 16
#define FS_AES_256_GCM_KEY_SIZE 32
#define FS_AES_256_CBC_KEY_SIZE 32
#define FS_AES_256_CTS_KEY_SIZE 32
#define FS_AES_256_XTS_KEY_SIZE 64
#define FS_MAX_KEY_SIZE         64

#define FS_KEY_DESC_PREFIX      "fscrypt:"
#define FS_KEY_DESC_PREFIX_SIZE 8

/* This is passed in from userspace into the kernel keyring */
struct fscrypt_key {
    u32 mode;
    u8 raw[FS_MAX_KEY_SIZE];
    u32 size;
} __packed;

struct fscrypt_info {
    u8 ci_data_mode;
    u8 ci_filename_mode;
    u8 ci_flags;
    struct crypto_skcipher *ci_ctfm;
    struct key *ci_keyring_key;
    u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
};

#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
#define FS_WRITE_PATH_FL                0x00000002
struct fscrypt_info;

struct fscrypt_ctx {
    union {

@@ -102,19 +38,6 @@ struct fscrypt_ctx {
    u8 mode;    /* Encryption mode for tfm */
};

struct fscrypt_completion_result {
    struct completion completion;
    int res;
};

#define DECLARE_FS_COMPLETION_RESULT(ecr) \
    struct fscrypt_completion_result ecr = { \
        COMPLETION_INITIALIZER((ecr).completion), 0 }

#define FS_FNAME_NUM_SCATTER_ENTRIES    4
#define FS_CRYPTO_BLOCK_SIZE            16
#define FS_FNAME_CRYPTO_DIGEST_SIZE     32

/**
 * For encrypted symlinks, the ciphertext length is stored at the beginning
 * of the string in little-endian format.

@@ -153,10 +76,16 @@ struct fscrypt_name {
#define fname_name(p)   ((p)->disk_name.name)
#define fname_len(p)    ((p)->disk_name.len)

/*
 * fscrypt superblock flags
 */
#define FS_CFLG_OWN_PAGES   (1U << 1)

/*
 * crypto opertions for filesystems
 */
struct fscrypt_operations {
    unsigned int flags;
    int (*get_context)(struct inode *, void *, size_t);
    int (*key_prefix)(struct inode *, u8 **);
    int (*prepare_context)(struct inode *);

@@ -206,7 +135,7 @@ static inline struct page *fscrypt_control_page(struct page *page)
#endif
}

static inline int fscrypt_has_encryption_key(struct inode *inode)
static inline int fscrypt_has_encryption_key(const struct inode *inode)
{
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
    return (inode->i_crypt_info != NULL);

@@ -238,25 +167,25 @@ static inline void fscrypt_set_d_op(struct dentry *dentry)
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
/* crypto.c */
extern struct kmem_cache *fscrypt_info_cachep;
int fscrypt_initialize(void);

extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
extern int fscrypt_decrypt_page(struct page *);
extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
                                         unsigned int, unsigned int,
                                         u64, gfp_t);
extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
                                unsigned int, u64);
extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern void fscrypt_restore_control_page(struct page *);
extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
                                 unsigned int);
/* policy.c */
extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
                                   void *, bool);
/* keyinfo.c */
extern int get_crypt_info(struct inode *);
extern int fscrypt_get_encryption_info(struct inode *);
extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);

@@ -264,8 +193,8 @@ extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
                                  int lookup, struct fscrypt_name *);
extern void fscrypt_free_filename(struct fscrypt_name *);
extern u32 fscrypt_fname_encrypted_size(struct inode *, u32);
extern int fscrypt_fname_alloc_buffer(struct inode *, u32,
extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
                                      struct fscrypt_str *);
extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,

@@ -275,7 +204,7 @@ extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
#endif

/* crypto.c */
static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(const struct inode *i,
                                                          gfp_t f)
{
    return ERR_PTR(-EOPNOTSUPP);

@@ -286,13 +215,18 @@ static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
    return;
}

static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
                                                        struct page *p, gfp_t f)
static inline struct page *fscrypt_notsupp_encrypt_page(const struct inode *i,
                                                        struct page *p,
                                                        unsigned int len,
                                                        unsigned int offs,
                                                        u64 lblk_num, gfp_t f)
{
    return ERR_PTR(-EOPNOTSUPP);
}

static inline int fscrypt_notsupp_decrypt_page(struct page *p)
static inline int fscrypt_notsupp_decrypt_page(const struct inode *i, struct page *p,
                                               unsigned int len, unsigned int offs,
                                               u64 lblk_num)
{
    return -EOPNOTSUPP;
}

@@ -313,21 +247,21 @@ static inline void fscrypt_notsupp_restore_control_page(struct page *p)
    return;
}

static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
static inline int fscrypt_notsupp_zeroout_range(const struct inode *i, pgoff_t p,
                                                sector_t s, unsigned int f)
{
    return -EOPNOTSUPP;
}

/* policy.c */
static inline int fscrypt_notsupp_process_policy(struct file *f,
                                                 const struct fscrypt_policy *p)
static inline int fscrypt_notsupp_ioctl_set_policy(struct file *f,
                                                   const void __user *arg)
{
    return -EOPNOTSUPP;
}

static inline int fscrypt_notsupp_get_policy(struct inode *i,
                                             struct fscrypt_policy *p)
static inline int fscrypt_notsupp_ioctl_get_policy(struct file *f,
                                                   void __user *arg)
{
    return -EOPNOTSUPP;
}

@@ -29,83 +29,112 @@
 * #ifdefs.
 */
struct ccsr_guts {
    __be32 porpllsr;    /* 0x.0000 - POR PLL Ratio Status Register */
    __be32 porbmsr;     /* 0x.0004 - POR Boot Mode Status Register */
    __be32 porimpscr;   /* 0x.0008 - POR I/O Impedance Status and Control Register */
    __be32 pordevsr;    /* 0x.000c - POR I/O Device Status Register */
    __be32 pordbgmsr;   /* 0x.0010 - POR Debug Mode Status Register */
    __be32 pordevsr2;   /* 0x.0014 - POR device status register 2 */
    u32 porpllsr;       /* 0x.0000 - POR PLL Ratio Status Register */
    u32 porbmsr;        /* 0x.0004 - POR Boot Mode Status Register */
    u32 porimpscr;      /* 0x.0008 - POR I/O Impedance Status and
                         * Control Register
                         */
    u32 pordevsr;       /* 0x.000c - POR I/O Device Status Register */
    u32 pordbgmsr;      /* 0x.0010 - POR Debug Mode Status Register */
    u32 pordevsr2;      /* 0x.0014 - POR device status register 2 */
    u8 res018[0x20 - 0x18];
    __be32 porcir;      /* 0x.0020 - POR Configuration Information Register */
    u32 porcir;         /* 0x.0020 - POR Configuration Information
                         * Register
                         */
    u8 res024[0x30 - 0x24];
    __be32 gpiocr;      /* 0x.0030 - GPIO Control Register */
    u32 gpiocr;         /* 0x.0030 - GPIO Control Register */
    u8 res034[0x40 - 0x34];
    __be32 gpoutdr;     /* 0x.0040 - General-Purpose Output Data Register */
    u32 gpoutdr;        /* 0x.0040 - General-Purpose Output Data
                         * Register
                         */
    u8 res044[0x50 - 0x44];
    __be32 gpindr;      /* 0x.0050 - General-Purpose Input Data Register */
    u32 gpindr;         /* 0x.0050 - General-Purpose Input Data
                         * Register
                         */
    u8 res054[0x60 - 0x54];
    __be32 pmuxcr;      /* 0x.0060 - Alternate Function Signal Multiplex Control */
    __be32 pmuxcr2;     /* 0x.0064 - Alternate function signal multiplex control 2 */
    __be32 dmuxcr;      /* 0x.0068 - DMA Mux Control Register */
    u32 pmuxcr;         /* 0x.0060 - Alternate Function Signal
                         * Multiplex Control
                         */
    u32 pmuxcr2;        /* 0x.0064 - Alternate function signal
                         * multiplex control 2
                         */
    u32 dmuxcr;         /* 0x.0068 - DMA Mux Control Register */
    u8 res06c[0x70 - 0x6c];
    __be32 devdisr;     /* 0x.0070 - Device Disable Control */
    u32 devdisr;        /* 0x.0070 - Device Disable Control */
#define CCSR_GUTS_DEVDISR_TB1   0x00001000
#define CCSR_GUTS_DEVDISR_TB0   0x00004000
    __be32 devdisr2;    /* 0x.0074 - Device Disable Control 2 */
    u32 devdisr2;       /* 0x.0074 - Device Disable Control 2 */
    u8 res078[0x7c - 0x78];
    __be32 pmjcr;       /* 0x.007c - 4 Power Management Jog Control Register */
    __be32 powmgtcsr;   /* 0x.0080 - Power Management Status and Control Register */
    __be32 pmrccr;      /* 0x.0084 - Power Management Reset Counter Configuration Register */
    __be32 pmpdccr;     /* 0x.0088 - Power Management Power Down Counter Configuration Register */
    __be32 pmcdr;       /* 0x.008c - 4Power management clock disable register */
    __be32 mcpsumr;     /* 0x.0090 - Machine Check Summary Register */
    __be32 rstrscr;     /* 0x.0094 - Reset Request Status and Control Register */
    __be32 ectrstcr;    /* 0x.0098 - Exception reset control register */
    __be32 autorstsr;   /* 0x.009c - Automatic reset status register */
    __be32 pvr;         /* 0x.00a0 - Processor Version Register */
    __be32 svr;         /* 0x.00a4 - System Version Register */
    u32 pmjcr;          /* 0x.007c - 4 Power Management Jog Control
                         * Register
                         */
    u32 powmgtcsr;      /* 0x.0080 - Power Management Status and
                         * Control Register
                         */
    u32 pmrccr;         /* 0x.0084 - Power Management Reset Counter
                         * Configuration Register
                         */
    u32 pmpdccr;        /* 0x.0088 - Power Management Power Down Counter
                         * Configuration Register
                         */
    u32 pmcdr;          /* 0x.008c - 4Power management clock disable
                         * register
                         */
    u32 mcpsumr;        /* 0x.0090 - Machine Check Summary Register */
    u32 rstrscr;        /* 0x.0094 - Reset Request Status and
                         * Control Register
                         */
    u32 ectrstcr;       /* 0x.0098 - Exception reset control register */
    u32 autorstsr;      /* 0x.009c - Automatic reset status register */
    u32 pvr;            /* 0x.00a0 - Processor Version Register */
    u32 svr;            /* 0x.00a4 - System Version Register */
    u8 res0a8[0xb0 - 0xa8];
    __be32 rstcr;       /* 0x.00b0 - Reset Control Register */
    u32 rstcr;          /* 0x.00b0 - Reset Control Register */
    u8 res0b4[0xc0 - 0xb4];
    __be32 iovselsr;    /* 0x.00c0 - I/O voltage select status register
    u32 iovselsr;       /* 0x.00c0 - I/O voltage select status register
                           Called 'elbcvselcr' on 86xx SOCs */
    u8 res0c4[0x100 - 0xc4];
    __be32 rcwsr[16];   /* 0x.0100 - Reset Control Word Status registers
    u32 rcwsr[16];      /* 0x.0100 - Reset Control Word Status registers
                           There are 16 registers */
    u8 res140[0x224 - 0x140];
    __be32 iodelay1;    /* 0x.0224 - IO delay control register 1 */
    __be32 iodelay2;    /* 0x.0228 - IO delay control register 2 */
    u32 iodelay1;       /* 0x.0224 - IO delay control register 1 */
    u32 iodelay2;       /* 0x.0228 - IO delay control register 2 */
    u8 res22c[0x604 - 0x22c];
    __be32 pamubypenr;  /* 0x.604 - PAMU bypass enable register */
    u32 pamubypenr;     /* 0x.604 - PAMU bypass enable register */
    u8 res608[0x800 - 0x608];
    __be32 clkdvdr;     /* 0x.0800 - Clock Divide Register */
    u32 clkdvdr;        /* 0x.0800 - Clock Divide Register */
    u8 res804[0x900 - 0x804];
    __be32 ircr;        /* 0x.0900 - Infrared Control Register */
    u32 ircr;           /* 0x.0900 - Infrared Control Register */
    u8 res904[0x908 - 0x904];
    __be32 dmacr;       /* 0x.0908 - DMA Control Register */
    u32 dmacr;          /* 0x.0908 - DMA Control Register */
    u8 res90c[0x914 - 0x90c];
    __be32 elbccr;      /* 0x.0914 - eLBC Control Register */
    u32 elbccr;         /* 0x.0914 - eLBC Control Register */
    u8 res918[0xb20 - 0x918];
    __be32 ddr1clkdr;   /* 0x.0b20 - DDR1 Clock Disable Register */
    __be32 ddr2clkdr;   /* 0x.0b24 - DDR2 Clock Disable Register */
    __be32 ddrclkdr;    /* 0x.0b28 - DDR Clock Disable Register */
    u32 ddr1clkdr;      /* 0x.0b20 - DDR1 Clock Disable Register */
    u32 ddr2clkdr;      /* 0x.0b24 - DDR2 Clock Disable Register */
    u32 ddrclkdr;       /* 0x.0b28 - DDR Clock Disable Register */
    u8 resb2c[0xe00 - 0xb2c];
    __be32 clkocr;      /* 0x.0e00 - Clock Out Select Register */
    u32 clkocr;         /* 0x.0e00 - Clock Out Select Register */
    u8 rese04[0xe10 - 0xe04];
    __be32 ddrdllcr;    /* 0x.0e10 - DDR DLL Control Register */
    u32 ddrdllcr;       /* 0x.0e10 - DDR DLL Control Register */
    u8 rese14[0xe20 - 0xe14];
    __be32 lbcdllcr;    /* 0x.0e20 - LBC DLL Control Register */
    __be32 cpfor;       /* 0x.0e24 - L2 charge pump fuse override register */
    u32 lbcdllcr;       /* 0x.0e20 - LBC DLL Control Register */
    u32 cpfor;          /* 0x.0e24 - L2 charge pump fuse override
                         * register
                         */
    u8 rese28[0xf04 - 0xe28];
    __be32 srds1cr0;    /* 0x.0f04 - SerDes1 Control Register 0 */
    __be32 srds1cr1;    /* 0x.0f08 - SerDes1 Control Register 0 */
    u32 srds1cr0;       /* 0x.0f04 - SerDes1 Control Register 0 */
    u32 srds1cr1;       /* 0x.0f08 - SerDes1 Control Register 0 */
    u8 resf0c[0xf2c - 0xf0c];
    __be32 itcr;        /* 0x.0f2c - Internal transaction control register */
    u32 itcr;           /* 0x.0f2c - Internal transaction control
                         * register
                         */
    u8 resf30[0xf40 - 0xf30];
    __be32 srds2cr0;    /* 0x.0f40 - SerDes2 Control Register 0 */
    __be32 srds2cr1;    /* 0x.0f44 - SerDes2 Control Register 0 */
    u32 srds2cr0;       /* 0x.0f40 - SerDes2 Control Register 0 */
    u32 srds2cr1;       /* 0x.0f44 - SerDes2 Control Register 0 */
} __attribute__ ((packed));

u32 fsl_guts_get_svr(void);

/* Alternate function signal multiplex control */
#define MPC85xx_PMUXCR_QE(x)    (0x8000 >> (x))

@@ -100,6 +100,7 @@ struct fsl_usb2_platform_data {
    unsigned already_suspended:1;
    unsigned has_fsl_erratum_a007792:1;
    unsigned has_fsl_erratum_a005275:1;
    unsigned has_fsl_erratum_a005697:1;
    unsigned check_phy_clk_valid:1;

    /* register save area for suspend/resume */

@@ -17,7 +17,7 @@
#include <linux/bug.h>

/* Notify this dentry's parent about a child's events. */
static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
static inline int fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
{
    if (!dentry)
        dentry = path->dentry;

@@ -28,7 +28,7 @@ static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u3
/* simple call site for access decisions */
static inline int fsnotify_perm(struct file *file, int mask)
{
    struct path *path = &file->f_path;
    const struct path *path = &file->f_path;
    /*
     * Do not use file_inode() here or anywhere in this file to get the
     * inode. That would break *notity on overlayfs.

@@ -176,7 +176,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
 */
static inline void fsnotify_access(struct file *file)
{
    struct path *path = &file->f_path;
    const struct path *path = &file->f_path;
    struct inode *inode = path->dentry->d_inode;
    __u32 mask = FS_ACCESS;

@@ -194,7 +194,7 @@ static inline void fsnotify_access(struct file *file)
 */
static inline void fsnotify_modify(struct file *file)
{
    struct path *path = &file->f_path;
    const struct path *path = &file->f_path;
    struct inode *inode = path->dentry->d_inode;
    __u32 mask = FS_MODIFY;

@@ -212,7 +212,7 @@ static inline void fsnotify_modify(struct file *file)
 */
static inline void fsnotify_open(struct file *file)
{
    struct path *path = &file->f_path;
    const struct path *path = &file->f_path;
    struct inode *inode = path->dentry->d_inode;
    __u32 mask = FS_OPEN;

@@ -228,7 +228,7 @@ static inline void fsnotify_open(struct file *file)
 */
static inline void fsnotify_close(struct file *file)
{
    struct path *path = &file->f_path;
    const struct path *path = &file->f_path;
    struct inode *inode = path->dentry->d_inode;
    fmode_t mode = file->f_mode;
    __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;

@@ -96,7 +96,7 @@ struct fsnotify_ops {
                        struct inode *inode,
                        struct fsnotify_mark *inode_mark,
                        struct fsnotify_mark *vfsmount_mark,
                        u32 mask, void *data, int data_type,
                        u32 mask, const void *data, int data_type,
                        const unsigned char *file_name, u32 cookie);
    void (*free_group_priv)(struct fsnotify_group *group);
    void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);

@@ -245,9 +245,9 @@ struct fsnotify_mark {
/* called from the vfs helpers */

/* main fsnotify call to send events */
extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
                    const unsigned char *name, u32 cookie);
extern int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask);
extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern u32 fsnotify_get_cookie(void);

@@ -357,13 +357,13 @@ extern void fsnotify_init_event(struct fsnotify_event *event,

#else

static inline int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
                           const unsigned char *name, u32 cookie)
{
    return 0;
}

static inline int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
static inline int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask)
{
    return 0;
}

@@ -398,6 +398,7 @@ int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

@@ -645,6 +646,7 @@ static inline unsigned long ftrace_location(unsigned long ip)
#define ftrace_set_filter(ops, buf, len, reset)     ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset)    ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                                          size_t cnt, loff_t *ppos) { return -ENODEV; }

@@ -945,6 +947,10 @@ extern int __disable_trace_on_warning;
#define INIT_TRACE_RECURSION    .trace_recursion = 0,
#endif

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

@@ -1,14 +1,14 @@
#ifndef _LINUX_FUTEX_H
#define _LINUX_FUTEX_H

#include <linux/ktime.h>
#include <uapi/linux/futex.h>

struct inode;
struct mm_struct;
struct task_struct;
union ktime;

long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
              u32 __user *uaddr2, u32 val2, u32 val3);

extern int

@@ -17,8 +17,9 @@ enum fwnode_type {
    FWNODE_OF,
    FWNODE_ACPI,
    FWNODE_ACPI_DATA,
    FWNODE_ACPI_STATIC,
    FWNODE_PDATA,
    FWNODE_IRQCHIP,
    FWNODE_IRQCHIP
};

struct fwnode_handle {

@@ -259,16 +259,7 @@ static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
 * {{{2
 */
#define ZZZ_genl_family     CONCAT_(GENL_MAGIC_FAMILY, _genl_family)
static struct genl_family ZZZ_genl_family __read_mostly = {
    .id = GENL_ID_GENERATE,
    .name = __stringify(GENL_MAGIC_FAMILY),
    .version = GENL_MAGIC_VERSION,
#ifdef GENL_MAGIC_FAMILY_HDRSZ
    .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
#endif
    .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1,
};

static struct genl_family ZZZ_genl_family;
/*
 * Magic: define multicast groups
 * Magic: define multicast group registration helper

@@ -302,11 +293,23 @@ static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
#undef GENL_mc_group
#define GENL_mc_group(group)

static struct genl_family ZZZ_genl_family __ro_after_init = {
    .name = __stringify(GENL_MAGIC_FAMILY),
    .version = GENL_MAGIC_VERSION,
#ifdef GENL_MAGIC_FAMILY_HDRSZ
    .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
#endif
    .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1,
    .ops = ZZZ_genl_ops,
    .n_ops = ARRAY_SIZE(ZZZ_genl_ops),
    .mcgrps = ZZZ_genl_mcgrps,
    .n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps),
    .module = THIS_MODULE,
};

int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
{
    return genl_register_family_with_ops_groups(&ZZZ_genl_family, \
                                                ZZZ_genl_ops, \
                                                ZZZ_genl_mcgrps);
    return genl_register_family(&ZZZ_genl_family);
}

void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)

@@ -506,6 +506,8 @@ extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);

struct page_frag_cache;
extern void __page_frag_drain(struct page *page, unsigned int order,
                              unsigned int count);
extern void *__alloc_page_frag(struct page_frag_cache *nc,
                               unsigned int fragsz, gfp_t gfp_mask);
extern void __free_page_frag(void *addr);

@@ -82,8 +82,6 @@ enum single_ended_mode {
 *  implies that if the chip supports IRQs, these IRQs need to be threaded
 *  as the chip access may sleep when e.g. reading out the IRQ status
 *  registers.
 * @irq_not_threaded: flag must be set if @can_sleep is set but the
 *  IRQs don't need to be threaded
 * @read_reg: reader function for generic GPIO
 * @write_reg: writer function for generic GPIO
 * @pin2mask: some generic GPIO controllers work with the big-endian bits

@@ -91,7 +89,7 @@ enum single_ended_mode {
 *  bit. This callback assigns the right bit mask.
 * @reg_dat: data (in) register for generic GPIO
 * @reg_set: output set register (out=high) for generic GPIO
 * @reg_clk: output clear register (out=low) for generic GPIO
 * @reg_clr: output clear register (out=low) for generic GPIO
 * @reg_dir: direction setting register for generic GPIO
 * @bgpio_bits: number of register bits used for a generic GPIO i.e.
 *  <register width> * 8

@@ -109,8 +107,10 @@ enum single_ended_mode {
 *  for GPIO IRQs, provided by GPIO driver
 * @irq_default_type: default IRQ triggering type applied during GPIO driver
 *  initialization, provided by GPIO driver
 * @irq_parent: GPIO IRQ chip parent/bank linux irq number,
 *  provided by GPIO driver
 * @irq_chained_parent: GPIO IRQ chip parent/bank linux irq number,
 *  provided by GPIO driver for chained interrupt (not for nested
 *  interrupts).
 * @irq_nested: True if set the interrupt handling is nested.
 * @irq_need_valid_mask: If set core allocates @irq_valid_mask with all
 *  bits set to one
 * @irq_valid_mask: If not %NULL holds bitmask of GPIOs which are valid to

@@ -166,7 +166,6 @@ struct gpio_chip {
    u16 ngpio;
    const char *const *names;
    bool can_sleep;
    bool irq_not_threaded;

#if IS_ENABLED(CONFIG_GPIO_GENERIC)
    unsigned long (*read_reg)(void __iomem *reg);

@@ -192,7 +191,8 @@ struct gpio_chip {
    unsigned int irq_base;
    irq_flow_handler_t irq_handler;
    unsigned int irq_default_type;
    int irq_parent;
    int irq_chained_parent;
    bool irq_nested;
    bool irq_need_valid_mask;
    unsigned long *irq_valid_mask;
    struct lock_class_key *lock_key;

@@ -270,24 +270,40 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
                                  int parent_irq,
                                  irq_flow_handler_t parent_handler);

void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
                                 struct irq_chip *irqchip,
                                 int parent_irq);

int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
                          struct irq_chip *irqchip,
                          unsigned int first_irq,
                          irq_flow_handler_t handler,
                          unsigned int type,
                          bool nested,
                          struct lock_class_key *lock_key);

/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
                                              struct irq_chip *irqchip,
                                              unsigned int first_irq,
                                              irq_flow_handler_t handler,
                                              unsigned int type)
{
    return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq,
                                 handler, type, true, NULL);
}

#ifdef CONFIG_LOCKDEP
#define gpiochip_irqchip_add(...)                           \
(                                                           \
    ({                                                      \
        static struct lock_class_key _key;                  \
        _gpiochip_irqchip_add(__VA_ARGS__, &_key);          \
        _gpiochip_irqchip_add(__VA_ARGS__, false, &_key);   \
    })                                                      \
)
#else
#define gpiochip_irqchip_add(...)                           \
    _gpiochip_irqchip_add(__VA_ARGS__, NULL)
    _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
#endif

#endif /* CONFIG_GPIOLIB_IRQCHIP */
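A hedged sketch of how a sleeping controller driver (an I2C GPIO expander, say) would use the new nested variants instead of the chained API; the function and chip names are illustrative, and error handling is reduced to the essentials:

static int my_expander_setup_irq(struct gpio_chip *gc,
                                 struct irq_chip *ic, int parent_irq)
{
    int ret;

    ret = gpiochip_irqchip_add_nested(gc, ic, 0, handle_simple_irq,
                                      IRQ_TYPE_NONE);
    if (ret)
        return ret;

    /* nested (threaded) IRQs: no flow handler for the parent */
    gpiochip_set_nested_irqchip(gc, ic, parent_irq);
    return 0;
}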

@@ -2,7 +2,6 @@
#define _GPIO_KEYS_H

struct device;
struct gpio_desc;

/**
 * struct gpio_keys_button - configuration parameters

@@ -18,7 +17,6 @@ struct gpio_desc;
 *  disable button via sysfs
 * @value: axis value for %EV_ABS
 * @irq: Irq number in case of interrupt keys
 * @gpiod: GPIO descriptor
 */
struct gpio_keys_button {
    unsigned int code;

@@ -31,7 +29,6 @@ struct gpio_keys_button {
    bool can_disable;
    int value;
    unsigned int irq;
    struct gpio_desc *gpiod;
};

/**

@@ -46,7 +43,7 @@ struct gpio_keys_button {
 * @name: input device name
 */
struct gpio_keys_platform_data {
    struct gpio_keys_button *buttons;
    const struct gpio_keys_button *buttons;
    int nbuttons;
    unsigned int poll_interval;
    unsigned int rep:1;

@@ -93,8 +93,6 @@ static __inline__ void debug_frame(const struct sk_buff *skb)
int hdlc_open(struct net_device *dev);
/* Must be called by hardware driver when HDLC device is being closed */
void hdlc_close(struct net_device *dev);
/* May be used by hardware driver */
int hdlc_change_mtu(struct net_device *dev, int new_mtu);
/* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */
netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev);

@@ -78,6 +78,8 @@ enum hdmi_picture_aspect {
    HDMI_PICTURE_ASPECT_NONE,
    HDMI_PICTURE_ASPECT_4_3,
    HDMI_PICTURE_ASPECT_16_9,
    HDMI_PICTURE_ASPECT_64_27,
    HDMI_PICTURE_ASPECT_256_135,
    HDMI_PICTURE_ASPECT_RESERVED,
};

@@ -231,7 +231,11 @@ struct hid_item {
#define HID_DG_TAP              0x000d0035
#define HID_DG_TABLETFUNCTIONKEY    0x000d0039
#define HID_DG_PROGRAMCHANGEKEY 0x000d003a
#define HID_DG_BATTERYSTRENGTH  0x000d003b
#define HID_DG_INVERT           0x000d003c
#define HID_DG_TILT_X           0x000d003d
#define HID_DG_TILT_Y           0x000d003e
#define HID_DG_TWIST            0x000d0041
#define HID_DG_TIPSWITCH        0x000d0042
#define HID_DG_TIPSWITCH2       0x000d0043
#define HID_DG_BARRELSWITCH     0x000d0044

@@ -479,6 +483,7 @@ struct hid_input {
    struct list_head list;
    struct hid_report *report;
    struct input_dev *input;
    bool registered;
};

enum hid_type {

@@ -32,7 +32,6 @@ struct hippi_cb {
};

__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
int hippi_change_mtu(struct net_device *dev, int new_mtu);
int hippi_mac_addr(struct net_device *dev, void *p);
int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
struct net_device *alloc_hippi_dev(int sizeof_priv);

@@ -228,8 +228,8 @@ static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t t

static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
{
    timer->node.expires.tv64 = tv64;
    timer->_softexpires.tv64 = tv64;
    timer->node.expires = tv64;
    timer->_softexpires = tv64;
}

static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)

@@ -256,11 +256,11 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)

static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
    return timer->node.expires.tv64;
    return timer->node.expires;
}
static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
{
    return timer->_softexpires.tv64;
    return timer->_softexpires;
}

static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)

@@ -297,7 +297,7 @@ extern void hrtimer_peek_ahead_timers(void);
 * this resolution values.
 */
# define HIGH_RES_NSEC      1
# define KTIME_HIGH_RES     (ktime_t) { .tv64 = HIGH_RES_NSEC }
# define KTIME_HIGH_RES     (HIGH_RES_NSEC)
# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
# define KTIME_MONOTONIC_RES    KTIME_HIGH_RES

@@ -333,7 +333,7 @@ __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
     * hrtimer_start_range_ns() to prevent short timeouts.
     */
    if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
        rem.tv64 -= hrtimer_resolution;
        rem -= hrtimer_resolution;
    return rem;
}
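These hunks track the tree-wide change that made ktime_t a plain s64 count of nanoseconds, which is why every .tv64 access collapses to a direct use of the value. A standalone sketch of the before/after, not kernel code:

typedef long long s64;
typedef s64 ktime_t;    /* after: a bare scalar, no union member */

static ktime_t add_ns(ktime_t kt, s64 ns)
{
    return kt + ns;     /* before: kt.tv64 + ns, then re-wrapping */
}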

@@ -1,12 +1,12 @@
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct fault_env *fe);
extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                         struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd);
extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          unsigned long addr,
                                          pmd_t *pmd,

@@ -142,7 +142,7 @@ static inline int hpage_nr_pages(struct page *page)
    return 1;
}

extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd);
extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

@@ -189,6 +189,8 @@ static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
    do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
        unsigned long address, bool freeze, struct page *page) {}

@@ -210,7 +212,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
    return NULL;
}

static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd)
static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
    return 0;
}

@@ -30,8 +30,7 @@
 *      Must not be NULL. *OBSOLETE*
 * @read:   New API. drivers can fill up to max bytes of data
 *      into the buffer. The buffer is aligned for any type
 *      and max is guaranteed to be >= to that alignment
 *      (either 4 or 8 depending on architecture).
 *      and max is a multiple of 4 and >= 32 bytes.
 * @priv:   Private data, for use by the RNG driver.
 * @quality:    Estimation of true entropy in RNG's bitstream
 *      (per mill).

@@ -298,8 +298,8 @@ enum hwmon_pwm_attributes {
 *      Channel number
 *      The function returns the file permissions.
 *      If the return value is 0, no attribute will be created.
 * @read:   Read callback. Optional. If not provided, attributes
 *      will not be readable.
 * @read:   Read callback for data attributes. Mandatory if readable
 *      data attributes are present.
 *      Parameters are:
 *      @dev:   Pointer to hardware monitoring device
 *      @type:  Sensor type

@@ -308,8 +308,19 @@ enum hwmon_pwm_attributes {
 *          Channel number
 *      @val:   Pointer to returned value
 *      The function returns 0 on success or a negative error number.
 * @write:  Write callback. Optional. If not provided, attributes
 *      will not be writable.
 * @read_string:
 *      Read callback for string attributes. Mandatory if string
 *      attributes are present.
 *      Parameters are:
 *      @dev:   Pointer to hardware monitoring device
 *      @type:  Sensor type
 *      @attr:  Sensor attribute
 *      @channel:
 *          Channel number
 *      @str:   Pointer to returned string
 *      The function returns 0 on success or a negative error number.
 * @write:  Write callback for data attributes. Mandatory if writeable
 *      data attributes are present.
 *      Parameters are:
 *      @dev:   Pointer to hardware monitoring device
 *      @type:  Sensor type

@@ -324,6 +335,8 @@ struct hwmon_ops {
                          u32 attr, int channel);
    int (*read)(struct device *dev, enum hwmon_sensor_types type,
                u32 attr, int channel, long *val);
    int (*read_string)(struct device *dev, enum hwmon_sensor_types type,
                       u32 attr, int channel, char **str);
    int (*write)(struct device *dev, enum hwmon_sensor_types type,
                 u32 attr, int channel, long val);
};
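A hedged sketch of a chip driver filling in these callbacks, including the new read_string() for label attributes; names and values are illustrative. The ops are then referenced from a struct hwmon_chip_info and handed to the _with_info registration functions shown below:

static umode_t my_is_visible(const void *data, enum hwmon_sensor_types type,
                             u32 attr, int channel)
{
    return 0444;    /* everything read-only in this sketch */
}

static int my_read(struct device *dev, enum hwmon_sensor_types type,
                   u32 attr, int channel, long *val)
{
    if (type == hwmon_temp && attr == hwmon_temp_input) {
        *val = 42000;   /* millidegrees Celsius */
        return 0;
    }
    return -EOPNOTSUPP;
}

static int my_read_string(struct device *dev, enum hwmon_sensor_types type,
                          u32 attr, int channel, char **str)
{
    static char label[] = "cpu";

    if (type == hwmon_temp && attr == hwmon_temp_label) {
        *str = label;
        return 0;
    }
    return -EOPNOTSUPP;
}

static const struct hwmon_ops my_hwmon_ops = {
    .is_visible  = my_is_visible,
    .read        = my_read,
    .read_string = my_read_string,
};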
|
||||
|
|
@ -349,7 +362,9 @@ struct hwmon_chip_info {
|
|||
const struct hwmon_channel_info **info;
|
||||
};
|
||||
|
||||
/* hwmon_device_register() is deprecated */
|
||||
struct device *hwmon_device_register(struct device *dev);
|
||||
|
||||
struct device *
|
||||
hwmon_device_register_with_groups(struct device *dev, const char *name,
|
||||
void *drvdata,
|
||||
|
|
@ -362,12 +377,12 @@ struct device *
|
|||
hwmon_device_register_with_info(struct device *dev,
|
||||
const char *name, void *drvdata,
|
||||
const struct hwmon_chip_info *info,
|
||||
const struct attribute_group **groups);
|
||||
const struct attribute_group **extra_groups);
|
||||
struct device *
|
||||
devm_hwmon_device_register_with_info(struct device *dev,
|
||||
const char *name, void *drvdata,
|
||||
const struct hwmon_chip_info *info,
|
||||
const struct attribute_group **groups);
|
||||
const char *name, void *drvdata,
|
||||
const struct hwmon_chip_info *info,
|
||||
const struct attribute_group **extra_groups);
|
||||
|
||||
void hwmon_device_unregister(struct device *dev);
|
||||
void devm_hwmon_device_unregister(struct device *dev);
|
||||
|
|
|
|||
|
|
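Putting the reworked ops together: a minimal sketch of a one-channel temperature chip exercising read, the new read_string, and the info-based registration. The demo_* identifiers and the fixed 42 °C reading are illustrative only:

	#include <linux/hwmon.h>

	static umode_t demo_is_visible(const void *data, enum hwmon_sensor_types type,
				       u32 attr, int channel)
	{
		return 0444;	/* everything read-only */
	}

	static int demo_read(struct device *dev, enum hwmon_sensor_types type,
			     u32 attr, int channel, long *val)
	{
		if (type == hwmon_temp && attr == hwmon_temp_input) {
			*val = 42000;	/* millidegrees Celsius */
			return 0;
		}
		return -EOPNOTSUPP;
	}

	static int demo_read_string(struct device *dev, enum hwmon_sensor_types type,
				    u32 attr, int channel, char **str)
	{
		if (type == hwmon_temp && attr == hwmon_temp_label) {
			*str = "board";	/* must outlive the call; a literal does */
			return 0;
		}
		return -EOPNOTSUPP;
	}

	static const struct hwmon_ops demo_ops = {
		.is_visible = demo_is_visible,
		.read = demo_read,
		.read_string = demo_read_string,
	};

	static const u32 demo_temp_config[] = {
		HWMON_T_INPUT | HWMON_T_LABEL,
		0
	};

	static const struct hwmon_channel_info demo_temp = {
		.type = hwmon_temp,
		.config = demo_temp_config,
	};

	static const struct hwmon_channel_info *demo_info[] = {
		&demo_temp,
		NULL
	};

	static const struct hwmon_chip_info demo_chip_info = {
		.ops = &demo_ops,
		.info = demo_info,
	};

A probe function would then finish with devm_hwmon_device_register_with_info(dev, "demo", drvdata, &demo_chip_info, NULL), passing NULL for extra_groups unless the driver adds non-standard attributes.
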
@ -696,7 +696,7 @@ enum vmbus_device_type {
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKOWN,
	HV_UNKNOWN,
};

struct vmbus_device {

@ -1119,6 +1119,12 @@ struct hv_driver {

	struct device_driver driver;

	/* dynamic device GUID's */
	struct {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

@ -1447,6 +1453,7 @@ void hv_event_tasklet_enable(struct vmbus_channel *channel);

void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */

@ -1479,10 +1486,11 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
 * there is room for the producer to send the pending packet.
 */

static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
static inline void hv_signal_on_read(struct vmbus_channel *channel)
{
	u32 cur_write_sz;
	u32 pending_sz;
	struct hv_ring_buffer_info *rbi = &channel->inbound;

	/*
	 * Issue a full memory barrier before making the signaling decision.

@ -1500,14 +1508,14 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
	/* If the other end is not blocked on write don't bother. */
	if (pending_sz == 0)
		return false;
		return;

	cur_write_sz = hv_get_bytes_to_write(rbi);

	if (cur_write_sz >= pending_sz)
		return true;
		vmbus_setevent(channel);

	return false;
	return;
}

/*

@ -1519,31 +1527,23 @@ static inline struct vmpacket_descriptor *
get_next_pkt_raw(struct vmbus_channel *channel)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	u32 read_loc = ring_info->priv_read_index;
	u32 priv_read_loc = ring_info->priv_read_index;
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	struct vmpacket_descriptor *cur_desc;
	u32 packetlen;
	u32 dsize = ring_info->ring_datasize;
	u32 delta = read_loc - ring_info->ring_buffer->read_index;
	/*
	 * delta is the difference between what is available to read and
	 * what was already consumed in place. We commit read index after
	 * the whole batch is processed.
	 */
	u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
		priv_read_loc - ring_info->ring_buffer->read_index :
		(dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
	u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);

	if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
		return NULL;

	if ((read_loc + sizeof(*cur_desc)) > dsize)
		return NULL;

	cur_desc = ring_buffer + read_loc;
	packetlen = cur_desc->len8 << 3;

	/*
	 * If the packet under consideration is wrapping around,
	 * return failure.
	 */
	if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
		return NULL;

	return cur_desc;
	return ring_buffer + priv_read_loc;
}

/*

@ -1555,16 +1555,14 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
				struct vmpacket_descriptor *desc)
{
	struct hv_ring_buffer_info *ring_info = &channel->inbound;
	u32 read_loc = ring_info->priv_read_index;
	u32 packetlen = desc->len8 << 3;
	u32 dsize = ring_info->ring_datasize;

	if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize)
		BUG();
	/*
	 * Include the packet trailer.
	 */
	ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
	ring_info->priv_read_index %= dsize;
}

/*

@ -1589,8 +1587,7 @@ static inline void commit_rd_index(struct vmbus_channel *channel)
	virt_rmb();
	ring_info->ring_buffer->read_index = ring_info->priv_read_index;

	if (hv_need_to_signal_on_read(ring_info))
		vmbus_set_event(channel);
	hv_signal_on_read(channel);
}

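Taken together, these in-place helpers implement a batched read: peek packets with get_next_pkt_raw(), consume them with put_pkt_raw(), then publish the read index once (and signal the host if it was blocked) with commit_rd_index(). A consumer loop would look roughly like the sketch below; demo_handle_packet() is a placeholder for the driver's real packet handler:

	static void demo_handle_packet(struct vmbus_channel *channel,
				       struct vmpacket_descriptor *desc);	/* placeholder */

	static void demo_drain_channel(struct vmbus_channel *channel)
	{
		struct vmpacket_descriptor *desc;

		/* Consume everything currently readable without touching the
		 * shared read index for every packet... */
		while ((desc = get_next_pkt_raw(channel)) != NULL) {
			demo_handle_packet(channel, desc);
			put_pkt_raw(channel, desc);
		}

		/* ...then commit the read index once and let hv_signal_on_read()
		 * decide whether the host needs a wakeup. */
		commit_rd_index(channel);
	}
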
@ -50,31 +50,4 @@ struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
					 struct i2c_smbus_alert_setup *setup);
int i2c_handle_smbus_alert(struct i2c_client *ara);

/**
 * smbus_host_notify - internal structure used by the Host Notify mechanism.
 * @adapter: the I2C adapter associated with this struct
 * @work: worker used to schedule the IRQ in the slave device
 * @lock: spinlock to check if a notification is already pending
 * @pending: flag set when a notification is pending (any new notification will
 *	     be rejected if pending is true)
 * @payload: the actual payload of the Host Notify event
 * @addr: the address of the slave device which raised the notification
 *
 * This struct needs to be allocated by i2c_setup_smbus_host_notify() and does
 * not need to be freed. Internally, i2c_setup_smbus_host_notify() uses a
 * managed resource to clean this up when the adapter get released.
 */
struct smbus_host_notify {
	struct i2c_adapter	*adapter;
	struct work_struct	work;
	spinlock_t		lock;
	bool			pending;
	u16			payload;
	u8			addr;
};

struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap);
int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify,
				 unsigned short addr, unsigned int data);

#endif /* _LINUX_I2C_SMBUS_H */

@ -30,6 +30,7 @@
#include <linux/device.h>	/* for struct device */
#include <linux/sched.h>	/* for completion */
#include <linux/mutex.h>
#include <linux/irqdomain.h>	/* for Host Notify IRQ */
#include <linux/of.h>		/* for struct device_node */
#include <linux/swab.h>		/* for swab16 */
#include <uapi/linux/i2c.h>

@ -135,7 +136,8 @@ enum i2c_alert_protocol {
 * struct i2c_driver - represent an I2C device driver
 * @class: What kind of i2c device we instantiate (for detect)
 * @attach_adapter: Callback for bus addition (deprecated)
 * @probe: Callback for device binding
 * @probe: Callback for device binding - soon to be deprecated
 * @probe_new: New callback for device binding
 * @remove: Callback for device unbinding
 * @shutdown: Callback for device shutdown
 * @alert: Alert callback, for example for the SMBus alert protocol

@ -178,6 +180,11 @@ struct i2c_driver {
	int (*probe)(struct i2c_client *, const struct i2c_device_id *);
	int (*remove)(struct i2c_client *);

	/* New driver model interface to aid the seamless removal of the
	 * current probe()'s, more commonly unused than used second parameter.
	 */
	int (*probe_new)(struct i2c_client *);

	/* driver model interfaces that don't relate to enumeration */
	void (*shutdown)(struct i2c_client *);

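A driver can adopt probe_new immediately and drop the mostly unused i2c_device_id argument. A minimal sketch (the demo names are illustrative):

	#include <linux/i2c.h>
	#include <linux/module.h>

	static int demo_probe(struct i2c_client *client)
	{
		/* enumeration data comes from client->dev (DT/ACPI) rather
		 * than from a struct i2c_device_id second argument */
		dev_info(&client->dev, "bound at address 0x%02x\n", client->addr);
		return 0;
	}

	static const struct i2c_device_id demo_ids[] = {
		{ "demo-chip", 0 },
		{ }
	};
	MODULE_DEVICE_TABLE(i2c, demo_ids);

	static struct i2c_driver demo_driver = {
		.driver = {
			.name = "demo-chip",
		},
		.probe_new = demo_probe,
		.id_table = demo_ids,	/* still used for module autoloading */
	};
	module_i2c_driver(demo_driver);

	MODULE_LICENSE("GPL");
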
@ -243,6 +250,8 @@ struct i2c_client {

extern struct i2c_client *i2c_verify_client(struct device *dev);
extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
extern const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
					const struct i2c_client *client);

static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
{

@ -567,6 +576,8 @@ struct i2c_adapter {

	struct i2c_bus_recovery_info *bus_recovery_info;
	const struct i2c_adapter_quirks *quirks;

	struct irq_domain *host_notify_domain;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)

@ -739,6 +750,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
	return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
}

int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
/**
 * module_i2c_driver() - Helper macro for registering a modular I2C driver
 * @__i2c_driver: i2c_driver struct

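The reworked Host Notify entry point above takes the adapter and the notifying client address directly, so an adapter driver can report the event straight from its interrupt handler. A sketch under assumed hardware: the demo_* struct, register offsets, and status bit are all hypothetical, only the i2c_handle_smbus_host_notify() call is from this patch:

	#include <linux/i2c.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>

	struct demo_i2c_dev {			/* hypothetical adapter state */
		void __iomem *base;
		struct i2c_adapter adapter;
	};

	#define DEMO_REG_STATUS		0x00	/* hypothetical register layout */
	#define DEMO_REG_HOST_NOTIFY	0x04
	#define DEMO_STATUS_HOST_NOTIFY	0x01

	static irqreturn_t demo_i2c_isr(int irq, void *dev_id)
	{
		struct demo_i2c_dev *i2c = dev_id;
		u32 status = readl(i2c->base + DEMO_REG_STATUS);

		if (status & DEMO_STATUS_HOST_NOTIFY) {
			/* low 7 bits of the frame carry the client address */
			u8 addr = readl(i2c->base + DEMO_REG_HOST_NOTIFY) & 0x7f;

			i2c_handle_smbus_host_notify(&i2c->adapter, addr);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}
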
@ -774,6 +786,10 @@ extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
/* must call i2c_put_adapter() when done with returned i2c_adapter device */
struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);

extern const struct of_device_id
*i2c_of_match_device(const struct of_device_id *matches,
		     struct i2c_client *client);

#else

static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)

@ -790,6 +806,14 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node
{
	return NULL;
}

static inline const struct of_device_id
*i2c_of_match_device(const struct of_device_id *matches,
		     struct i2c_client *client)
{
	return NULL;
}

#endif /* CONFIG_OF */

#if IS_ENABLED(CONFIG_ACPI)

include/linux/i2c/mlxcpld.h (new file, 52 lines)

@ -0,0 +1,52 @@
/*
 * mlxcpld.h - Mellanox I2C multiplexer support in CPLD
 *
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_I2C_MLXCPLD_H
#define _LINUX_I2C_MLXCPLD_H

/* Platform data for the CPLD I2C multiplexers */

/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info
 * @adap_ids - adapter array
 * @num_adaps - number of adapters
 * @sel_reg_addr - mux select register offset in CPLD space
 */
struct mlxcpld_mux_plat_data {
	int *adap_ids;
	int num_adaps;
	int sel_reg_addr;
};

#endif /* _LINUX_I2C_MLXCPLD_H */

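As the comment says, this platform data travels with i2c_register_board_info(). A board-code sketch under assumed wiring; the adapter numbers, register offset, bus address, and the "mlxcpld_mux_module" device name are all assumptions for illustration:

	#include <linux/i2c.h>
	#include <linux/i2c/mlxcpld.h>
	#include <linux/kernel.h>	/* ARRAY_SIZE */

	/* Hypothetical wiring: the mux fans out to child adapters 2..5 and
	 * its select register lives at offset 0x20 in the CPLD. */
	static int demo_adap_ids[] = { 2, 3, 4, 5 };

	static struct mlxcpld_mux_plat_data demo_mux_pdata = {
		.adap_ids = demo_adap_ids,
		.num_adaps = ARRAY_SIZE(demo_adap_ids),
		.sel_reg_addr = 0x20,
	};

	static struct i2c_board_info demo_mux_info = {
		I2C_BOARD_INFO("mlxcpld_mux_module", 0x70),
		.platform_data = &demo_mux_pdata,
	};
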
@ -18,12 +18,11 @@
#include <linux/rcupdate.h>

/*
 * We want shallower trees and thus more bits covered at each layer. 8
 * bits gives us large enough first layer for most use cases and maximum
 * tree depth of 4. Each idr_layer is slightly larger than 2k on 64bit and
 * 1k on 32bit.
 * Using 6 bits at each layer allows us to allocate 7 layers out of each page.
 * 8 bits only gave us 3 layers out of every pair of pages, which is less
 * efficient except for trees with a largest element between 192-255 inclusive.
 */
#define IDR_BITS 8
#define IDR_BITS 6
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)

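Back-of-the-envelope, assuming 64-bit pointers and 4 KiB pages: with IDR_BITS = 6 a layer's pointer array is 2^6 * 8 = 512 bytes, and with the bitmap and bookkeeping an idr_layer stays around 550-600 bytes, so seven fit per page; with IDR_BITS = 8 the array alone is 2 KiB, pushing the layer slightly past 2 KiB (as the removed comment noted), so only three fit in a pair of pages, which is what the new comment is counting.
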
@ -55,6 +54,32 @@ struct idr {
}
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)

/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * The value returned is the value that will be next returned from
 * idr_alloc_cyclic() if it is free (otherwise the search will start from
 * this position).
 */
static inline unsigned int idr_get_cursor(struct idr *idr)
{
	return READ_ONCE(idr->cur);
}

/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * The next call to idr_alloc_cyclic() will return @val if it is free
 * (otherwise the search will start from this position).
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
	WRITE_ONCE(idr->cur, val);
}

/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)

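A plausible use of the new accessors is preserving the cyclic-allocation position across a checkpoint/restore cycle, so IDs keep counting up instead of restarting at the low end. A minimal sketch with hypothetical demo_* names:

	#include <linux/idr.h>

	static DEFINE_IDR(demo_idr);

	/* Save the cyclic cursor as part of a checkpoint... */
	static unsigned int demo_checkpoint_cursor(void)
	{
		return idr_get_cursor(&demo_idr);
	}

	/* ...and put it back on restore, before the next idr_alloc_cyclic(). */
	static void demo_restore_cursor(unsigned int saved)
	{
		idr_set_cursor(&demo_idr, saved);
	}
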
@ -195,6 +220,11 @@ static inline int ida_get_new(struct ida *ida, int *p_id)
	return ida_get_new_above(ida, 0, p_id);
}

static inline bool ida_is_empty(struct ida *ida)
{
	return idr_is_empty(&ida->idr);
}

void __init idr_init_cache(void);

#endif /* __IDR_H__ */

@ -1576,6 +1576,9 @@ struct ieee80211_vht_operation {
#define WLAN_AUTH_SHARED_KEY 1
#define WLAN_AUTH_FT 2
#define WLAN_AUTH_SAE 3
#define WLAN_AUTH_FILS_SK 4
#define WLAN_AUTH_FILS_SK_PFS 5
#define WLAN_AUTH_FILS_PK 6
#define WLAN_AUTH_LEAP 128

#define WLAN_AUTH_CHALLENGE_LEN 128

@ -1960,6 +1963,26 @@ enum ieee80211_eid {

	WLAN_EID_VENDOR_SPECIFIC = 221,
	WLAN_EID_QOS_PARAMETER = 222,
	WLAN_EID_CAG_NUMBER = 237,
	WLAN_EID_AP_CSN = 239,
	WLAN_EID_FILS_INDICATION = 240,
	WLAN_EID_DILS = 241,
	WLAN_EID_FRAGMENT = 242,
	WLAN_EID_EXTENSION = 255
};

/* Element ID Extensions for Element ID 255 */
enum ieee80211_eid_ext {
	WLAN_EID_EXT_ASSOC_DELAY_INFO = 1,
	WLAN_EID_EXT_FILS_REQ_PARAMS = 2,
	WLAN_EID_EXT_FILS_KEY_CONFIRM = 3,
	WLAN_EID_EXT_FILS_SESSION = 4,
	WLAN_EID_EXT_FILS_HLP_CONTAINER = 5,
	WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN = 6,
	WLAN_EID_EXT_KEY_DELIVERY = 7,
	WLAN_EID_EXT_FILS_WRAPPED_DATA = 8,
	WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
	WLAN_EID_EXT_FILS_NONCE = 13,
};

/* Action category code */

@ -2073,6 +2096,9 @@ enum ieee80211_key_len {
#define IEEE80211_GCMP_MIC_LEN		16
#define IEEE80211_GCMP_PN_LEN		6

#define FILS_NONCE_LEN			16
#define FILS_MAX_KEK_LEN		64

/* Public action codes */
enum ieee80211_pub_actioncode {
	WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4,

Some files were not shown because too many files have changed in this diff.