for-5.20/block-2022-07-29

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmLko3gQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpmQaD/90NKFj4v8I456TUQyg1jimXEsL+e84E6o2
 ALWVb6JzQvlPVQXNLnK5YKIunMWOTtTMz0nyB8sVRwVJVJO0P5d7QopAkZM8fkyU
 MK5OCzoryENw4DTc2wJS4in6cSbGylIuN74wMzlf7+M67JTImfoZQhbTMcjwzZfn
 b3OlL6sID7zMXwGcuOJPZyUJICCpDhzdSF9JXqKma5PQuG2SBmQyvFxJAcsoFBPc
 YetnoRIOIN6yBvsIZaPaYq7XI9MIvF0e67EQtyCEHj4tHpyVnyDWkeObVFULsISU
 gGEKbkYPvNUzRAU5Q1NBBHh1tTfkf/MaUxTuZwoEwZ/s04IGBGMmrZGyfvdfzYo6
 M7NwSEg/TrUSNfTwn65mQi7uOXu1pGkJrqz84Flm8u9Qid9Vd7LExLG5p/ggnWdH
 5th93MDEmtEg29e9DXpEAuS5d0t3TtSvosflaKpyfNNfr+P0rWCN6GM/uW62VUTK
 ls69SQh/AQJRbg64jU4xper6WhaYtSXK7TKEnxJycoEn9gYNyCcdot2uekth0xRH
 ChHGmRlteiqe/y4uFWn/2dcxWjoleiHbFjTaiRL75WVl8wIDEjw02LGuoZ61Ss9H
 WOV+MT7KqNjBGe6lreUY+O/PO02dzmoR6heJXN19p8zr/pBuLCTGX7UpO7rzgaBR
 4N1HEozvIw==
 =celk
 -----END PGP SIGNATURE-----

Merge tag 'for-5.20/block-2022-07-29' of git://git.kernel.dk/linux-block

Pull block updates from Jens Axboe:

 - Improve the type checking of request flags (Bart; see the typing
   sketch after this list)

 - Ensure queue mapping for a single queue always picks the right queue
   (Bart)

 - Sanitize the io priority handling (Jan)

 - rq-qos race fix (Jinke)

 - Reserved tags handling improvements (John)

 - Separate memory alignment from file/disk offset alignment for O_DIRECT
   (Keith)

 - Add new ublk driver, a userspace block driver that uses io_uring to
   communicate with the userspace backend (Ming)

 - Use try_cmpxchg() to clean up the code in various spots (Uros; see
   the loop sketch after this list)

 - Finally remove bdevname() (Christoph)

 - Clean up the zoned device handling (Christoph)

 - Clean up independent access range support (Christoph)

 - Clean up and improve block sysfs handling (Christoph)

 - Clean up and improve teardown of block devices.

   This turns the usual two-step process into something that is simpler
   to implement and handle in block drivers (Christoph)

 - Clean up chunk size handling (Christoph)

 - Misc cleanups and fixes (Bart, Bo, Dan, GuoYong, Jason, Keith, Liu,
   Ming, Sebastian, Yang, Ying)
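
The request-flag typing referenced in the first bullet rests on sparse's
__bitwise annotation. A minimal sketch of the pattern, assuming a
kernel-style build (under sparse the attributes are real; in a normal
compile they expand to nothing, so the typing is free at runtime):

    #ifdef __CHECKER__
    #define __bitwise __attribute__((bitwise))
    #define __force   __attribute__((force))
    #else
    #define __bitwise
    #define __force
    #endif

    typedef unsigned int __bitwise blk_opf_t;

    #define REQ_SYNC ((__force blk_opf_t)(1U << 11))

    static void submit(blk_opf_t opf) { /* consume a typed op+flags word */ }

    /* submit(REQ_SYNC)  -> OK, the value was converted with __force       */
    /* submit(1 << 11)   -> sparse warns: plain int passed as blk_opf_t    */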

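The try_cmpxchg() conversions all have the same shape; a sketch with
illustrative names (v and SOME_FLAG are not from this merge):

    int old = atomic_read(&v), new;

    /* try_cmpxchg() reloads "old" on failure by itself, so the retry
     * loop no longer needs an explicit re-read of the atomic. */
    do {
        new = old | SOME_FLAG;
    } while (!atomic_try_cmpxchg(&v, &old, new));
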
* tag 'for-5.20/block-2022-07-29' of git://git.kernel.dk/linux-block: (178 commits)
  ublk_drv: fix double shift bug
  ublk_drv: make sure that correct flags(features) returned to userspace
  ublk_drv: fix error handling of ublk_add_dev
  ublk_drv: fix lockdep warning
  block: remove __blk_get_queue
  block: call blk_mq_exit_queue from disk_release for never added disks
  blk-mq: fix error handling in __blk_mq_alloc_disk
  ublk: defer disk allocation
  ublk: rewrite ublk_ctrl_get_queue_affinity to not rely on hctx->cpumask
  ublk: fold __ublk_create_dev into ublk_ctrl_add_dev
  ublk: cleanup ublk_ctrl_uring_cmd
  ublk: simplify ublk_ch_open and ublk_ch_release
  ublk: remove the empty open and release block device operations
  ublk: remove UBLK_IO_F_PREFLUSH
  ublk: add a MAINTAINERS entry
  block: don't allow the same type rq_qos add more than once
  mmc: fix disk/queue leak in case of adding disk failure
  ublk_drv: fix an IS_ERR() vs NULL check
  ublk: remove UBLK_IO_F_INTEGRITY
  ublk_drv: remove unneeded semicolon
  ...
Linus Torvalds 2022-08-02 13:46:35 -07:00
commit c013d0af81
261 changed files with 3640 additions and 2163 deletions

include/linux/bio.h

@@ -405,7 +405,7 @@ extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
-unsigned int opf, gfp_t gfp_mask,
+blk_opf_t opf, gfp_t gfp_mask,
struct bio_set *bs);
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);
@@ -418,7 +418,7 @@ int bio_init_clone(struct block_device *bdev, struct bio *bio,
extern struct bio_set fs_bio_set;
static inline struct bio *bio_alloc(struct block_device *bdev,
-unsigned short nr_vecs, unsigned int opf, gfp_t gfp_mask)
+unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}
@@ -456,9 +456,9 @@ struct request_queue;
extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
-unsigned short max_vecs, unsigned int opf);
+unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
-void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf);
+void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
@@ -789,6 +789,6 @@ static inline void bio_clear_polled(struct bio *bio)
}
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
-unsigned int nr_pages, unsigned int opf, gfp_t gfp);
+unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
#endif /* __LINUX_BIO_H */

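With bio.h now taking blk_opf_t, a hypothetical caller (names and GFP
flags illustrative, error handling elided) looks like this; the op and
its flags are still OR-ed together, but both sides are typed:

    struct bio *bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOIO);

    bio->bi_iter.bi_sector = sector;
    __bio_add_page(bio, page, PAGE_SIZE, 0);
    submit_bio(bio);
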
include/linux/blk-mq.h

@@ -57,6 +57,7 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
/* queue has elevator attached */
#define RQF_ELV ((__force req_flags_t)(1 << 22))
+#define RQF_RESV ((__force req_flags_t)(1 << 23))
/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
@@ -79,7 +80,7 @@ struct request {
struct blk_mq_ctx *mq_ctx;
struct blk_mq_hw_ctx *mq_hctx;
-unsigned int cmd_flags; /* op and common flags */
+blk_opf_t cmd_flags; /* op and common flags */
req_flags_t rq_flags;
int tag;
@@ -197,8 +198,10 @@ struct request {
void *end_io_data;
};
-#define req_op(req) \
-((req)->cmd_flags & REQ_OP_MASK)
+static inline enum req_op req_op(const struct request *req)
+{
+return req->cmd_flags & REQ_OP_MASK;
+}
static inline bool blk_rq_is_passthrough(struct request *rq)
{
@@ -519,7 +522,7 @@ struct blk_mq_queue_data {
bool last;
};
-typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
+typedef bool (busy_tag_iter_fn)(struct request *, void *);
/**
* struct blk_mq_ops - Callback functions that implements block driver
@@ -574,7 +577,7 @@ struct blk_mq_ops {
/**
* @timeout: Called on request timeout.
*/
-enum blk_eh_timer_return (*timeout)(struct request *, bool);
+enum blk_eh_timer_return (*timeout)(struct request *);
/**
* @poll: Called to poll for completion of a specific tag.
@@ -686,10 +689,12 @@ struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
\
__blk_mq_alloc_disk(set, queuedata, &__key); \
})
+struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
+struct lock_class_key *lkclass);
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q);
-void blk_mq_unregister_dev(struct device *, struct request_queue *);
+void blk_mq_destroy_queue(struct request_queue *);
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
@@ -710,10 +715,10 @@ enum {
BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
};
-struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
+struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
-unsigned int op, blk_mq_req_flags_t flags,
+blk_opf_t opf, blk_mq_req_flags_t flags,
unsigned int hctx_idx);
/*
@@ -823,6 +828,11 @@ static inline bool blk_mq_need_time_stamp(struct request *rq)
return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
}
+static inline bool blk_mq_is_reserved_rq(struct request *rq)
+{
+return rq->rq_flags & RQF_RESV;
+}
/*
* Batched completions only work when there is no I/O error and no special
* ->end_io handler.
@@ -1121,12 +1131,12 @@ void blk_dump_rq_flags(struct request *, char *);
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
-return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
+return disk_zone_no(rq->q->disk, blk_rq_pos(rq));
}
static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
-return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
+return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
}
}
bool blk_req_needs_zone_write_lock(struct request *rq);
@@ -1148,8 +1158,8 @@ static inline void blk_req_zone_write_unlock(struct request *rq)
static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
-return rq->q->seq_zones_wlock &&
-test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
+return rq->q->disk->seq_zones_wlock &&
+test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock);
}
static inline bool blk_req_can_dispatch_to_zone(struct request *rq)

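Because req_op() now returns enum req_op, driver dispatch code gets real
type checking. A sketch of a ->queue_rq()-style switch (the mydrv
helpers are hypothetical):

    static blk_status_t mydrv_dispatch(struct request *rq)
    {
        switch (req_op(rq)) {
        case REQ_OP_READ:
        case REQ_OP_WRITE:
            return mydrv_read_write(rq);
        case REQ_OP_FLUSH:
            return mydrv_flush(rq);
        default:
            return BLK_STS_NOTSUPP;
        }
    }
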
include/linux/blk_types.h

@@ -240,6 +240,8 @@ static inline void bio_issue_init(struct bio_issue *issue,
((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
+typedef __u32 __bitwise blk_opf_t;
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
@@ -250,7 +252,7 @@ typedef unsigned int blk_qc_t;
struct bio {
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
-unsigned int bi_opf; /* bottom bits REQ_OP, top bits
+blk_opf_t bi_opf; /* bottom bits REQ_OP, top bits
* req_flags.
*/
unsigned short bi_flags; /* BIO_* below */
@@ -337,8 +339,12 @@ enum {
typedef __u32 __bitwise blk_mq_req_flags_t;
-/*
- * Operations and flags common to the bio and request structures.
+#define REQ_OP_BITS 8
+#define REQ_OP_MASK (__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
+#define REQ_FLAG_BITS 24
+
+/**
+ * enum req_op - Operations common to the bio and request structures.
* We use 8 bits for encoding the operation, and the remaining 24 for flags.
*
* The least significant bit of the operation number indicates the data
@@ -350,41 +356,37 @@ typedef __u32 __bitwise blk_mq_req_flags_t;
* If a operation does not transfer data the least significant bit has no
* meaning.
*/
-#define REQ_OP_BITS 8
-#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1)
-#define REQ_FLAG_BITS 24
-enum req_opf {
+enum req_op {
/* read sectors from the device */
-REQ_OP_READ = 0,
+REQ_OP_READ = (__force blk_opf_t)0,
/* write sectors to the device */
-REQ_OP_WRITE = 1,
+REQ_OP_WRITE = (__force blk_opf_t)1,
/* flush the volatile write cache */
-REQ_OP_FLUSH = 2,
+REQ_OP_FLUSH = (__force blk_opf_t)2,
/* discard sectors */
-REQ_OP_DISCARD = 3,
+REQ_OP_DISCARD = (__force blk_opf_t)3,
/* securely erase sectors */
-REQ_OP_SECURE_ERASE = 5,
+REQ_OP_SECURE_ERASE = (__force blk_opf_t)5,
/* write the zero filled sector many times */
-REQ_OP_WRITE_ZEROES = 9,
+REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
/* Open a zone */
-REQ_OP_ZONE_OPEN = 10,
+REQ_OP_ZONE_OPEN = (__force blk_opf_t)10,
/* Close a zone */
-REQ_OP_ZONE_CLOSE = 11,
+REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11,
/* Transition a zone to full */
-REQ_OP_ZONE_FINISH = 12,
+REQ_OP_ZONE_FINISH = (__force blk_opf_t)12,
/* write data at the current zone write pointer */
-REQ_OP_ZONE_APPEND = 13,
+REQ_OP_ZONE_APPEND = (__force blk_opf_t)13,
/* reset a zone write pointer */
-REQ_OP_ZONE_RESET = 15,
+REQ_OP_ZONE_RESET = (__force blk_opf_t)15,
/* reset all the zone present on the device */
-REQ_OP_ZONE_RESET_ALL = 17,
+REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)17,
/* Driver private requests */
-REQ_OP_DRV_IN = 34,
-REQ_OP_DRV_OUT = 35,
+REQ_OP_DRV_IN = (__force blk_opf_t)34,
+REQ_OP_DRV_OUT = (__force blk_opf_t)35,
-REQ_OP_LAST,
+REQ_OP_LAST = (__force blk_opf_t)36,
};
enum req_flag_bits {
@@ -425,28 +427,31 @@ enum req_flag_bits {
__REQ_NR_BITS, /* stops here */
};
-#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER)
-#define REQ_SYNC (1ULL << __REQ_SYNC)
-#define REQ_META (1ULL << __REQ_META)
-#define REQ_PRIO (1ULL << __REQ_PRIO)
-#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
-#define REQ_IDLE (1ULL << __REQ_IDLE)
-#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
-#define REQ_FUA (1ULL << __REQ_FUA)
-#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
-#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
-#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
-#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
-#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
+#define REQ_FAILFAST_DEV \
+(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT \
+(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER \
+(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
+#define REQ_SYNC (__force blk_opf_t)(1ULL << __REQ_SYNC)
+#define REQ_META (__force blk_opf_t)(1ULL << __REQ_META)
+#define REQ_PRIO (__force blk_opf_t)(1ULL << __REQ_PRIO)
+#define REQ_NOMERGE (__force blk_opf_t)(1ULL << __REQ_NOMERGE)
+#define REQ_IDLE (__force blk_opf_t)(1ULL << __REQ_IDLE)
+#define REQ_INTEGRITY (__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
+#define REQ_FUA (__force blk_opf_t)(1ULL << __REQ_FUA)
+#define REQ_PREFLUSH (__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
+#define REQ_RAHEAD (__force blk_opf_t)(1ULL << __REQ_RAHEAD)
+#define REQ_BACKGROUND (__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
+#define REQ_NOWAIT (__force blk_opf_t)(1ULL << __REQ_NOWAIT)
+#define REQ_CGROUP_PUNT (__force blk_opf_t)(1ULL << __REQ_CGROUP_PUNT)
-#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
-#define REQ_POLLED (1ULL << __REQ_POLLED)
-#define REQ_ALLOC_CACHE (1ULL << __REQ_ALLOC_CACHE)
+#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
+#define REQ_POLLED (__force blk_opf_t)(1ULL << __REQ_POLLED)
+#define REQ_ALLOC_CACHE (__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
-#define REQ_DRV (1ULL << __REQ_DRV)
-#define REQ_SWAP (1ULL << __REQ_SWAP)
+#define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV)
+#define REQ_SWAP (__force blk_opf_t)(1ULL << __REQ_SWAP)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -463,26 +468,28 @@ enum stat_group {
NR_STAT_GROUPS
};
-#define bio_op(bio) \
-((bio)->bi_opf & REQ_OP_MASK)
+static inline enum req_op bio_op(const struct bio *bio)
+{
+return bio->bi_opf & REQ_OP_MASK;
+}
/* obsolete, don't use in new code */
-static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
-unsigned op_flags)
+static inline void bio_set_op_attrs(struct bio *bio, enum req_op op,
+blk_opf_t op_flags)
{
bio->bi_opf = op | op_flags;
}
-static inline bool op_is_write(unsigned int op)
+static inline bool op_is_write(blk_opf_t op)
{
-return (op & 1);
+return !!(op & (__force blk_opf_t)1);
}
/*
* Check if the bio or request is one that needs special treatment in the
* flush state machine.
*/
-static inline bool op_is_flush(unsigned int op)
+static inline bool op_is_flush(blk_opf_t op)
{
return op & (REQ_FUA | REQ_PREFLUSH);
}
@@ -492,13 +499,13 @@ static inline bool op_is_flush(unsigned int op)
* PREFLUSH flag. Other operations may be marked as synchronous using the
* REQ_SYNC flag.
*/
-static inline bool op_is_sync(unsigned int op)
+static inline bool op_is_sync(blk_opf_t op)
{
return (op & REQ_OP_MASK) == REQ_OP_READ ||
(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
-static inline bool op_is_discard(unsigned int op)
+static inline bool op_is_discard(blk_opf_t op)
{
return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}
@@ -509,7 +516,7 @@ static inline bool op_is_discard(unsigned int op)
* due to its different handling in the block layer and device response in
* case of command failure.
*/
-static inline bool op_is_zone_mgmt(enum req_opf op)
+static inline bool op_is_zone_mgmt(enum req_op op)
{
switch (op & REQ_OP_MASK) {
case REQ_OP_ZONE_RESET:
@@ -522,7 +529,7 @@ static inline bool op_is_zone_mgmt(enum req_opf op)
}
}
-static inline int op_stat_group(unsigned int op)
+static inline int op_stat_group(enum req_op op)
{
if (op_is_discard(op))
return STAT_DISCARD;

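The encoding rule in the comment above is what keeps op_is_write() a
single bit test: data-out operations get odd numbers. A worked example
(demo() is illustrative):

    static void demo(void)
    {
        bool r = op_is_write(REQ_OP_READ);    /* false: READ    = 0, even */
        bool w = op_is_write(REQ_OP_WRITE);   /* true:  WRITE   = 1, odd  */
        bool d = op_is_write(REQ_OP_DISCARD); /* true:  DISCARD = 3, odd  */
    }
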
include/linux/blkdev.h

@@ -148,6 +148,7 @@ struct gendisk {
#define GD_NATIVE_CAPACITY 3
#define GD_ADDED 4
#define GD_SUPPRESS_PART_SCAN 5
+#define GD_OWNS_QUEUE 6
struct mutex open_mutex; /* open/close mutex */
unsigned open_partitions; /* number of open partitions */
@@ -163,6 +164,29 @@ struct gendisk {
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct kobject integrity_kobj;
#endif /* CONFIG_BLK_DEV_INTEGRITY */
+#ifdef CONFIG_BLK_DEV_ZONED
+/*
+ * Zoned block device information for request dispatch control.
+ * nr_zones is the total number of zones of the device. This is always
+ * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
+ * bits which indicates if a zone is conventional (bit set) or
+ * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
+ * bits which indicates if a zone is write locked, that is, if a write
+ * request targeting the zone was dispatched.
+ *
+ * Reads of this information must be protected with blk_queue_enter() /
+ * blk_queue_exit(). Modifying this information is only allowed while
+ * no requests are being processed. See also blk_mq_freeze_queue() and
+ * blk_mq_unfreeze_queue().
+ */
+unsigned int nr_zones;
+unsigned int max_open_zones;
+unsigned int max_active_zones;
+unsigned long *conv_zones_bitmap;
+unsigned long *seq_zones_wlock;
+#endif /* CONFIG_BLK_DEV_ZONED */
#if IS_ENABLED(CONFIG_CDROM)
struct cdrom_device_info *cdi;
#endif
@@ -170,6 +194,12 @@ struct gendisk {
struct badblocks *bb;
struct lockdep_map lockdep_map;
u64 diskseq;
+/*
+ * Independent sector access ranges. This is always NULL for
+ * devices that do not have multiple independent access ranges.
+ */
+struct blk_independent_access_ranges *ia_ranges;
};
static inline bool disk_live(struct gendisk *disk)
@@ -220,7 +250,7 @@ static inline int blk_validate_block_size(unsigned long bsize)
return 0;
}
-static inline bool blk_op_is_passthrough(unsigned int op)
+static inline bool blk_op_is_passthrough(blk_opf_t op)
{
op &= REQ_OP_MASK;
return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
@@ -284,15 +314,15 @@ struct queue_limits {
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
void *data);
-void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model);
+void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);
#ifdef CONFIG_BLK_DEV_ZONED
#define BLK_ALL_ZONES ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
-unsigned int blkdev_nr_zones(struct gendisk *disk);
-extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
+unsigned int bdev_nr_zones(struct block_device *bdev);
+extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
sector_t sectors, sector_t nr_sectors,
gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
@@ -305,7 +335,7 @@ extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
return 0;
}
@@ -424,6 +454,11 @@ struct request_queue {
unsigned long nr_requests; /* Max # of requests */
unsigned int dma_pad_mask;
+/*
+ * Drivers that set dma_alignment to less than 511 must be prepared to
+ * handle individual bvec's that are not a multiple of a SECTOR_SIZE
+ * due to possible offsets.
+ */
unsigned int dma_alignment;
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
@@ -455,31 +490,6 @@ struct request_queue {
unsigned int required_elevator_features;
-#ifdef CONFIG_BLK_DEV_ZONED
-/*
- * Zoned block device information for request dispatch control.
- * nr_zones is the total number of zones of the device. This is always
- * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
- * bits which indicates if a zone is conventional (bit set) or
- * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
- * bits which indicates if a zone is write locked, that is, if a write
- * request targeting the zone was dispatched. All three fields are
- * initialized by the low level device driver (e.g. scsi/sd.c).
- * Stacking drivers (device mappers) may or may not initialize
- * these fields.
- *
- * Reads of this information must be protected with blk_queue_enter() /
- * blk_queue_exit(). Modifying this information is only allowed while
- * no requests are being processed. See also blk_mq_freeze_queue() and
- * blk_mq_unfreeze_queue().
- */
-unsigned int nr_zones;
-unsigned long *conv_zones_bitmap;
-unsigned long *seq_zones_wlock;
-unsigned int max_open_zones;
-unsigned int max_active_zones;
-#endif /* CONFIG_BLK_DEV_ZONED */
int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
struct blk_trace __rcu *blk_trace;
@@ -533,12 +543,6 @@ struct request_queue {
bool mq_sysfs_init_done;
-/*
- * Independent sector access ranges. This is always NULL for
- * devices that do not have multiple independent access ranges.
- */
-struct blk_independent_access_ranges *ia_ranges;
/**
* @srcu: Sleepable RCU. Use as lock when type of the request queue
* is blocking (BLK_MQ_F_BLOCKING). Must be the last member
@@ -559,7 +563,6 @@ struct request_queue {
#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
-#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
@@ -587,7 +590,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_has_srcu(q) test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags)
-#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
@@ -663,76 +665,69 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
}
}
-static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
-{
-return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
-}
#ifdef CONFIG_BLK_DEV_ZONED
-static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
-return blk_queue_is_zoned(q) ? q->nr_zones : 0;
+return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}
-static inline unsigned int blk_queue_zone_no(struct request_queue *q,
-sector_t sector)
+static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
-if (!blk_queue_is_zoned(q))
+if (!blk_queue_is_zoned(disk->queue))
return 0;
-return sector >> ilog2(q->limits.chunk_sectors);
+return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
-static inline bool blk_queue_zone_is_seq(struct request_queue *q,
-sector_t sector)
+static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
-if (!blk_queue_is_zoned(q))
+if (!blk_queue_is_zoned(disk->queue))
return false;
-if (!q->conv_zones_bitmap)
+if (!disk->conv_zones_bitmap)
return true;
-return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
+return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}
-static inline void blk_queue_max_open_zones(struct request_queue *q,
+static inline void disk_set_max_open_zones(struct gendisk *disk,
unsigned int max_open_zones)
{
-q->max_open_zones = max_open_zones;
+disk->max_open_zones = max_open_zones;
}
-static inline unsigned int queue_max_open_zones(const struct request_queue *q)
-{
-return q->max_open_zones;
-}
-static inline void blk_queue_max_active_zones(struct request_queue *q,
+static inline void disk_set_max_active_zones(struct gendisk *disk,
unsigned int max_active_zones)
{
-q->max_active_zones = max_active_zones;
+disk->max_active_zones = max_active_zones;
}
-static inline unsigned int queue_max_active_zones(const struct request_queue *q)
+static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
-return q->max_active_zones;
+return bdev->bd_disk->max_open_zones;
}
+static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
+{
+return bdev->bd_disk->max_active_zones;
+}
#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
return 0;
}
-static inline bool blk_queue_zone_is_seq(struct request_queue *q,
-sector_t sector)
+static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
return false;
}
-static inline unsigned int blk_queue_zone_no(struct request_queue *q,
-sector_t sector)
+static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
return 0;
}
-static inline unsigned int queue_max_open_zones(const struct request_queue *q)
+static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
return 0;
}
-static inline unsigned int queue_max_active_zones(const struct request_queue *q)
+static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
return 0;
}
@@ -812,8 +807,6 @@ static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
int bdev_disk_changed(struct gendisk *disk, bool invalidate);
-struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
-struct lock_class_key *lkclass);
void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
@@ -832,7 +825,6 @@ struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);
\
__blk_alloc_disk(node_id, &__key); \
})
-void blk_cleanup_disk(struct gendisk *disk);
int __register_blkdev(unsigned int major, const char *name,
void (*probe)(dev_t devt));
@@ -880,7 +872,7 @@ extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
/* Helper to convert REQ_OP_XXX to its string format XXX */
-extern const char *blk_op_str(unsigned int op);
+extern const char *blk_op_str(enum req_op op);
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
@@ -898,64 +890,33 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
return bdev->bd_queue; /* this is never NULL */
}
-#ifdef CONFIG_BLK_DEV_ZONED
/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
static inline unsigned int bio_zone_no(struct bio *bio)
{
-return blk_queue_zone_no(bdev_get_queue(bio->bi_bdev),
-bio->bi_iter.bi_sector);
+return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}
static inline unsigned int bio_zone_is_seq(struct bio *bio)
{
-return blk_queue_zone_is_seq(bdev_get_queue(bio->bi_bdev),
-bio->bi_iter.bi_sector);
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
-int op)
-{
-if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
-return min(q->limits.max_discard_sectors,
-UINT_MAX >> SECTOR_SHIFT);
-if (unlikely(op == REQ_OP_WRITE_ZEROES))
-return q->limits.max_write_zeroes_sectors;
-return q->limits.max_sectors;
+return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}
/*
- * Return maximum size of a request at given offset. Only valid for
- * file system requests.
+ * Return how much of the chunk is left to be used for I/O at a given offset.
*/
-static inline unsigned int blk_max_size_offset(struct request_queue *q,
-sector_t offset,
-unsigned int chunk_sectors)
+static inline unsigned int blk_chunk_sectors_left(sector_t offset,
+unsigned int chunk_sectors)
{
-if (!chunk_sectors) {
-if (q->limits.chunk_sectors)
-chunk_sectors = q->limits.chunk_sectors;
-else
-return q->limits.max_sectors;
-}
-if (likely(is_power_of_2(chunk_sectors)))
-chunk_sectors -= offset & (chunk_sectors - 1);
-else
-chunk_sectors -= sector_div(offset, chunk_sectors);
-return min(q->limits.max_sectors, chunk_sectors);
+if (unlikely(!is_power_of_2(chunk_sectors)))
+return chunk_sectors - sector_div(offset, chunk_sectors);
+return chunk_sectors - (offset & (chunk_sectors - 1));
}
/*
* Access functions for manipulating queue properties
*/
-extern void blk_cleanup_queue(struct request_queue *);
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
@@ -1337,27 +1298,9 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
-if (q)
-return blk_queue_zone_sectors(q);
-return 0;
-}
-static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
-{
-struct request_queue *q = bdev_get_queue(bdev);
-if (q)
-return queue_max_open_zones(q);
-return 0;
-}
-static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
-{
-struct request_queue *q = bdev_get_queue(bdev);
-if (q)
-return queue_max_active_zones(q);
-return 0;
+if (!blk_queue_is_zoned(q))
+return 0;
+return q->limits.chunk_sectors;
}
static inline int queue_dma_alignment(const struct request_queue *q)
@@ -1365,6 +1308,18 @@ static inline int queue_dma_alignment(const struct request_queue *q)
return q ? q->dma_alignment : 511;
}
+static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
+{
+return queue_dma_alignment(bdev_get_queue(bdev));
+}
+static inline bool bdev_iter_is_aligned(struct block_device *bdev,
+struct iov_iter *iter)
+{
+return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
+bdev_logical_block_size(bdev) - 1);
+}
static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
unsigned int len)
{
@@ -1426,7 +1381,7 @@ struct block_device_operations {
unsigned int flags);
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
-int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
+int (*rw_page)(struct block_device *, sector_t, struct page *, enum req_op);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
unsigned int (*check_events) (struct gendisk *disk,
@@ -1479,9 +1434,9 @@ static inline void blk_wake_io_task(struct task_struct *waiter)
}
unsigned long bdev_start_io_acct(struct block_device *bdev,
-unsigned int sectors, unsigned int op,
+unsigned int sectors, enum req_op op,
unsigned long start_time);
-void bdev_end_io_acct(struct block_device *bdev, unsigned int op,
+void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
unsigned long start_time);
void bio_start_io_acct_time(struct bio *bio, unsigned long start_time);
@@ -1502,7 +1457,6 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
int bdev_read_only(struct block_device *bdev);
int set_blocksize(struct block_device *bdev, int size);
-const char *bdevname(struct block_device *bdev, char *buffer);
int lookup_bdev(const char *pathname, dev_t *dev);
void blkdev_show(struct seq_file *seqf, off_t offset);

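Two of the helpers above deserve a worked example. For
blk_chunk_sectors_left(), with a 128-sector chunk and an I/O starting at
sector offset 200, 200 & 127 = 72 sectors of the chunk are used, so 56
remain. bdev_iter_is_aligned() is the piece that decouples memory
alignment from logical-block alignment for O_DIRECT; a hedged sketch of
a caller:

    unsigned int left = blk_chunk_sectors_left(200, 128);  /* 56 */

    /* Hypothetical O_DIRECT path: user memory only has to satisfy the
     * queue's dma_alignment, while the offset/length of each segment
     * must still be logical-block aligned. */
    if (!bdev_iter_is_aligned(bdev, iter))
        return -EINVAL;
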
include/linux/blktrace_api.h

@@ -7,6 +7,7 @@
#include <linux/compat.h>
#include <uapi/linux/blktrace_api.h>
#include <linux/list.h>
+#include <linux/blk_types.h>
#if defined(CONFIG_BLK_DEV_IO_TRACE)
@@ -77,10 +78,6 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);
-extern void blk_trace_remove_sysfs(struct device *dev);
-extern int blk_trace_init_sysfs(struct device *dev);
extern struct attribute_group blk_trace_attr_group;
#else /* !CONFIG_BLK_DEV_IO_TRACE */
# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
@@ -91,13 +88,7 @@ extern struct attribute_group blk_trace_attr_group;
# define blk_trace_remove(q) (-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
# define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0)
-# define blk_trace_remove_sysfs(dev) do { } while (0)
# define blk_trace_note_message_enabled(q) (false)
-static inline int blk_trace_init_sysfs(struct device *dev)
-{
-return 0;
-}
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_COMPAT
@@ -115,7 +106,7 @@ struct compat_blk_user_trace_setup {
#endif
-void blk_fill_rwbs(char *rwbs, unsigned int op);
+void blk_fill_rwbs(char *rwbs, blk_opf_t opf);
static inline sector_t blk_rq_trace_sector(struct request *rq)
{

include/linux/buffer_head.h

@@ -9,6 +9,7 @@
#define _LINUX_BUFFER_HEAD_H
#include <linux/types.h>
+#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
@@ -201,11 +202,11 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(int, int, int, struct buffer_head * bh[]);
+void ll_rw_block(blk_opf_t, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
-int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
-void write_dirty_buffer(struct buffer_head *bh, int op_flags);
-int submit_bh(int, int, struct buffer_head *);
+int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
+void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
+int submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);

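A hypothetical filesystem caller updated for the merged argument; the
operation and its flags now travel in one blk_opf_t:

    submit_bh(REQ_OP_WRITE | REQ_SYNC, bh); /* was submit_bh(REQ_OP_WRITE, REQ_SYNC, bh) */
    write_dirty_buffer(bh, REQ_SYNC);
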
include/linux/dm-io.h

@@ -13,6 +13,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/blk_types.h>
struct dm_io_region {
struct block_device *bdev;
@@ -57,8 +58,7 @@ struct dm_io_notify {
*/
struct dm_io_client;
struct dm_io_request {
-int bi_op; /* REQ_OP */
-int bi_op_flags; /* req_flag_bits */
+blk_opf_t bi_opf; /* Request type and flags */
struct dm_io_memory mem; /* Memory to use for io */
struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
struct dm_io_client *client; /* Client memory handler */

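A sketch of a dm target issuing a preflush write with the merged field
(client setup and the mem/notify members elided; io_client is
hypothetical):

    struct dm_io_request io_req = {
        .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH,
        .client = io_client,
    };
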
include/linux/ioprio.h

@@ -11,7 +11,7 @@
/*
* Default IO priority.
*/
-#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_BE_NORM)
+#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0)
/*
* Check that a priority value has a valid class.
@@ -46,23 +46,19 @@ static inline int task_nice_ioclass(struct task_struct *task)
return IOPRIO_CLASS_BE;
}
-/*
- * If the calling process has set an I/O priority, use that. Otherwise, return
- * the default I/O priority.
- */
-static inline int get_current_ioprio(void)
+#ifdef CONFIG_BLOCK
+int __get_task_ioprio(struct task_struct *p);
+#else
+static inline int __get_task_ioprio(struct task_struct *p)
{
-struct io_context *ioc = current->io_context;
-if (ioc)
-return ioc->ioprio;
return IOPRIO_DEFAULT;
}
+#endif /* CONFIG_BLOCK */
-/*
- * For inheritance, return the highest of the two given priorities
- */
-extern int ioprio_best(unsigned short aprio, unsigned short bprio);
+static inline int get_current_ioprio(void)
+{
+return __get_task_ioprio(current);
+}
extern int set_task_ioprio(struct task_struct *task, int ioprio);

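With IOPRIO_CLASS_NONE stored as the default, the effective priority is
now computed per task. A sketch of what the out-of-line helper has to do
(the real body lives in block/ioprio.c; this is not a verbatim copy):

    int __get_task_ioprio(struct task_struct *p)
    {
        struct io_context *ioc = p->io_context;
        int prio = ioc ? ioc->ioprio : IOPRIO_DEFAULT;

        /* No explicit priority set: derive class/level from nice. */
        if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
            prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
                                     task_nice_ioprio(p));
        return prio;
    }
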
include/linux/jbd2.h

@@ -1557,7 +1557,7 @@ extern int jbd2_journal_wipe (journal_t *, int);
extern int jbd2_journal_skip_recovery (journal_t *);
extern void jbd2_journal_update_sb_errno(journal_t *);
extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
-unsigned long, int);
+unsigned long, blk_opf_t);
extern void jbd2_journal_abort (journal_t *, int);
extern int jbd2_journal_errno (journal_t *);
extern void jbd2_journal_ack_err (journal_t *);

include/linux/uio.h

@@ -219,6 +219,8 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#endif
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
+bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
+unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,

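iov_iter_is_aligned() returns true only if every segment's address
satisfies addr_mask and every segment's length satisfies len_mask. For
example, with 4-byte DMA alignment and 512-byte logical blocks:

    if (!iov_iter_is_aligned(iter, 3, 511))  /* masks are alignment - 1 */
        return -EINVAL;
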
include/linux/writeback.h

@@ -101,9 +101,9 @@ struct writeback_control {
#endif
};
-static inline int wbc_to_write_flags(struct writeback_control *wbc)
+static inline blk_opf_t wbc_to_write_flags(struct writeback_control *wbc)
{
-int flags = 0;
+blk_opf_t flags = 0;
if (wbc->punt_to_cgroup)
flags = REQ_CGROUP_PUNT;