Merge branch 'for-2.6.37/core' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.37/core' of git://git.kernel.dk/linux-2.6-block: (39 commits)
cfq-iosched: Fix a gcc 4.5 warning and put some comments
block: Turn bvec_k{un,}map_irq() into static inline functions
block: fix accounting bug on cross partition merges
block: Make the integrity mapped property a bio flag
block: Fix double free in blk_integrity_unregister
block: Ensure physical block size is unsigned int
blkio-throttle: Fix possible multiplication overflow in iops calculations
blkio-throttle: limit max iops value to UINT_MAX
blkio-throttle: There is no need to convert jiffies to milliseconds
blkio-throttle: Fix link failure on i386
blkio: Recalculate the throttled bio dispatch time upon throttle limit change
blkio: Add root group to td->tg_list
blkio: deletion of a cgroup causes oops
blkio: Do not export throttle files if CONFIG_BLK_DEV_THROTTLING=n
block: set the bounce_pfn to the actual DMA limit rather than to max memory
block: revert bad fix for memory hotplug causing bounces
Fix compile error in blk-exec.c for !CONFIG_DETECT_HUNG_TASK
block: set the bounce_pfn to the actual DMA limit rather than to max memory
block: Prevent hang_check firing during long I/O
cfq: improve fsync performance for small files
...
Fix up trivial conflicts due to __rcu sparse annotation in include/linux/genhd.h
commit e9dd2b6837
43 changed files with 2494 additions and 299 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h
@@ -346,8 +346,15 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 }

 #else
-#define bvec_kmap_irq(bvec, flags)	(page_address((bvec)->bv_page) + (bvec)->bv_offset)
-#define bvec_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
+static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+{
+	return page_address(bvec->bv_page) + bvec->bv_offset;
+}
+
+static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+{
+	*flags = 0;
+}
 #endif

 static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
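Turning these macros into static inline functions gets argument type checking for free without changing behavior. For context, a minimal usage sketch (the helper name copy_bvec_out is illustrative, not part of the patch):

	/* Illustrative: copy a bio_vec's payload out under the irq-safe mapping. */
	static void copy_bvec_out(struct bio_vec *bvec, void *dst)
	{
		unsigned long flags;
		char *buf = bvec_kmap_irq(bvec, &flags);

		memcpy(dst, buf, bvec->bv_len);
		bvec_kunmap_irq(buf, &flags);
	}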
@@ -496,6 +503,10 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
 #define bip_for_each_vec(bvl, bip, i)					\
 	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)

+#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
+	for_each_bio(_bio)						\
+		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
+
 #define bio_integrity(bio) (bio->bi_integrity != NULL)

 extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
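The new iterator visits the integrity payload of every bio in a chain. A hedged sketch of its use (assumes a bio chain that already carries integrity data):

	/* Illustrative: total up the integrity bytes attached to a bio chain. */
	struct bio_vec *iv;
	unsigned int i, bytes = 0;

	bio_for_each_integrity_vec(iv, bio, i)
		bytes += iv->bv_len;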
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
@@ -97,6 +97,7 @@ struct bio {
 #define BIO_NULL_MAPPED 9	/* contains invalid user pages */
 #define BIO_FS_INTEGRITY 10	/* fs owns integrity data, not block layer */
 #define BIO_QUIET	11	/* Make BIO Quiet */
+#define BIO_MAPPED_INTEGRITY 12/* integrity metadata has been remapped */
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))

 /*
@@ -130,6 +131,8 @@ enum rq_flag_bits {
 	/* bio only flags */
 	__REQ_UNPLUG,		/* unplug the immediately after submission */
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
+	__REQ_THROTTLED,	/* This bio has already been subjected to
+				 * throttling rules. Don't do it again. */

 	/* request only flags */
 	__REQ_SORTED,		/* elevator knows about this request */
@@ -146,7 +149,6 @@ enum rq_flag_bits {
 	__REQ_ORDERED_COLOR,	/* is before or after barrier */
 	__REQ_ALLOCED,		/* request came from our alloc pool */
 	__REQ_COPY_USER,	/* contains copies of user pages */
-	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
 	__REQ_FLUSH,		/* request for cache flush */
 	__REQ_IO_STAT,		/* account I/O stat */
 	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
@@ -172,6 +174,7 @@ enum rq_flag_bits {

 #define REQ_UNPLUG		(1 << __REQ_UNPLUG)
 #define REQ_RAHEAD		(1 << __REQ_RAHEAD)
+#define REQ_THROTTLED		(1 << __REQ_THROTTLED)

 #define REQ_SORTED		(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
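REQ_THROTTLED marks a bio that blk-throttle has already charged against a group's limits, so a resubmitted bio is not throttled twice. Roughly, the throttle entry point tests and clears it like this (a sketch of the pattern, not the verbatim blk-throttle code):

	if (bio->bi_rw & REQ_THROTTLED) {
		/* already charged; strip the flag and let the bio pass */
		bio->bi_rw &= ~REQ_THROTTLED;
		return 0;
	}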
@@ -187,7 +190,6 @@ enum rq_flag_bits {
 #define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
 #define REQ_ALLOCED		(1 << __REQ_ALLOCED)
 #define REQ_COPY_USER		(1 << __REQ_COPY_USER)
-#define REQ_INTEGRITY		(1 << __REQ_INTEGRITY)
 #define REQ_FLUSH		(1 << __REQ_FLUSH)
 #define REQ_IO_STAT		(1 << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
@@ -115,6 +115,7 @@ struct request {
 	void *elevator_private3;

 	struct gendisk *rq_disk;
+	struct hd_struct *part;
 	unsigned long start_time;
 #ifdef CONFIG_BLK_CGROUP
 	unsigned long long start_time_ns;
@@ -124,6 +125,9 @@ struct request {
 	 * physical address coalescing is performed.
 	 */
 	unsigned short nr_phys_segments;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	unsigned short nr_integrity_segments;
+#endif

 	unsigned short ioprio;

@@ -243,6 +247,7 @@ struct queue_limits {

 	unsigned short		logical_block_size;
 	unsigned short		max_segments;
+	unsigned short		max_integrity_segments;

 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
@@ -367,6 +372,11 @@ struct request_queue
 #if defined(CONFIG_BLK_DEV_BSG)
 	struct bsg_class_device bsg_dev;
 #endif
+
+#ifdef CONFIG_BLK_DEV_THROTTLING
+	/* Throttle data */
+	struct throtl_data *td;
+#endif
 };

 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
@@ -851,7 +861,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
@@ -1004,7 +1014,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
 	return q->limits.physical_block_size;
 }

-static inline int bdev_physical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
 {
 	return queue_physical_block_size(bdev_get_queue(bdev));
 }
@@ -1093,11 +1103,11 @@ static inline int queue_dma_alignment(struct request_queue *q)
 	return q ? q->dma_alignment : 511;
 }

-static inline int blk_rq_aligned(struct request_queue *q, void *addr,
+static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
 				 unsigned int len)
 {
 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-	return !((unsigned long)addr & alignment) && !(len & alignment);
+	return !(addr & alignment) && !(len & alignment);
 }

 /* assumes size > 256 */
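Taking the address as unsigned long moves the single necessary cast to the call site and drops the address-space-mangling cast inside the helper. Roughly how blk_rq_map_kern() uses it (a sketch under that assumption):

	unsigned long addr = (unsigned long) kbuf;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);	/* bounce copy */
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);		/* map in place */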
@@ -1127,6 +1137,7 @@ static inline void put_dev_sector(Sector p)

 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);

 #ifdef CONFIG_BLK_CGROUP
 /*
@@ -1170,6 +1181,24 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 }
 #endif

+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern int blk_throtl_init(struct request_queue *q);
+extern void blk_throtl_exit(struct request_queue *q);
+extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
+extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
+extern void throtl_shutdown_timer_wq(struct request_queue *q);
+#else /* CONFIG_BLK_DEV_THROTTLING */
+static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
+{
+	return 0;
+}
+
+static inline int blk_throtl_init(struct request_queue *q) { return 0; }
+static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
+static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
+static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
+#endif /* CONFIG_BLK_DEV_THROTTLING */
+
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
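The inline stubs keep callers free of #ifdefs when CONFIG_BLK_DEV_THROTTLING=n. The submission path consults the hook roughly like this (a sketch; a non-zero return is taken to mean blk-throttle queued the bio for later dispatch):

	if (blk_throtl_bio(q, &bio))
		return;	/* throttled: blk-throttle owns the bio until dispatch */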
@@ -1213,8 +1242,13 @@ struct blk_integrity {
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request *);
+extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
+				   struct scatterlist *);
+extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
+				  struct request *);
+extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
+				   struct bio *);

 static inline
 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
@@ -1235,16 +1269,32 @@ static inline int blk_integrity_rq(struct request *rq)
 	return bio_integrity(rq->bio);
 }

+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+						    unsigned int segs)
+{
+	q->limits.max_integrity_segments = segs;
+}
+
+static inline unsigned short
+queue_max_integrity_segments(struct request_queue *q)
+{
+	return q->limits.max_integrity_segments;
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */

 #define blk_integrity_rq(rq)			(0)
-#define blk_rq_count_integrity_sg(a)		(0)
-#define blk_rq_map_integrity_sg(a, b)		(0)
+#define blk_rq_count_integrity_sg(a, b)		(0)
+#define blk_rq_map_integrity_sg(a, b, c)	(0)
 #define bdev_get_integrity(a)			(0)
 #define blk_get_integrity(a)			(0)
 #define blk_integrity_compare(a, b)		(0)
 #define blk_integrity_register(a, b)		(0)
 #define blk_integrity_unregister(a)		do { } while (0);
+#define blk_queue_max_integrity_segments(a, b)	do { } while (0);
+#define queue_max_integrity_segments(a)		(0)
+#define blk_integrity_merge_rq(a, b, c)		(0)
+#define blk_integrity_merge_bio(a, b, c)	(0)

 #endif /* CONFIG_BLK_DEV_INTEGRITY */

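With the limit stored in queue_limits, a driver advertises how many integrity segments it can take per request, and the merge path can compare blk_rq_count_integrity_sg(q, bio) against queue_max_integrity_segments(q) before combining requests. An illustrative driver-side call (the value 16 is made up):

	/* Illustrative: cap integrity scatter-gather segments for this queue. */
	blk_queue_max_integrity_segments(q, 16);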
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
@@ -122,6 +122,8 @@ extern void elv_completed_request(struct request_queue *, struct request *);
 extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
 extern void elv_put_request(struct request_queue *, struct request *);
 extern void elv_drain_elevator(struct request_queue *);
+extern void elv_quiesce_start(struct request_queue *);
+extern void elv_quiesce_end(struct request_queue *);

 /*
  * io scheduler registration
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <linux/kdev_t.h>
 #include <linux/rcupdate.h>
+#include <linux/slab.h>

 #ifdef CONFIG_BLOCK

@@ -86,7 +87,15 @@ struct disk_stats {
 	unsigned long io_ticks;
 	unsigned long time_in_queue;
 };

+#define PARTITION_META_INFO_VOLNAMELTH	64
+#define PARTITION_META_INFO_UUIDLTH	16
+
+struct partition_meta_info {
+	u8 uuid[PARTITION_META_INFO_UUIDLTH];	/* always big endian */
+	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
+};
+
 struct hd_struct {
 	sector_t start_sect;
 	sector_t nr_sects;
@@ -95,6 +104,7 @@ struct hd_struct {
 	struct device __dev;
 	struct kobject *holder_dir;
 	int policy, partno;
+	struct partition_meta_info *info;
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 	int make_it_fail;
 #endif
@@ -130,6 +140,7 @@ struct disk_part_tbl {
 	struct rcu_head rcu_head;
 	int len;
 	struct hd_struct __rcu *last_lookup;
+	struct gendisk *disk;
 	struct hd_struct __rcu *part[];
 };

@@ -181,6 +192,30 @@ static inline struct gendisk *part_to_disk(struct hd_struct *part)
 	return NULL;
 }

+static inline void part_pack_uuid(const u8 *uuid_str, u8 *to)
+{
+	int i;
+	for (i = 0; i < 16; ++i) {
+		*to++ = (hex_to_bin(*uuid_str) << 4) |
+			(hex_to_bin(*(uuid_str + 1)));
+		uuid_str += 2;
+		switch (i) {
+		case 3:
+		case 5:
+		case 7:
+		case 9:
+			uuid_str++;
+			continue;
+		}
+	}
+}
+
+static inline char *part_unpack_uuid(const u8 *uuid, char *out)
+{
+	sprintf(out, "%pU", uuid);
+	return out;
+}
+
 static inline int disk_max_parts(struct gendisk *disk)
 {
 	if (disk->flags & GENHD_FL_EXT_DEVT)
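part_pack_uuid() consumes the canonical 36-character text form, skipping the dash that follows byte indices 3, 5, 7 and 9; part_unpack_uuid() formats the 16 bytes back out via the kernel's %pU specifier. An illustrative round trip (the UUID value is made up):

	const char *s = "0fc63daf-8483-4772-8e79-3d69d8477de4";
	u8 packed[PARTITION_META_INFO_UUIDLTH];
	char text[37];	/* 32 hex digits + 4 dashes + NUL */

	part_pack_uuid((const u8 *)s, packed);
	part_unpack_uuid(packed, text);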
@@ -342,6 +377,19 @@ static inline int part_in_flight(struct hd_struct *part)
 	return part->in_flight[0] + part->in_flight[1];
 }

+static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk)
+{
+	if (disk)
+		return kzalloc_node(sizeof(struct partition_meta_info),
+				    GFP_KERNEL, disk->node_id);
+	return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL);
+}
+
+static inline void free_part_info(struct hd_struct *part)
+{
+	kfree(part->info);
+}
+
 /* block/blk-core.c */
 extern void part_round_stats(int cpu, struct hd_struct *part);

@@ -533,7 +581,9 @@ extern int disk_expand_part_tbl(struct gendisk *disk, int target);
 extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
 extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
 						     int partno, sector_t start,
-						     sector_t len, int flags);
+						     sector_t len, int flags,
+						     struct partition_meta_info
+						       *info);
 extern void delete_partition(struct gendisk *, int);
 extern void printk_all_partitions(void);

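Callers with no metadata to attach pass NULL for the new argument, e.g. (illustrative):

	part = add_partition(disk, partno, start, len, flags, NULL);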
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
@@ -651,6 +651,16 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 	(void) (&_max1 == &_max2);		\
 	_max1 > _max2 ? _max1 : _max2; })

+/**
+ * min_not_zero - return the minimum that is _not_ zero, unless both are zero
+ * @x: value1
+ * @y: value2
+ */
+#define min_not_zero(x, y) ({			\
+	typeof(x) __x = (x);			\
+	typeof(y) __y = (y);			\
+	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
+
 /**
  * clamp - return a value clamped to a given range with strict typechecking
  * @val: current value
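min_not_zero() treats zero as "unset" rather than as a genuine minimum, which is the semantic limit-stacking wants. Worked examples:

	unsigned int a = min_not_zero(0U, 4096U);	/* == 4096 */
	unsigned int b = min_not_zero(512U, 4096U);	/* == 512  */
	unsigned int c = min_not_zero(0U, 0U);		/* == 0    */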
diff --git a/include/linux/sched.h b/include/linux/sched.h
@@ -336,6 +336,9 @@ extern unsigned long sysctl_hung_task_warnings;
 extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
 					 void __user *buffer,
 					 size_t *lenp, loff_t *ppos);
+#else
+/* Avoid need for ifdefs elsewhere in the code */
+enum { sysctl_hung_task_timeout_secs = 0 };
+#endif

 /* Attach to any functions which should be ignored in wchan output. */
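The enum gives sysctl_hung_task_timeout_secs a compile-time value of 0 when CONFIG_DETECT_HUNG_TASK=n, which is what fixes the blk-exec.c build mentioned above. The consumer there waits in slices shorter than the hung-task window, roughly like this (a sketch of the pattern):

	unsigned long hang_check = sysctl_hung_task_timeout_secs;

	if (hang_check)
		/* complete often enough that the hung-task detector stays quiet */
		while (!wait_for_completion_timeout(&wait, hang_check * (HZ / 2)))
			;
	else
		wait_for_completion(&wait);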