Merge branch 'for-4.17/dax' into libnvdimm-for-next
commit e13e75b86e

60 changed files with 1630 additions and 1291 deletions

include/linux/dax.h

@@ -26,18 +26,42 @@ extern struct attribute_group dax_attribute_group;
 #if IS_ENABLED(CONFIG_DAX)
 struct dax_device *dax_get_by_host(const char *host);
+struct dax_device *alloc_dax(void *private, const char *host,
+		const struct dax_operations *ops);
 void put_dax(struct dax_device *dax_dev);
+void kill_dax(struct dax_device *dax_dev);
+void dax_write_cache(struct dax_device *dax_dev, bool wc);
+bool dax_write_cache_enabled(struct dax_device *dax_dev);
 #else
 static inline struct dax_device *dax_get_by_host(const char *host)
 {
 	return NULL;
 }
+
+static inline struct dax_device *alloc_dax(void *private, const char *host,
+		const struct dax_operations *ops)
+{
+	/*
+	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
+	 * NULL is an error or expected.
+	 */
+	return NULL;
+}
 static inline void put_dax(struct dax_device *dax_dev)
 {
 }
+static inline void kill_dax(struct dax_device *dax_dev)
+{
+}
+static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
+{
+}
+static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
+{
+	return false;
+}
 #endif
 
 struct writeback_control;
 int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
 #if IS_ENABLED(CONFIG_FS_DAX)
 int __bdev_dax_supported(struct super_block *sb, int blocksize);

@@ -57,6 +81,8 @@ static inline void fs_put_dax(struct dax_device *dax_dev)
 }
 
 struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
+int dax_writeback_mapping_range(struct address_space *mapping,
+		struct block_device *bdev, struct writeback_control *wbc);
 #else
 static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
 {

@@ -76,22 +102,23 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
 {
 	return NULL;
 }
+
+static inline int dax_writeback_mapping_range(struct address_space *mapping,
+		struct block_device *bdev, struct writeback_control *wbc)
+{
+	return -EOPNOTSUPP;
+}
 #endif
 
 int dax_read_lock(void);
 void dax_read_unlock(int id);
-struct dax_device *alloc_dax(void *private, const char *host,
-		const struct dax_operations *ops);
 bool dax_alive(struct dax_device *dax_dev);
-void kill_dax(struct dax_device *dax_dev);
 void *dax_get_private(struct dax_device *dax_dev);
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
 		void **kaddr, pfn_t *pfn);
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t bytes, struct iov_iter *i);
 void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
-void dax_write_cache(struct dax_device *dax_dev, bool wc);
-bool dax_write_cache_enabled(struct dax_device *dax_dev);
 
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		const struct iomap_ops *ops);

@@ -121,7 +148,4 @@ static inline bool dax_mapping(struct address_space *mapping)
 	return mapping->host && IS_DAX(mapping->host);
 }
 
-struct writeback_control;
-int dax_writeback_mapping_range(struct address_space *mapping,
-		struct block_device *bdev, struct writeback_control *wbc);
 #endif
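A minimal sketch of how a consumer drives the API consolidated above: dax_read_lock() and dax_alive() pin the device against kill_dax(), then dax_direct_access() translates a page offset into a directly addressable kernel mapping. The function name, error handling, and single-page copy are illustrative, not part of this commit:

/* Illustrative only: copy up to one page into a DAX device's mapping. */
static int example_copy_to_dax(struct dax_device *dax_dev, pgoff_t pgoff,
			       const void *src, size_t len)
{
	void *kaddr;
	pfn_t pfn;
	long avail;
	int id, rc = 0;

	id = dax_read_lock();	/* keeps the device alive for the duration */
	if (!dax_alive(dax_dev)) {
		rc = -ENXIO;
		goto out;
	}
	avail = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	if (avail < 1) {
		rc = avail < 0 ? avail : -ERANGE;
		goto out;
	}
	memcpy(kaddr, src, len);
	if (dax_write_cache_enabled(dax_dev))
		dax_flush(dax_dev, kaddr, len);	/* write back CPU caches */
out:
	dax_read_unlock(id);
	return rc;
}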

include/linux/fs.h

@@ -3130,6 +3130,10 @@ extern int simple_rmdir(struct inode *, struct dentry *);
 extern int simple_rename(struct inode *, struct dentry *,
 			 struct inode *, struct dentry *, unsigned int);
 extern int noop_fsync(struct file *, loff_t, loff_t, int);
+extern int noop_set_page_dirty(struct page *page);
+extern void noop_invalidatepage(struct page *page, unsigned int offset,
+		unsigned int length);
+extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
 extern int simple_empty(struct dentry *);
 extern int simple_readpage(struct file *file, struct page *page);
 extern int simple_write_begin(struct file *file, struct address_space *mapping,
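The three new noop_* helpers let DAX filesystems, which bypass the page cache entirely, satisfy mandatory address_space_operations without dragging in writeback machinery. A sketch of such an aops table; example_dax_writepages is a hypothetical callback, only the noop_* names come from this commit:

static const struct address_space_operations example_dax_aops = {
	.writepages	= example_dax_writepages,	/* flush via the dax path */
	.direct_IO	= noop_direct_IO,		/* O_DIRECT opens succeed, do nothing */
	.set_page_dirty	= noop_set_page_dirty,		/* no page cache pages to dirty */
	.invalidatepage	= noop_invalidatepage,		/* nothing cached to invalidate */
};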

include/linux/sched/deadline.h

@@ -1,8 +1,4 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SCHED_DEADLINE_H
-#define _LINUX_SCHED_DEADLINE_H
-
-#include <linux/sched.h>
 
 /*
  * SCHED_DEADLINE tasks has negative priorities, reflecting

@@ -28,5 +24,3 @@ static inline bool dl_time_before(u64 a, u64 b)
 {
 	return (s64)(a - b) < 0;
 }
-
-#endif /* _LINUX_SCHED_DEADLINE_H */

include/linux/sched/isolation.h

@@ -12,6 +12,7 @@ enum hk_flags {
 	HK_FLAG_SCHED		= (1 << 3),
 	HK_FLAG_TICK		= (1 << 4),
 	HK_FLAG_DOMAIN		= (1 << 5),
+	HK_FLAG_WQ		= (1 << 6),
 };
 
 #ifdef CONFIG_CPU_ISOLATION
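HK_FLAG_WQ gives subsystems a way to ask which CPUs should still run housekeeping work such as unbound workqueues when isolation (nohz_full=, isolcpus=) is in effect. A sketch of the query pattern; the wrapper function is illustrative:

/* Restrict a workqueue cpumask to non-isolated (housekeeping) CPUs. */
static void example_restrict_wq_cpus(struct cpumask *wq_cpumask)
{
	cpumask_copy(wq_cpumask, housekeeping_cpumask(HK_FLAG_WQ));
}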

include/linux/sched/nohz.h

@@ -37,8 +37,4 @@ extern void wake_up_nohz_cpu(int cpu);
 static inline void wake_up_nohz_cpu(int cpu) { }
 #endif
 
-#ifdef CONFIG_NO_HZ_FULL
-extern u64 scheduler_tick_max_deferment(void);
-#endif
-
 #endif /* _LINUX_SCHED_NOHZ_H */

include/linux/tick.h

@@ -113,7 +113,8 @@ enum tick_dep_bits {
 
 #ifdef CONFIG_NO_HZ_COMMON
 extern bool tick_nohz_enabled;
-extern int tick_nohz_tick_stopped(void);
+extern bool tick_nohz_tick_stopped(void);
+extern bool tick_nohz_tick_stopped_cpu(int cpu);
 extern void tick_nohz_idle_enter(void);
 extern void tick_nohz_idle_exit(void);
 extern void tick_nohz_irq_exit(void);

@@ -125,6 +126,7 @@ extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 #else /* !CONFIG_NO_HZ_COMMON */
 #define tick_nohz_enabled (0)
 static inline int tick_nohz_tick_stopped(void) { return 0; }
+static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
 static inline void tick_nohz_idle_enter(void) { }
 static inline void tick_nohz_idle_exit(void) { }
 
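tick_nohz_tick_stopped_cpu() lets a caller check whether a remote CPU has stopped its periodic tick, e.g. before relying on tick-driven processing there. An illustrative use, not taken from this commit:

static void example_kick_cpu(int cpu)
{
	/* A tickless CPU won't notice tick-driven work; send an IPI instead. */
	if (tick_nohz_tick_stopped_cpu(cpu))
		smp_send_reschedule(cpu);
}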

include/linux/wait_bit.h

@@ -262,4 +262,74 @@ int wait_on_atomic_t(atomic_t *val, wait_atomic_t_action_f action, unsigned mode
 	return out_of_line_wait_on_atomic_t(val, action, mode);
 }
 
+extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags);
+extern void wake_up_var(void *var);
+extern wait_queue_head_t *__var_waitqueue(void *p);
+
+#define ___wait_var_event(var, condition, state, exclusive, ret, cmd)	\
+({									\
+	__label__ __out;						\
+	struct wait_queue_head *__wq_head = __var_waitqueue(var);	\
+	struct wait_bit_queue_entry __wbq_entry;			\
+	long __ret = ret; /* explicit shadow */				\
+									\
+	init_wait_var_entry(&__wbq_entry, var,				\
+			    exclusive ? WQ_FLAG_EXCLUSIVE : 0);		\
+	for (;;) {							\
+		long __int = prepare_to_wait_event(__wq_head,		\
+						   &__wbq_entry.wq_entry, \
+						   state);		\
+		if (condition)						\
+			break;						\
+									\
+		if (___wait_is_interruptible(state) && __int) {		\
+			__ret = __int;					\
+			goto __out;					\
+		}							\
+									\
+		cmd;							\
+	}								\
+	finish_wait(__wq_head, &__wbq_entry.wq_entry);			\
+__out:	__ret;								\
+})
+
+#define __wait_var_event(var, condition)				\
+	___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
+			  schedule())
+
+#define wait_var_event(var, condition)					\
+do {									\
+	might_sleep();							\
+	if (condition)							\
+		break;							\
+	__wait_var_event(var, condition);				\
+} while (0)
+
+#define __wait_var_event_killable(var, condition)			\
+	___wait_var_event(var, condition, TASK_KILLABLE, 0, 0,		\
+			  schedule())
+
+#define wait_var_event_killable(var, condition)				\
+({									\
+	int __ret = 0;							\
+	might_sleep();							\
+	if (!(condition))						\
+		__ret = __wait_var_event_killable(var, condition);	\
+	__ret;								\
+})
+
+#define __wait_var_event_timeout(var, condition, timeout)		\
+	___wait_var_event(var, ___wait_cond_timeout(condition),		\
+			  TASK_UNINTERRUPTIBLE, 0, timeout,		\
+			  __ret = schedule_timeout(__ret))
+
+#define wait_var_event_timeout(var, condition, timeout)			\
+({									\
+	long __ret = timeout;						\
+	might_sleep();							\
+	if (!___wait_cond_timeout(condition))				\
+		__ret = __wait_var_event_timeout(var, condition, timeout); \
+	__ret;								\
+})
+
 #endif /* _LINUX_WAIT_BIT_H */
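The new wait_var_event()/wake_up_var() pair lets code sleep on the address of an arbitrary variable instead of embedding a dedicated waitqueue in every object. The canonical pairing, shown here with an illustrative refcounted object:

struct example_obj {
	atomic_t refs;
};

static void example_put(struct example_obj *obj)
{
	/* Last reference dropped: wake waiters sleeping on &obj->refs. */
	if (atomic_dec_and_test(&obj->refs))
		wake_up_var(&obj->refs);
}

static void example_wait_idle(struct example_obj *obj)
{
	/* Sleeps until refs hits zero; the condition is re-checked on wakeup. */
	wait_var_event(&obj->refs, !atomic_read(&obj->refs));
}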