We have a good pile of various fixes and cleanups from Xiubo, Jeff,
Luis and others, almost exclusively in the filesystem. Several patches touch files outside of our normal purview to set the stage for bringing in Jeff's long awaited ceph+fscrypt series in the near future. All of them have appropriate acks and sat in linux-next for a while. -----BEGIN PGP SIGNATURE----- iQFHBAABCAAxFiEEydHwtzie9C7TfviiSn/eOAIR84sFAmL1HF8THGlkcnlvbW92 QGdtYWlsLmNvbQAKCRBKf944AhHziwOuB/97JKHFuOlP1HrD6fYe5a0ul9zC9VG4 57XPDNqG2PSmfXCjvZhyVU4n53sUlJTqzKDSTXydoPCMQjtyHvysA6gEvcgUJFPd PHaZDCd9TmqX8my67NiTK70RVpNR9BujJMVMbOfM+aaisl0K6WQbitO+BfhEiJcK QStdKm5lPyf02ESH9jF+Ga0DpokARaLbtDFH7975owxske6gWuoPBCJNrkMooKiX LjgEmNgH1F/sJSZXftmKdlw9DtGBFaLQBdfbfSB5oVPRb7chI7xBeraNr6Od3rls o4davbFkcsOr+s6LJPDH2BJobmOg+HoMoma7ezspF7ZqBF4Uipv5j3VC =1427 -----END PGP SIGNATURE----- Merge tag 'ceph-for-5.20-rc1' of https://github.com/ceph/ceph-client Pull ceph updates from Ilya Dryomov: "We have a good pile of various fixes and cleanups from Xiubo, Jeff, Luis and others, almost exclusively in the filesystem. Several patches touch files outside of our normal purview to set the stage for bringing in Jeff's long awaited ceph+fscrypt series in the near future. 
All of them have appropriate acks and sat in linux-next for a while" * tag 'ceph-for-5.20-rc1' of https://github.com/ceph/ceph-client: (27 commits) libceph: clean up ceph_osdc_start_request prototype libceph: fix ceph_pagelist_reserve() comment typo ceph: remove useless check for the folio ceph: don't truncate file in atomic_open ceph: make f_bsize always equal to f_frsize ceph: flush the dirty caps immediately when quota is approaching libceph: print fsid and epoch with osd id libceph: check pointer before assigned to "c->rules[]" ceph: don't get the inline data for new creating files ceph: update the auth cap when the async create req is forwarded ceph: make change_auth_cap_ses a global symbol ceph: fix incorrect old_size length in ceph_mds_request_args ceph: switch back to testing for NULL folio->private in ceph_dirty_folio ceph: call netfs_subreq_terminated with was_async == false ceph: convert to generic_file_llseek ceph: fix the incorrect comment for the ceph_mds_caps struct ceph: don't leak snap_rwsem in handle_cap_grant ceph: prevent a client from exceeding the MDS maximum xattr size ceph: choose auth MDS for getxattr with the Xs caps ceph: add session already open notify support ...
This commit is contained in:
commit
786da5da56
27 changed files with 538 additions and 233 deletions
|
|
@ -433,9 +433,9 @@ union ceph_mds_request_args {
|
|||
__le32 stripe_unit; /* layout for newly created file */
|
||||
__le32 stripe_count; /* ... */
|
||||
__le32 object_size;
|
||||
__le32 file_replication;
|
||||
__le32 mask; /* CEPH_CAP_* */
|
||||
__le32 old_size;
|
||||
__le32 pool;
|
||||
__le32 mask; /* CEPH_CAP_* */
|
||||
__le64 old_size;
|
||||
} __attribute__ ((packed)) open;
|
||||
struct {
|
||||
__le32 flags;
|
||||
|
|
@ -768,7 +768,7 @@ struct ceph_mds_caps {
|
|||
__le32 xattr_len;
|
||||
__le64 xattr_version;
|
||||
|
||||
/* filelock */
|
||||
/* a union of non-export and export bodies. */
|
||||
__le64 size, max_size, truncate_size;
|
||||
__le32 truncate_seq;
|
||||
struct ceph_timespec mtime, atime, ctime;
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ struct ceph_mdsmap {
|
|||
u32 m_session_timeout; /* seconds */
|
||||
u32 m_session_autoclose; /* seconds */
|
||||
u64 m_max_file_size;
|
||||
u64 m_max_xattr_size; /* maximum size for xattrs blob */
|
||||
u32 m_max_mds; /* expected up:active mds number */
|
||||
u32 m_num_active_mds; /* actual up:active mds number */
|
||||
u32 possible_max_rank; /* possible max rank index */
|
||||
|
|
|
|||
|
|
@ -507,9 +507,8 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
|
|||
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
|
||||
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
|
||||
|
||||
extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
|
||||
struct ceph_osd_request *req,
|
||||
bool nofail);
|
||||
void ceph_osdc_start_request(struct ceph_osd_client *osdc,
|
||||
struct ceph_osd_request *req);
|
||||
extern void ceph_osdc_cancel_request(struct ceph_osd_request *req);
|
||||
extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
|
||||
struct ceph_osd_request *req);
|
||||
|
|
|
|||
|
|
@ -233,6 +233,8 @@ extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
|
|||
wait_queue_head_t *);
|
||||
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
|
||||
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
|
||||
extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
|
||||
const struct qstr *name);
|
||||
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
|
||||
extern struct dentry *d_find_any_alias(struct inode *inode);
|
||||
extern struct dentry * d_obtain_alias(struct inode *);
|
||||
|
|
|
|||
|
|
@ -284,6 +284,7 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg);
|
|||
int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg);
|
||||
int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg);
|
||||
int fscrypt_has_permitted_context(struct inode *parent, struct inode *child);
|
||||
int fscrypt_context_for_new_inode(void *ctx, struct inode *inode);
|
||||
int fscrypt_set_context(struct inode *inode, void *fs_data);
|
||||
|
||||
struct fscrypt_dummy_policy {
|
||||
|
|
@ -327,6 +328,10 @@ void fscrypt_free_inode(struct inode *inode);
|
|||
int fscrypt_drop_inode(struct inode *inode);
|
||||
|
||||
/* fname.c */
|
||||
int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
|
||||
u8 *out, unsigned int olen);
|
||||
bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
|
||||
u32 max_len, u32 *encrypted_len_ret);
|
||||
int fscrypt_setup_filename(struct inode *inode, const struct qstr *iname,
|
||||
int lookup, struct fscrypt_name *fname);
|
||||
|
||||
|
|
|
|||
|
|
@ -54,6 +54,15 @@ void dump_mm(const struct mm_struct *mm);
|
|||
} \
|
||||
unlikely(__ret_warn_once); \
|
||||
})
|
||||
#define VM_WARN_ON_FOLIO(cond, folio) ({ \
|
||||
int __ret_warn = !!(cond); \
|
||||
\
|
||||
if (unlikely(__ret_warn)) { \
|
||||
dump_page(&folio->page, "VM_WARN_ON_FOLIO(" __stringify(cond)")");\
|
||||
WARN_ON(1); \
|
||||
} \
|
||||
unlikely(__ret_warn); \
|
||||
})
|
||||
#define VM_WARN_ON_ONCE_FOLIO(cond, folio) ({ \
|
||||
static bool __section(".data.once") __warned; \
|
||||
int __ret_warn_once = !!(cond); \
|
||||
|
|
@ -79,6 +88,7 @@ void dump_mm(const struct mm_struct *mm);
|
|||
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
|
||||
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
|
||||
#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
|
||||
#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
|
||||
#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
|
||||
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
|
||||
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue