Merge branch 'core/urgent' into x86/urgent, to pick up objtool fix

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Ingo Molnar
Date:   2018-11-03 23:42:16 +01:00
commit 23a12ddee1
4068 changed files with 262354 additions and 74077 deletions


@ -28,8 +28,8 @@
* The available bitmap operations and their rough meaning in the
* case that the bitmap is a single unsigned long are thus:
*
* Note that nbits should be always a compile time evaluable constant.
* Otherwise many inlines will generate horrible code.
* The generated code is more efficient when nbits is known at
* compile-time and at most BITS_PER_LONG.
*
* ::
*
@ -204,38 +204,31 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
/*
* The static inlines below do not handle constant nbits==0 correctly,
* so make such users (should any ever turn up) call the out-of-line
* versions.
*/
#define small_const_nbits(nbits) \
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = 0UL;
else {
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
}
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
}
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = ~0UL;
else {
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src;
else {
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memcpy(dst, src, len);
}
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memcpy(dst, src, len);
}
/*
@ -398,7 +391,7 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
}
static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, int nbits)
unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
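
A minimal sketch, assuming a kernel build, of why the explicit small-constant fast path could be dropped from bitmap_zero/fill/copy: per the updated comment above, when nbits is a compile-time constant no larger than BITS_PER_LONG, the fixed-size memset()/memcpy() is folded by the compiler into a single word store anyway.

	/* Sketch (illustrative, not part of this diff) */
	DECLARE_BITMAP(map, BITS_PER_LONG);

	bitmap_zero(map, BITS_PER_LONG);  /* memset(map, 0, sizeof(unsigned long)) -> one store */
	bitmap_fill(map, BITS_PER_LONG);  /* memset(map, 0xff, ...) -> one store */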


@ -236,33 +236,33 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
#ifdef __KERNEL__
#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits) \
#define set_mask_bits(ptr, mask, bits) \
({ \
const typeof(*ptr) mask = (_mask), bits = (_bits); \
typeof(*ptr) old, new; \
const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
typeof(*(ptr)) old__, new__; \
\
do { \
old = READ_ONCE(*ptr); \
new = (old & ~mask) | bits; \
} while (cmpxchg(ptr, old, new) != old); \
old__ = READ_ONCE(*(ptr)); \
new__ = (old__ & ~mask__) | bits__; \
} while (cmpxchg(ptr, old__, new__) != old__); \
\
new; \
new__; \
})
#endif
#ifndef bit_clear_unless
#define bit_clear_unless(ptr, _clear, _test) \
#define bit_clear_unless(ptr, clear, test) \
({ \
const typeof(*ptr) clear = (_clear), test = (_test); \
typeof(*ptr) old, new; \
const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
typeof(*(ptr)) old__, new__; \
\
do { \
old = READ_ONCE(*ptr); \
new = old & ~clear; \
} while (!(old & test) && \
cmpxchg(ptr, old, new) != old); \
old__ = READ_ONCE(*(ptr)); \
new__ = old__ & ~clear__; \
} while (!(old__ & test__) && \
cmpxchg(ptr, old__, new__) != old__); \
\
!(old & test); \
!(old__ & test__); \
})
#endif
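
A hedged sketch of the name-shadowing hazard the double-underscore locals close; the caller variables here are hypothetical:

	unsigned long state = 0xf0;
	unsigned long mask = 0x0f;	/* same spelling as an old macro-internal local */

	/*
	 * With the previous expansion, passing a variable literally named
	 * "mask" initialised the internal local from itself ("mask = (mask)").
	 * The mask__/bits__ locals cannot collide with ordinary caller names.
	 */
	set_mask_bits(&state, mask, 0x05);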


@ -1,404 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
*/
#ifndef _LINUX_BOOTMEM_H
#define _LINUX_BOOTMEM_H
#include <linux/mmzone.h>
#include <linux/mm_types.h>
#include <asm/dma.h>
#include <asm/processor.h>
/*
* simple boot-time physical memory area allocator.
*/
extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;
/*
* highest page
*/
extern unsigned long max_pfn;
/*
* highest possible page
*/
extern unsigned long long max_possible_pfn;
#ifndef CONFIG_NO_BOOTMEM
/**
* struct bootmem_data - per-node information used by the bootmem allocator
* @node_min_pfn: the starting physical address of the node's memory
* @node_low_pfn: the end physical address of the directly addressable memory
* @node_bootmem_map: is a bitmap pointer - the bits represent all physical
* memory pages (including holes) on the node.
* @last_end_off: the offset within the page of the end of the last allocation;
* if 0, the page used is full
* @hint_idx: the PFN of the page used with the last allocation;
* together with @last_end_off, this allows a check whether a new
* allocation can be merged into the page used for the last
* allocation rather than using up a full new page.
* @list: list entry in the linked list ordered by the memory addresses
*/
typedef struct bootmem_data {
unsigned long node_min_pfn;
unsigned long node_low_pfn;
void *node_bootmem_map;
unsigned long last_end_off;
unsigned long hint_idx;
struct list_head list;
} bootmem_data_t;
extern bootmem_data_t bootmem_node_data[];
#endif
extern unsigned long bootmem_bootmap_pages(unsigned long);
extern unsigned long init_bootmem_node(pg_data_t *pgdat,
unsigned long freepfn,
unsigned long startpfn,
unsigned long endpfn);
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
extern unsigned long free_all_bootmem(void);
extern void reset_node_managed_pages(pg_data_t *pgdat);
extern void reset_all_zones_managed_pages(void);
extern void free_bootmem_node(pg_data_t *pgdat,
unsigned long addr,
unsigned long size);
extern void free_bootmem(unsigned long physaddr, unsigned long size);
extern void free_bootmem_late(unsigned long physaddr, unsigned long size);
/*
* Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
* the architecture-specific code should honor this).
*
* If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success).
* If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory
* was already reserved.
*/
#define BOOTMEM_DEFAULT 0
#define BOOTMEM_EXCLUSIVE (1<<0)
extern int reserve_bootmem(unsigned long addr,
unsigned long size,
int flags);
extern int reserve_bootmem_node(pg_data_t *pgdat,
unsigned long physaddr,
unsigned long size,
int flags);
extern void *__alloc_bootmem(unsigned long size,
unsigned long align,
unsigned long goal);
extern void *__alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
extern void *__alloc_bootmem_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
void *__alloc_bootmem_node_high(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal,
unsigned long limit) __malloc;
extern void *__alloc_bootmem_low(unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
void *__alloc_bootmem_low_nopanic(unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal) __malloc;
#ifdef CONFIG_NO_BOOTMEM
/* We are using top down, so it is safe to use 0 here */
#define BOOTMEM_LOW_LIMIT 0
#else
#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
#endif
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
#endif
#define alloc_bootmem(x) \
__alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_align(x, align) \
__alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_nopanic(x) \
__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_pages(x) \
__alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_pages_nopanic(x) \
__alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_node(pgdat, x) \
__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_node_nopanic(pgdat, x) \
__alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_pages_node(pgdat, x) \
__alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
#define alloc_bootmem_low(x) \
__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
#define alloc_bootmem_low_pages_nopanic(x) \
__alloc_bootmem_low_nopanic(x, PAGE_SIZE, 0)
#define alloc_bootmem_low_pages(x) \
__alloc_bootmem_low(x, PAGE_SIZE, 0)
#define alloc_bootmem_low_pages_node(pgdat, x) \
__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM)
/* FIXME: use MEMBLOCK_ALLOC_* variants here */
#define BOOTMEM_ALLOC_ACCESSIBLE 0
#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0)
/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */
void *memblock_virt_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr,
phys_addr_t max_addr, int nid);
void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
phys_addr_t align, phys_addr_t min_addr,
phys_addr_t max_addr, int nid);
void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr, int nid);
void __memblock_free_early(phys_addr_t base, phys_addr_t size);
void __memblock_free_late(phys_addr_t base, phys_addr_t size);
static inline void * __init memblock_virt_alloc(
phys_addr_t size, phys_addr_t align)
{
return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT,
BOOTMEM_ALLOC_ACCESSIBLE,
NUMA_NO_NODE);
}
static inline void * __init memblock_virt_alloc_raw(
phys_addr_t size, phys_addr_t align)
{
return memblock_virt_alloc_try_nid_raw(size, align, BOOTMEM_LOW_LIMIT,
BOOTMEM_ALLOC_ACCESSIBLE,
NUMA_NO_NODE);
}
static inline void * __init memblock_virt_alloc_nopanic(
phys_addr_t size, phys_addr_t align)
{
return memblock_virt_alloc_try_nid_nopanic(size, align,
BOOTMEM_LOW_LIMIT,
BOOTMEM_ALLOC_ACCESSIBLE,
NUMA_NO_NODE);
}
static inline void * __init memblock_virt_alloc_low(
phys_addr_t size, phys_addr_t align)
{
return memblock_virt_alloc_try_nid(size, align,
BOOTMEM_LOW_LIMIT,
ARCH_LOW_ADDRESS_LIMIT,
NUMA_NO_NODE);
}
static inline void * __init memblock_virt_alloc_low_nopanic(
phys_addr_t size, phys_addr_t align)
{
return memblock_virt_alloc_try_nid_nopanic(size, align,
BOOTMEM_LOW_LIMIT,
ARCH_LOW_ADDRESS_LIMIT,
NUMA_NO_NODE);
}
static inline void * __init memblock_virt_alloc_from_nopanic(
phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
{
return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr,
BOOTMEM_ALLOC_ACCESSIBLE,
NUMA_NO_NODE);
}
static inline void * __init memblock_virt_alloc_node(
phys_addr_t size, int nid)
{
return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT,
BOOTMEM_ALLOC_ACCESSIBLE, nid);
}
static inline void * __init memblock_virt_alloc_node_nopanic(
phys_addr_t size, int nid)
{
return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT,
BOOTMEM_ALLOC_ACCESSIBLE,
nid);
}
static inline void __init memblock_free_early(
phys_addr_t base, phys_addr_t size)
{
__memblock_free_early(base, size);
}
static inline void __init memblock_free_early_nid(
phys_addr_t base, phys_addr_t size, int nid)
{
__memblock_free_early(base, size);
}
static inline void __init memblock_free_late(
phys_addr_t base, phys_addr_t size)
{
__memblock_free_late(base, size);
}
#else
#define BOOTMEM_ALLOC_ACCESSIBLE 0
/* Fall back to all the existing bootmem APIs */
static inline void * __init memblock_virt_alloc(
phys_addr_t size, phys_addr_t align)
{
if (!align)
align = SMP_CACHE_BYTES;
return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT);
}
static inline void * __init memblock_virt_alloc_raw(
phys_addr_t size, phys_addr_t align)
{
if (!align)
align = SMP_CACHE_BYTES;
return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
}
static inline void * __init memblock_virt_alloc_nopanic(
phys_addr_t size, phys_addr_t align)
{
if (!align)
align = SMP_CACHE_BYTES;
return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
}
static inline void * __init memblock_virt_alloc_low(
phys_addr_t size, phys_addr_t align)
{
if (!align)
align = SMP_CACHE_BYTES;
return __alloc_bootmem_low(size, align, 0);
}
static inline void * __init memblock_virt_alloc_low_nopanic(
phys_addr_t size, phys_addr_t align)
{
if (!align)
align = SMP_CACHE_BYTES;
return __alloc_bootmem_low_nopanic(size, align, 0);
}
static inline void * __init memblock_virt_alloc_from_nopanic(
phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
{
return __alloc_bootmem_nopanic(size, align, min_addr);
}
static inline void * __init memblock_virt_alloc_node(
phys_addr_t size, int nid)
{
return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES,
BOOTMEM_LOW_LIMIT);
}
static inline void * __init memblock_virt_alloc_node_nopanic(
phys_addr_t size, int nid)
{
return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
SMP_CACHE_BYTES,
BOOTMEM_LOW_LIMIT);
}
static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size,
phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid)
{
return __alloc_bootmem_node_high(NODE_DATA(nid), size, align,
min_addr);
}
static inline void * __init memblock_virt_alloc_try_nid_raw(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr, int nid)
{
return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
min_addr, max_addr);
}
static inline void * __init memblock_virt_alloc_try_nid_nopanic(
phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr, int nid)
{
return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
min_addr, max_addr);
}
static inline void __init memblock_free_early(
phys_addr_t base, phys_addr_t size)
{
free_bootmem(base, size);
}
static inline void __init memblock_free_early_nid(
phys_addr_t base, phys_addr_t size, int nid)
{
free_bootmem_node(NODE_DATA(nid), base, size);
}
static inline void __init memblock_free_late(
phys_addr_t base, phys_addr_t size)
{
free_bootmem_late(base, size);
}
#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */
extern void *alloc_large_system_hash(const char *tablename,
unsigned long bucketsize,
unsigned long numentries,
int scale,
int flags,
unsigned int *_hash_shift,
unsigned int *_hash_mask,
unsigned long low_limit,
unsigned long high_limit);
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
* shift passed via *_hash_shift */
#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
*/
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist; /* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif
#endif /* _LINUX_BOOTMEM_H */


@ -81,7 +81,13 @@ struct ceph_options {
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
/*
* Handle the largest possible rbd object in one message.
* There is no limit on the size of cephfs objects, but reads and writes
* have to obey the rsize and wsize mount options anyway.
*/
#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024)
#define CEPH_AUTH_NAME_DEFAULT "guest"


@ -82,22 +82,6 @@ enum ceph_msg_data_type {
CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */
};
static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type)
{
switch (type) {
case CEPH_MSG_DATA_NONE:
case CEPH_MSG_DATA_PAGES:
case CEPH_MSG_DATA_PAGELIST:
#ifdef CONFIG_BLOCK
case CEPH_MSG_DATA_BIO:
#endif /* CONFIG_BLOCK */
case CEPH_MSG_DATA_BVECS:
return true;
default:
return false;
}
}
#ifdef CONFIG_BLOCK
struct ceph_bio_iter {
@ -181,7 +165,6 @@ struct ceph_bvec_iter {
} while (0)
struct ceph_msg_data {
struct list_head links; /* ceph_msg->data */
enum ceph_msg_data_type type;
union {
#ifdef CONFIG_BLOCK
@ -202,7 +185,6 @@ struct ceph_msg_data {
struct ceph_msg_data_cursor {
size_t total_resid; /* across all data items */
struct list_head *data_head; /* = &ceph_msg->data */
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
@ -240,7 +222,9 @@ struct ceph_msg {
struct ceph_buffer *middle;
size_t data_length;
struct list_head data;
struct ceph_msg_data *data;
int num_data_items;
int max_data_items;
struct ceph_msg_data_cursor cursor;
struct ceph_connection *con;
@ -381,6 +365,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
struct ceph_bvec_iter *bvec_pos);
struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
gfp_t flags, bool can_fail);
extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
bool can_fail);


@ -13,14 +13,15 @@ struct ceph_msgpool {
mempool_t *pool;
int type; /* preallocated message type */
int front_len; /* preallocated payload size */
int max_data_items;
};
extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
int front_len, int size, bool blocking,
const char *name);
int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
int front_len, int max_data_items, int size,
const char *name);
extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *,
int front_len);
struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len,
int max_data_items);
extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *);
#endif


@ -136,6 +136,13 @@ struct ceph_osd_req_op {
u64 expected_object_size;
u64 expected_write_size;
} alloc_hint;
struct {
u64 snapid;
u64 src_version;
u8 flags;
u32 src_fadvise_flags;
struct ceph_osd_data osd_data;
} copy_from;
};
};
@ -444,9 +451,8 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
extern int osd_req_op_cls_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode,
const char *class, const char *method);
int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
const char *class, const char *method);
extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
u16 opcode, const char *name, const void *value,
size_t size, u8 cmp_op, u8 cmp_mode);
@ -511,6 +517,16 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
struct timespec64 *mtime,
struct page **pages, int nr_pages);
int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
u64 src_snapid, u64 src_version,
struct ceph_object_id *src_oid,
struct ceph_object_locator *src_oloc,
u32 src_fadvise_flags,
struct ceph_object_id *dst_oid,
struct ceph_object_locator *dst_oloc,
u32 dst_fadvise_flags,
u8 copy_from_flags);
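
A hedged consumer sketch of the new copy-from entry point; the variable names are illustrative, and the flag values come from the rados.h hunk further below:

	/* Sketch: server-side copy of one object, ordered against writes */
	ret = ceph_osdc_copy_from(osdc, src_snapid, src_version,
				  &src_oid, &src_oloc,
				  CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL,
				  &dst_oid, &dst_oloc,
				  CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
				  CEPH_OSD_COPY_FROM_FLAG_RWORDERED);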
/* watch/notify */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,


@ -23,16 +23,7 @@ struct ceph_pagelist_cursor {
size_t room; /* room remaining to reset to */
};
static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
{
INIT_LIST_HEAD(&pl->head);
pl->mapped_tail = NULL;
pl->length = 0;
pl->room = 0;
INIT_LIST_HEAD(&pl->free_list);
pl->num_pages_free = 0;
refcount_set(&pl->refcnt, 1);
}
struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags);
extern void ceph_pagelist_release(struct ceph_pagelist *pl);


@ -410,6 +410,14 @@ enum {
enum {
CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */
CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */
CEPH_OSD_OP_FLAG_FADVISE_RANDOM = 0x4, /* the op is random */
CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL = 0x8, /* the op is sequential */
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED = 0x10,/* data will be accessed in
the near future */
CEPH_OSD_OP_FLAG_FADVISE_DONTNEED = 0x20,/* data will not be accessed
in the near future */
CEPH_OSD_OP_FLAG_FADVISE_NOCACHE = 0x40,/* data will be accessed only
once by this client */
};
#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
@ -431,6 +439,15 @@ enum {
CEPH_OSD_CMPXATTR_MODE_U64 = 2
};
enum {
CEPH_OSD_COPY_FROM_FLAG_FLUSH = 1, /* part of a flush operation */
CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY = 2, /* ignore pool overlay */
CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE = 4, /* ignore osd cache logic */
CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE = 8, /* map snap direct to
* cloneid */
CEPH_OSD_COPY_FROM_FLAG_RWORDERED = 16, /* order with write */
};
enum {
CEPH_OSD_WATCH_OP_UNWATCH = 0,
CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1,
@ -497,6 +514,17 @@ struct ceph_osd_op {
__le64 expected_object_size;
__le64 expected_write_size;
} __attribute__ ((packed)) alloc_hint;
struct {
__le64 snapid;
__le64 src_version;
__u8 flags; /* CEPH_OSD_COPY_FROM_FLAG_* */
/*
* CEPH_OSD_OP_FLAG_FADVISE_*: fadvise flags
* for src object, flags for dest object are in
* ceph_osd_op::flags.
*/
__le32 src_fadvise_flags;
} __attribute__ ((packed)) copy_from;
};
__le32 payload_len;
} __attribute__ ((packed));


@ -119,6 +119,11 @@ struct clk_duty {
* Called with enable_lock held. This function must not
* sleep.
*
* @save_context: Save the context of the clock in preparation for poweroff.
*
* @restore_context: Restore the context of the clock after a restoration
* of power.
*
* @recalc_rate Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
* ensure that the prepare_mutex is held across this call.
@ -223,6 +228,8 @@ struct clk_ops {
void (*disable)(struct clk_hw *hw);
int (*is_enabled)(struct clk_hw *hw);
void (*disable_unused)(struct clk_hw *hw);
int (*save_context)(struct clk_hw *hw);
void (*restore_context)(struct clk_hw *hw);
unsigned long (*recalc_rate)(struct clk_hw *hw,
unsigned long parent_rate);
long (*round_rate)(struct clk_hw *hw, unsigned long rate,
@ -1011,5 +1018,7 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
#endif /* platform dependent I/O accessors */
void clk_gate_restore_context(struct clk_hw *hw);
#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */


@ -312,7 +312,26 @@ struct clk *clk_get(struct device *dev, const char *id);
*/
int __must_check clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
/**
* clk_bulk_get_all - lookup and obtain all available references to clock
* producers.
* @dev: device for clock "consumer"
* @clks: pointer to the clk_bulk_data table of consumer
*
* This helper function allows drivers to get all clk consumers in one
* operation. If any of the clks cannot be acquired, then any clks that
* were obtained will be freed before returning to the caller.
*
* Returns a positive value for the number of clocks obtained, while the
* clock references are stored in the clk_bulk_data table in the @clks field.
* Returns 0 if there are none and a negative value if something failed.
*
* Drivers must assume that the clock source is not enabled.
*
* clk_bulk_get should not be called from within interrupt context.
*/
int __must_check clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks);
/**
* devm_clk_bulk_get - managed get multiple clk consumers
* @dev: device for clock "consumer"
@ -327,6 +346,22 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks,
*/
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks);
/**
* devm_clk_bulk_get_all - managed get multiple clk consumers
* @dev: device for clock "consumer"
* @clks: pointer to the clk_bulk_data table of consumer
*
* Returns a positive value for the number of clocks obtained, while the
* clock references are stored in the clk_bulk_data table in the @clks field.
* Returns 0 if there are none and a negative value if something failed.
*
* This helper function allows drivers to get several clk
* consumers in one operation with management; the clks will
* automatically be freed when the device is unbound.
*/
int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks);
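
A short sketch, in a hypothetical driver probe path, pairing the managed get-all variant with the existing bulk enable helper:

	struct clk_bulk_data *clks;
	int num;

	num = devm_clk_bulk_get_all(dev, &clks);  /* dev: the probing device */
	if (num < 0)
		return num;	/* lookup failed; partial gets were freed */

	ret = clk_bulk_prepare_enable(num, clks);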
/**
* devm_clk_get - lookup and obtain a managed reference to a clock producer.
@ -487,6 +522,19 @@ void clk_put(struct clk *clk);
*/
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);
/**
* clk_bulk_put_all - "free" all the clock sources
* @num_clks: the number of clk_bulk_data
* @clks: the clk_bulk_data table of consumer
*
* Note: drivers must ensure that all clk_bulk_enable calls made on this
* clock source are balanced by clk_bulk_disable calls prior to calling
* this function.
*
* clk_bulk_put_all should not be called from within interrupt context.
*/
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);
/**
* devm_clk_put - "free" a managed clock source
* @dev: device used to acquire the clock
@ -629,6 +677,23 @@ struct clk *clk_get_parent(struct clk *clk);
*/
struct clk *clk_get_sys(const char *dev_id, const char *con_id);
/**
* clk_save_context - save clock context for poweroff
*
* Saves the context of the clock registers for power states in which
* their contents will be lost. Occurs deep within the suspend
* code so locking is not necessary.
*/
int clk_save_context(void);
/**
* clk_restore_context - restore clock context after poweroff
*
* This occurs with all clocks enabled. Occurs deep within the resume code
* so locking is not necessary.
*/
void clk_restore_context(void);
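
A minimal sketch, assuming a platform suspend path, of where the two calls belong:

	/* Sketch: hypothetical platform code around a power-losing state */
	clk_save_context();		/* clock registers about to lose power */
	platform_enter_deep_sleep();	/* hypothetical platform helper */
	clk_restore_context();		/* resume: all clocks enabled again */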
#else /* !CONFIG_HAVE_CLK */
static inline struct clk *clk_get(struct device *dev, const char *id)
@ -642,6 +707,12 @@ static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
return 0;
}
static inline int __must_check clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{
return 0;
}
static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
return NULL;
@ -653,6 +724,13 @@ static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clk
return 0;
}
static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks)
{
return 0;
}
static inline struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
@ -663,6 +741,8 @@ static inline void clk_put(struct clk *clk) {}
static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}
static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
@ -728,6 +808,14 @@ static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
return NULL;
}
static inline int clk_save_context(void)
{
return 0;
}
static inline void clk_restore_context(void) {}
#endif
/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */


@ -1,14 +1,10 @@
/*
/* SPDX-License-Identifier: GPL-2.0+
*
* Copyright 2013 Ideas On Board SPRL
* Copyright 2013, 2014 Horms Solutions Ltd.
*
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
* Contact: Simon Horman <horms@verge.net.au>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __LINUX_CLK_RENESAS_H_


@ -159,6 +159,7 @@ struct clk_hw_omap {
const char *clkdm_name;
struct clockdomain *clkdm;
const struct clk_hw_omap_ops *ops;
u32 context;
};
/*
@ -290,9 +291,15 @@ struct ti_clk_features {
#define TI_CLK_DPLL4_DENY_REPROGRAM BIT(1)
#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2)
#define TI_CLK_ERRATA_I810 BIT(3)
#define TI_CLK_CLKCTRL_COMPAT BIT(4)
void ti_clk_setup_features(struct ti_clk_features *features);
const struct ti_clk_features *ti_clk_get_features(void);
int omap3_noncore_dpll_save_context(struct clk_hw *hw);
void omap3_noncore_dpll_restore_context(struct clk_hw *hw);
int omap3_core_dpll_save_context(struct clk_hw *hw);
void omap3_core_dpll_restore_context(struct clk_hw *hw);
extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;


@ -488,8 +488,11 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
compat_sigset_t v;
switch (_NSIG_WORDS) {
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
/* fall through */
case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
/* fall through */
case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
/* fall through */
case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
}
return copy_to_user(compat, &v, size) ? -EFAULT : 0;


@ -344,29 +344,14 @@ static inline void *offset_to_ptr(const int *off)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
* Sparse complains of variable sized arrays due to the temporary variable in
* __compiletime_assert. Unfortunately we can't just expand it out to make
* sparse see a constant array size without breaking compiletime_assert on old
* versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
*/
# ifndef __CHECKER__
# define __compiletime_error_fallback(condition) \
do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif
#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
int __cond = !(condition); \
extern void prefix ## suffix(void) __compiletime_error(msg); \
if (__cond) \
if (!(condition)) \
prefix ## suffix(); \
__compiletime_error_fallback(__cond); \
} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)


@ -141,7 +141,6 @@ struct vc_data {
struct uni_pagedir *vc_uni_pagedir;
struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
struct uni_screen *vc_uni_screen; /* unicode screen content */
bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
/* additional information is in vt_kern.h */
};


@ -4,6 +4,61 @@
#include <uapi/linux/fanotify.h>
/* not valid from userspace, only kernel internal */
#define FAN_MARK_ONDIR 0x00000100
#define FAN_GROUP_FLAG(group, flag) \
((group)->fanotify_data.flags & (flag))
/*
* Flags allowed to be passed from/to userspace.
*
* We intentionally do not add new bits to the old FAN_ALL_* constants, because
* they are uapi-exposed constants. If there are programs out there using
* these constants, the programs may break if re-compiled with new uapi headers
* and then run on an old kernel.
*/
#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \
FAN_CLASS_PRE_CONTENT)
#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | \
FAN_REPORT_TID | \
FAN_CLOEXEC | FAN_NONBLOCK | \
FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS)
#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \
FAN_MARK_FILESYSTEM)
#define FANOTIFY_MARK_FLAGS (FANOTIFY_MARK_TYPE_BITS | \
FAN_MARK_ADD | \
FAN_MARK_REMOVE | \
FAN_MARK_DONT_FOLLOW | \
FAN_MARK_ONLYDIR | \
FAN_MARK_IGNORED_MASK | \
FAN_MARK_IGNORED_SURV_MODIFY | \
FAN_MARK_FLUSH)
/* Events that user can request to be notified on */
#define FANOTIFY_EVENTS (FAN_ACCESS | FAN_MODIFY | \
FAN_CLOSE | FAN_OPEN)
/* Events that require a permission response from user */
#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM)
/* Extra flags that may be reported with event or control handling of events */
#define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR)
/* Events that may be reported to user */
#define FANOTIFY_OUTGOING_EVENTS (FANOTIFY_EVENTS | \
FANOTIFY_PERM_EVENTS | \
FAN_Q_OVERFLOW)
#define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \
FANOTIFY_EVENT_FLAGS)
/* Do not use these old uapi constants internally */
#undef FAN_ALL_CLASS_BITS
#undef FAN_ALL_INIT_FLAGS
#undef FAN_ALL_MARK_FLAGS
#undef FAN_ALL_EVENTS
#undef FAN_ALL_PERM_EVENTS
#undef FAN_ALL_OUTGOING_EVENTS
#endif /* _LINUX_FANOTIFY_H */
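
A short sketch, mirroring fanotify's event setup, of the new FAN_GROUP_FLAG() helper (the surrounding variables are illustrative):

	if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
		event->pid = get_pid(task_tgid(current));	/* thread group id */
	else
		event->pid = get_pid(task_pid(current));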


@ -456,10 +456,13 @@ struct fb_tile_ops {
* and host endianness. Drivers should not use this flag.
*/
#define FBINFO_BE_MATH 0x100000
/*
* Hide smem_start in the FBIOGET_FSCREENINFO IOCTL. This is used by modern DRM
* drivers to stop userspace from trying to share buffers behind the kernel's
* back. Instead dma-buf based buffer sharing should be used.
*/
#define FBINFO_HIDE_SMEM_START 0x200000
/* report to the VT layer that this fb driver can accept forced console
output like oopses */
#define FBINFO_CAN_FORCE_OUTPUT 0x200000
struct fb_info {
atomic_t count;
@ -632,6 +635,8 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
extern int register_framebuffer(struct fb_info *fb_info);
extern int unregister_framebuffer(struct fb_info *fb_info);
extern int unlink_framebuffer(struct fb_info *fb_info);
extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id,
const char *name);
extern int remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);


@ -854,6 +854,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern int bpf_jit_limit;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);


@ -0,0 +1,59 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2018 NXP
*
* Header file for the IPC implementation.
*/
#ifndef _SC_IPC_H
#define _SC_IPC_H
#include <linux/device.h>
#include <linux/types.h>
#define IMX_SC_RPC_VERSION 1
#define IMX_SC_RPC_MAX_MSG 8
struct imx_sc_ipc;
enum imx_sc_rpc_svc {
IMX_SC_RPC_SVC_UNKNOWN = 0,
IMX_SC_RPC_SVC_RETURN = 1,
IMX_SC_RPC_SVC_PM = 2,
IMX_SC_RPC_SVC_RM = 3,
IMX_SC_RPC_SVC_TIMER = 5,
IMX_SC_RPC_SVC_PAD = 6,
IMX_SC_RPC_SVC_MISC = 7,
IMX_SC_RPC_SVC_IRQ = 8,
IMX_SC_RPC_SVC_ABORT = 9
};
struct imx_sc_rpc_msg {
uint8_t ver;
uint8_t size;
uint8_t svc;
uint8_t func;
};
/*
* This is a function to send an RPC message over an IPC channel.
* It is called by client-side SCFW API function shims.
*
* @param[in] ipc IPC handle
* @param[in,out] msg handle to a message
* @param[in] have_resp response flag
*
* If have_resp is true then this function waits for a response
* and returns the result in msg.
*/
int imx_scu_call_rpc(struct imx_sc_ipc *ipc, void *msg, bool have_resp);
/*
* This function gets the default IPC handle used by the SCU.
*
* @param[out] ipc sc ipc handle
*
* @return Returns an error code (0 = success, failed if < 0)
*/
int imx_scu_get_handle(struct imx_sc_ipc **ipc);
#endif /* _SC_IPC_H */
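
A hedged sketch of a client-side shim built on this API; the message struct layout is assumed for illustration (one header word followed by payload words), and the function id comes from the svc/misc.h hunk below:

	struct imx_sc_msg_get_ctrl {		/* hypothetical layout */
		struct imx_sc_rpc_msg hdr;
		u32 resource;
		u8 ctrl;
	} __packed;

	static int my_misc_get_control(struct imx_sc_ipc *ipc, u32 resource, u8 ctrl)
	{
		struct imx_sc_msg_get_ctrl msg;

		msg.hdr.ver  = IMX_SC_RPC_VERSION;
		msg.hdr.svc  = IMX_SC_RPC_SVC_MISC;
		msg.hdr.func = IMX_SC_MISC_FUNC_GET_CONTROL;
		msg.hdr.size = 3;	/* words: header + resource + ctrl */
		msg.resource = resource;
		msg.ctrl = ctrl;

		return imx_scu_call_rpc(ipc, &msg, true);	/* wait for response */
	}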


@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017~2018 NXP
*
* Header file containing the public System Controller Interface (SCI)
* definitions.
*/
#ifndef _SC_SCI_H
#define _SC_SCI_H
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/svc/misc.h>
#endif /* _SC_SCI_H */


@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017~2018 NXP
*
* Header file containing the public API for the System Controller (SC)
* Miscellaneous (MISC) function.
*
* MISC_SVC (SVC) Miscellaneous Service
*
* Module for the Miscellaneous (MISC) service.
*/
#ifndef _SC_MISC_API_H
#define _SC_MISC_API_H
#include <linux/firmware/imx/sci.h>
/*
* This type is used to indicate RPC MISC function calls.
*/
enum imx_misc_func {
IMX_SC_MISC_FUNC_UNKNOWN = 0,
IMX_SC_MISC_FUNC_SET_CONTROL = 1,
IMX_SC_MISC_FUNC_GET_CONTROL = 2,
IMX_SC_MISC_FUNC_SET_MAX_DMA_GROUP = 4,
IMX_SC_MISC_FUNC_SET_DMA_GROUP = 5,
IMX_SC_MISC_FUNC_SECO_IMAGE_LOAD = 8,
IMX_SC_MISC_FUNC_SECO_AUTHENTICATE = 9,
IMX_SC_MISC_FUNC_DEBUG_OUT = 10,
IMX_SC_MISC_FUNC_WAVEFORM_CAPTURE = 6,
IMX_SC_MISC_FUNC_BUILD_INFO = 15,
IMX_SC_MISC_FUNC_UNIQUE_ID = 19,
IMX_SC_MISC_FUNC_SET_ARI = 3,
IMX_SC_MISC_FUNC_BOOT_STATUS = 7,
IMX_SC_MISC_FUNC_BOOT_DONE = 14,
IMX_SC_MISC_FUNC_OTP_FUSE_READ = 11,
IMX_SC_MISC_FUNC_OTP_FUSE_WRITE = 17,
IMX_SC_MISC_FUNC_SET_TEMP = 12,
IMX_SC_MISC_FUNC_GET_TEMP = 13,
IMX_SC_MISC_FUNC_GET_BOOT_DEV = 16,
IMX_SC_MISC_FUNC_GET_BUTTON_STATUS = 18,
};
/*
* Control Functions
*/
int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource,
u8 ctrl, u32 val);
int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
u8 ctrl, u32 *val);
#endif /* _SC_MISC_API_H */


@ -0,0 +1,617 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017~2018 NXP
*
* Header file containing types used across multiple service APIs.
*/
#ifndef _SC_TYPES_H
#define _SC_TYPES_H
/*
* This type is used to indicate a resource. Resources include peripherals
* and bus masters (but not memory regions). Note that items from this list
* should never be changed or removed (only added to at the end of the list).
*/
enum imx_sc_rsrc {
IMX_SC_R_A53 = 0,
IMX_SC_R_A53_0 = 1,
IMX_SC_R_A53_1 = 2,
IMX_SC_R_A53_2 = 3,
IMX_SC_R_A53_3 = 4,
IMX_SC_R_A72 = 5,
IMX_SC_R_A72_0 = 6,
IMX_SC_R_A72_1 = 7,
IMX_SC_R_A72_2 = 8,
IMX_SC_R_A72_3 = 9,
IMX_SC_R_CCI = 10,
IMX_SC_R_DB = 11,
IMX_SC_R_DRC_0 = 12,
IMX_SC_R_DRC_1 = 13,
IMX_SC_R_GIC_SMMU = 14,
IMX_SC_R_IRQSTR_M4_0 = 15,
IMX_SC_R_IRQSTR_M4_1 = 16,
IMX_SC_R_SMMU = 17,
IMX_SC_R_GIC = 18,
IMX_SC_R_DC_0_BLIT0 = 19,
IMX_SC_R_DC_0_BLIT1 = 20,
IMX_SC_R_DC_0_BLIT2 = 21,
IMX_SC_R_DC_0_BLIT_OUT = 22,
IMX_SC_R_DC_0_CAPTURE0 = 23,
IMX_SC_R_DC_0_CAPTURE1 = 24,
IMX_SC_R_DC_0_WARP = 25,
IMX_SC_R_DC_0_INTEGRAL0 = 26,
IMX_SC_R_DC_0_INTEGRAL1 = 27,
IMX_SC_R_DC_0_VIDEO0 = 28,
IMX_SC_R_DC_0_VIDEO1 = 29,
IMX_SC_R_DC_0_FRAC0 = 30,
IMX_SC_R_DC_0_FRAC1 = 31,
IMX_SC_R_DC_0 = 32,
IMX_SC_R_GPU_2_PID0 = 33,
IMX_SC_R_DC_0_PLL_0 = 34,
IMX_SC_R_DC_0_PLL_1 = 35,
IMX_SC_R_DC_1_BLIT0 = 36,
IMX_SC_R_DC_1_BLIT1 = 37,
IMX_SC_R_DC_1_BLIT2 = 38,
IMX_SC_R_DC_1_BLIT_OUT = 39,
IMX_SC_R_DC_1_CAPTURE0 = 40,
IMX_SC_R_DC_1_CAPTURE1 = 41,
IMX_SC_R_DC_1_WARP = 42,
IMX_SC_R_DC_1_INTEGRAL0 = 43,
IMX_SC_R_DC_1_INTEGRAL1 = 44,
IMX_SC_R_DC_1_VIDEO0 = 45,
IMX_SC_R_DC_1_VIDEO1 = 46,
IMX_SC_R_DC_1_FRAC0 = 47,
IMX_SC_R_DC_1_FRAC1 = 48,
IMX_SC_R_DC_1 = 49,
IMX_SC_R_GPU_3_PID0 = 50,
IMX_SC_R_DC_1_PLL_0 = 51,
IMX_SC_R_DC_1_PLL_1 = 52,
IMX_SC_R_SPI_0 = 53,
IMX_SC_R_SPI_1 = 54,
IMX_SC_R_SPI_2 = 55,
IMX_SC_R_SPI_3 = 56,
IMX_SC_R_UART_0 = 57,
IMX_SC_R_UART_1 = 58,
IMX_SC_R_UART_2 = 59,
IMX_SC_R_UART_3 = 60,
IMX_SC_R_UART_4 = 61,
IMX_SC_R_EMVSIM_0 = 62,
IMX_SC_R_EMVSIM_1 = 63,
IMX_SC_R_DMA_0_CH0 = 64,
IMX_SC_R_DMA_0_CH1 = 65,
IMX_SC_R_DMA_0_CH2 = 66,
IMX_SC_R_DMA_0_CH3 = 67,
IMX_SC_R_DMA_0_CH4 = 68,
IMX_SC_R_DMA_0_CH5 = 69,
IMX_SC_R_DMA_0_CH6 = 70,
IMX_SC_R_DMA_0_CH7 = 71,
IMX_SC_R_DMA_0_CH8 = 72,
IMX_SC_R_DMA_0_CH9 = 73,
IMX_SC_R_DMA_0_CH10 = 74,
IMX_SC_R_DMA_0_CH11 = 75,
IMX_SC_R_DMA_0_CH12 = 76,
IMX_SC_R_DMA_0_CH13 = 77,
IMX_SC_R_DMA_0_CH14 = 78,
IMX_SC_R_DMA_0_CH15 = 79,
IMX_SC_R_DMA_0_CH16 = 80,
IMX_SC_R_DMA_0_CH17 = 81,
IMX_SC_R_DMA_0_CH18 = 82,
IMX_SC_R_DMA_0_CH19 = 83,
IMX_SC_R_DMA_0_CH20 = 84,
IMX_SC_R_DMA_0_CH21 = 85,
IMX_SC_R_DMA_0_CH22 = 86,
IMX_SC_R_DMA_0_CH23 = 87,
IMX_SC_R_DMA_0_CH24 = 88,
IMX_SC_R_DMA_0_CH25 = 89,
IMX_SC_R_DMA_0_CH26 = 90,
IMX_SC_R_DMA_0_CH27 = 91,
IMX_SC_R_DMA_0_CH28 = 92,
IMX_SC_R_DMA_0_CH29 = 93,
IMX_SC_R_DMA_0_CH30 = 94,
IMX_SC_R_DMA_0_CH31 = 95,
IMX_SC_R_I2C_0 = 96,
IMX_SC_R_I2C_1 = 97,
IMX_SC_R_I2C_2 = 98,
IMX_SC_R_I2C_3 = 99,
IMX_SC_R_I2C_4 = 100,
IMX_SC_R_ADC_0 = 101,
IMX_SC_R_ADC_1 = 102,
IMX_SC_R_FTM_0 = 103,
IMX_SC_R_FTM_1 = 104,
IMX_SC_R_CAN_0 = 105,
IMX_SC_R_CAN_1 = 106,
IMX_SC_R_CAN_2 = 107,
IMX_SC_R_DMA_1_CH0 = 108,
IMX_SC_R_DMA_1_CH1 = 109,
IMX_SC_R_DMA_1_CH2 = 110,
IMX_SC_R_DMA_1_CH3 = 111,
IMX_SC_R_DMA_1_CH4 = 112,
IMX_SC_R_DMA_1_CH5 = 113,
IMX_SC_R_DMA_1_CH6 = 114,
IMX_SC_R_DMA_1_CH7 = 115,
IMX_SC_R_DMA_1_CH8 = 116,
IMX_SC_R_DMA_1_CH9 = 117,
IMX_SC_R_DMA_1_CH10 = 118,
IMX_SC_R_DMA_1_CH11 = 119,
IMX_SC_R_DMA_1_CH12 = 120,
IMX_SC_R_DMA_1_CH13 = 121,
IMX_SC_R_DMA_1_CH14 = 122,
IMX_SC_R_DMA_1_CH15 = 123,
IMX_SC_R_DMA_1_CH16 = 124,
IMX_SC_R_DMA_1_CH17 = 125,
IMX_SC_R_DMA_1_CH18 = 126,
IMX_SC_R_DMA_1_CH19 = 127,
IMX_SC_R_DMA_1_CH20 = 128,
IMX_SC_R_DMA_1_CH21 = 129,
IMX_SC_R_DMA_1_CH22 = 130,
IMX_SC_R_DMA_1_CH23 = 131,
IMX_SC_R_DMA_1_CH24 = 132,
IMX_SC_R_DMA_1_CH25 = 133,
IMX_SC_R_DMA_1_CH26 = 134,
IMX_SC_R_DMA_1_CH27 = 135,
IMX_SC_R_DMA_1_CH28 = 136,
IMX_SC_R_DMA_1_CH29 = 137,
IMX_SC_R_DMA_1_CH30 = 138,
IMX_SC_R_DMA_1_CH31 = 139,
IMX_SC_R_UNUSED1 = 140,
IMX_SC_R_UNUSED2 = 141,
IMX_SC_R_UNUSED3 = 142,
IMX_SC_R_UNUSED4 = 143,
IMX_SC_R_GPU_0_PID0 = 144,
IMX_SC_R_GPU_0_PID1 = 145,
IMX_SC_R_GPU_0_PID2 = 146,
IMX_SC_R_GPU_0_PID3 = 147,
IMX_SC_R_GPU_1_PID0 = 148,
IMX_SC_R_GPU_1_PID1 = 149,
IMX_SC_R_GPU_1_PID2 = 150,
IMX_SC_R_GPU_1_PID3 = 151,
IMX_SC_R_PCIE_A = 152,
IMX_SC_R_SERDES_0 = 153,
IMX_SC_R_MATCH_0 = 154,
IMX_SC_R_MATCH_1 = 155,
IMX_SC_R_MATCH_2 = 156,
IMX_SC_R_MATCH_3 = 157,
IMX_SC_R_MATCH_4 = 158,
IMX_SC_R_MATCH_5 = 159,
IMX_SC_R_MATCH_6 = 160,
IMX_SC_R_MATCH_7 = 161,
IMX_SC_R_MATCH_8 = 162,
IMX_SC_R_MATCH_9 = 163,
IMX_SC_R_MATCH_10 = 164,
IMX_SC_R_MATCH_11 = 165,
IMX_SC_R_MATCH_12 = 166,
IMX_SC_R_MATCH_13 = 167,
IMX_SC_R_MATCH_14 = 168,
IMX_SC_R_PCIE_B = 169,
IMX_SC_R_SATA_0 = 170,
IMX_SC_R_SERDES_1 = 171,
IMX_SC_R_HSIO_GPIO = 172,
IMX_SC_R_MATCH_15 = 173,
IMX_SC_R_MATCH_16 = 174,
IMX_SC_R_MATCH_17 = 175,
IMX_SC_R_MATCH_18 = 176,
IMX_SC_R_MATCH_19 = 177,
IMX_SC_R_MATCH_20 = 178,
IMX_SC_R_MATCH_21 = 179,
IMX_SC_R_MATCH_22 = 180,
IMX_SC_R_MATCH_23 = 181,
IMX_SC_R_MATCH_24 = 182,
IMX_SC_R_MATCH_25 = 183,
IMX_SC_R_MATCH_26 = 184,
IMX_SC_R_MATCH_27 = 185,
IMX_SC_R_MATCH_28 = 186,
IMX_SC_R_LCD_0 = 187,
IMX_SC_R_LCD_0_PWM_0 = 188,
IMX_SC_R_LCD_0_I2C_0 = 189,
IMX_SC_R_LCD_0_I2C_1 = 190,
IMX_SC_R_PWM_0 = 191,
IMX_SC_R_PWM_1 = 192,
IMX_SC_R_PWM_2 = 193,
IMX_SC_R_PWM_3 = 194,
IMX_SC_R_PWM_4 = 195,
IMX_SC_R_PWM_5 = 196,
IMX_SC_R_PWM_6 = 197,
IMX_SC_R_PWM_7 = 198,
IMX_SC_R_GPIO_0 = 199,
IMX_SC_R_GPIO_1 = 200,
IMX_SC_R_GPIO_2 = 201,
IMX_SC_R_GPIO_3 = 202,
IMX_SC_R_GPIO_4 = 203,
IMX_SC_R_GPIO_5 = 204,
IMX_SC_R_GPIO_6 = 205,
IMX_SC_R_GPIO_7 = 206,
IMX_SC_R_GPT_0 = 207,
IMX_SC_R_GPT_1 = 208,
IMX_SC_R_GPT_2 = 209,
IMX_SC_R_GPT_3 = 210,
IMX_SC_R_GPT_4 = 211,
IMX_SC_R_KPP = 212,
IMX_SC_R_MU_0A = 213,
IMX_SC_R_MU_1A = 214,
IMX_SC_R_MU_2A = 215,
IMX_SC_R_MU_3A = 216,
IMX_SC_R_MU_4A = 217,
IMX_SC_R_MU_5A = 218,
IMX_SC_R_MU_6A = 219,
IMX_SC_R_MU_7A = 220,
IMX_SC_R_MU_8A = 221,
IMX_SC_R_MU_9A = 222,
IMX_SC_R_MU_10A = 223,
IMX_SC_R_MU_11A = 224,
IMX_SC_R_MU_12A = 225,
IMX_SC_R_MU_13A = 226,
IMX_SC_R_MU_5B = 227,
IMX_SC_R_MU_6B = 228,
IMX_SC_R_MU_7B = 229,
IMX_SC_R_MU_8B = 230,
IMX_SC_R_MU_9B = 231,
IMX_SC_R_MU_10B = 232,
IMX_SC_R_MU_11B = 233,
IMX_SC_R_MU_12B = 234,
IMX_SC_R_MU_13B = 235,
IMX_SC_R_ROM_0 = 236,
IMX_SC_R_FSPI_0 = 237,
IMX_SC_R_FSPI_1 = 238,
IMX_SC_R_IEE = 239,
IMX_SC_R_IEE_R0 = 240,
IMX_SC_R_IEE_R1 = 241,
IMX_SC_R_IEE_R2 = 242,
IMX_SC_R_IEE_R3 = 243,
IMX_SC_R_IEE_R4 = 244,
IMX_SC_R_IEE_R5 = 245,
IMX_SC_R_IEE_R6 = 246,
IMX_SC_R_IEE_R7 = 247,
IMX_SC_R_SDHC_0 = 248,
IMX_SC_R_SDHC_1 = 249,
IMX_SC_R_SDHC_2 = 250,
IMX_SC_R_ENET_0 = 251,
IMX_SC_R_ENET_1 = 252,
IMX_SC_R_MLB_0 = 253,
IMX_SC_R_DMA_2_CH0 = 254,
IMX_SC_R_DMA_2_CH1 = 255,
IMX_SC_R_DMA_2_CH2 = 256,
IMX_SC_R_DMA_2_CH3 = 257,
IMX_SC_R_DMA_2_CH4 = 258,
IMX_SC_R_USB_0 = 259,
IMX_SC_R_USB_1 = 260,
IMX_SC_R_USB_0_PHY = 261,
IMX_SC_R_USB_2 = 262,
IMX_SC_R_USB_2_PHY = 263,
IMX_SC_R_DTCP = 264,
IMX_SC_R_NAND = 265,
IMX_SC_R_LVDS_0 = 266,
IMX_SC_R_LVDS_0_PWM_0 = 267,
IMX_SC_R_LVDS_0_I2C_0 = 268,
IMX_SC_R_LVDS_0_I2C_1 = 269,
IMX_SC_R_LVDS_1 = 270,
IMX_SC_R_LVDS_1_PWM_0 = 271,
IMX_SC_R_LVDS_1_I2C_0 = 272,
IMX_SC_R_LVDS_1_I2C_1 = 273,
IMX_SC_R_LVDS_2 = 274,
IMX_SC_R_LVDS_2_PWM_0 = 275,
IMX_SC_R_LVDS_2_I2C_0 = 276,
IMX_SC_R_LVDS_2_I2C_1 = 277,
IMX_SC_R_M4_0_PID0 = 278,
IMX_SC_R_M4_0_PID1 = 279,
IMX_SC_R_M4_0_PID2 = 280,
IMX_SC_R_M4_0_PID3 = 281,
IMX_SC_R_M4_0_PID4 = 282,
IMX_SC_R_M4_0_RGPIO = 283,
IMX_SC_R_M4_0_SEMA42 = 284,
IMX_SC_R_M4_0_TPM = 285,
IMX_SC_R_M4_0_PIT = 286,
IMX_SC_R_M4_0_UART = 287,
IMX_SC_R_M4_0_I2C = 288,
IMX_SC_R_M4_0_INTMUX = 289,
IMX_SC_R_M4_0_SIM = 290,
IMX_SC_R_M4_0_WDOG = 291,
IMX_SC_R_M4_0_MU_0B = 292,
IMX_SC_R_M4_0_MU_0A0 = 293,
IMX_SC_R_M4_0_MU_0A1 = 294,
IMX_SC_R_M4_0_MU_0A2 = 295,
IMX_SC_R_M4_0_MU_0A3 = 296,
IMX_SC_R_M4_0_MU_1A = 297,
IMX_SC_R_M4_1_PID0 = 298,
IMX_SC_R_M4_1_PID1 = 299,
IMX_SC_R_M4_1_PID2 = 300,
IMX_SC_R_M4_1_PID3 = 301,
IMX_SC_R_M4_1_PID4 = 302,
IMX_SC_R_M4_1_RGPIO = 303,
IMX_SC_R_M4_1_SEMA42 = 304,
IMX_SC_R_M4_1_TPM = 305,
IMX_SC_R_M4_1_PIT = 306,
IMX_SC_R_M4_1_UART = 307,
IMX_SC_R_M4_1_I2C = 308,
IMX_SC_R_M4_1_INTMUX = 309,
IMX_SC_R_M4_1_SIM = 310,
IMX_SC_R_M4_1_WDOG = 311,
IMX_SC_R_M4_1_MU_0B = 312,
IMX_SC_R_M4_1_MU_0A0 = 313,
IMX_SC_R_M4_1_MU_0A1 = 314,
IMX_SC_R_M4_1_MU_0A2 = 315,
IMX_SC_R_M4_1_MU_0A3 = 316,
IMX_SC_R_M4_1_MU_1A = 317,
IMX_SC_R_SAI_0 = 318,
IMX_SC_R_SAI_1 = 319,
IMX_SC_R_SAI_2 = 320,
IMX_SC_R_IRQSTR_SCU2 = 321,
IMX_SC_R_IRQSTR_DSP = 322,
IMX_SC_R_UNUSED5 = 323,
IMX_SC_R_UNUSED6 = 324,
IMX_SC_R_AUDIO_PLL_0 = 325,
IMX_SC_R_PI_0 = 326,
IMX_SC_R_PI_0_PWM_0 = 327,
IMX_SC_R_PI_0_PWM_1 = 328,
IMX_SC_R_PI_0_I2C_0 = 329,
IMX_SC_R_PI_0_PLL = 330,
IMX_SC_R_PI_1 = 331,
IMX_SC_R_PI_1_PWM_0 = 332,
IMX_SC_R_PI_1_PWM_1 = 333,
IMX_SC_R_PI_1_I2C_0 = 334,
IMX_SC_R_PI_1_PLL = 335,
IMX_SC_R_SC_PID0 = 336,
IMX_SC_R_SC_PID1 = 337,
IMX_SC_R_SC_PID2 = 338,
IMX_SC_R_SC_PID3 = 339,
IMX_SC_R_SC_PID4 = 340,
IMX_SC_R_SC_SEMA42 = 341,
IMX_SC_R_SC_TPM = 342,
IMX_SC_R_SC_PIT = 343,
IMX_SC_R_SC_UART = 344,
IMX_SC_R_SC_I2C = 345,
IMX_SC_R_SC_MU_0B = 346,
IMX_SC_R_SC_MU_0A0 = 347,
IMX_SC_R_SC_MU_0A1 = 348,
IMX_SC_R_SC_MU_0A2 = 349,
IMX_SC_R_SC_MU_0A3 = 350,
IMX_SC_R_SC_MU_1A = 351,
IMX_SC_R_SYSCNT_RD = 352,
IMX_SC_R_SYSCNT_CMP = 353,
IMX_SC_R_DEBUG = 354,
IMX_SC_R_SYSTEM = 355,
IMX_SC_R_SNVS = 356,
IMX_SC_R_OTP = 357,
IMX_SC_R_VPU_PID0 = 358,
IMX_SC_R_VPU_PID1 = 359,
IMX_SC_R_VPU_PID2 = 360,
IMX_SC_R_VPU_PID3 = 361,
IMX_SC_R_VPU_PID4 = 362,
IMX_SC_R_VPU_PID5 = 363,
IMX_SC_R_VPU_PID6 = 364,
IMX_SC_R_VPU_PID7 = 365,
IMX_SC_R_VPU_UART = 366,
IMX_SC_R_VPUCORE = 367,
IMX_SC_R_VPUCORE_0 = 368,
IMX_SC_R_VPUCORE_1 = 369,
IMX_SC_R_VPUCORE_2 = 370,
IMX_SC_R_VPUCORE_3 = 371,
IMX_SC_R_DMA_4_CH0 = 372,
IMX_SC_R_DMA_4_CH1 = 373,
IMX_SC_R_DMA_4_CH2 = 374,
IMX_SC_R_DMA_4_CH3 = 375,
IMX_SC_R_DMA_4_CH4 = 376,
IMX_SC_R_ISI_CH0 = 377,
IMX_SC_R_ISI_CH1 = 378,
IMX_SC_R_ISI_CH2 = 379,
IMX_SC_R_ISI_CH3 = 380,
IMX_SC_R_ISI_CH4 = 381,
IMX_SC_R_ISI_CH5 = 382,
IMX_SC_R_ISI_CH6 = 383,
IMX_SC_R_ISI_CH7 = 384,
IMX_SC_R_MJPEG_DEC_S0 = 385,
IMX_SC_R_MJPEG_DEC_S1 = 386,
IMX_SC_R_MJPEG_DEC_S2 = 387,
IMX_SC_R_MJPEG_DEC_S3 = 388,
IMX_SC_R_MJPEG_ENC_S0 = 389,
IMX_SC_R_MJPEG_ENC_S1 = 390,
IMX_SC_R_MJPEG_ENC_S2 = 391,
IMX_SC_R_MJPEG_ENC_S3 = 392,
IMX_SC_R_MIPI_0 = 393,
IMX_SC_R_MIPI_0_PWM_0 = 394,
IMX_SC_R_MIPI_0_I2C_0 = 395,
IMX_SC_R_MIPI_0_I2C_1 = 396,
IMX_SC_R_MIPI_1 = 397,
IMX_SC_R_MIPI_1_PWM_0 = 398,
IMX_SC_R_MIPI_1_I2C_0 = 399,
IMX_SC_R_MIPI_1_I2C_1 = 400,
IMX_SC_R_CSI_0 = 401,
IMX_SC_R_CSI_0_PWM_0 = 402,
IMX_SC_R_CSI_0_I2C_0 = 403,
IMX_SC_R_CSI_1 = 404,
IMX_SC_R_CSI_1_PWM_0 = 405,
IMX_SC_R_CSI_1_I2C_0 = 406,
IMX_SC_R_HDMI = 407,
IMX_SC_R_HDMI_I2S = 408,
IMX_SC_R_HDMI_I2C_0 = 409,
IMX_SC_R_HDMI_PLL_0 = 410,
IMX_SC_R_HDMI_RX = 411,
IMX_SC_R_HDMI_RX_BYPASS = 412,
IMX_SC_R_HDMI_RX_I2C_0 = 413,
IMX_SC_R_ASRC_0 = 414,
IMX_SC_R_ESAI_0 = 415,
IMX_SC_R_SPDIF_0 = 416,
IMX_SC_R_SPDIF_1 = 417,
IMX_SC_R_SAI_3 = 418,
IMX_SC_R_SAI_4 = 419,
IMX_SC_R_SAI_5 = 420,
IMX_SC_R_GPT_5 = 421,
IMX_SC_R_GPT_6 = 422,
IMX_SC_R_GPT_7 = 423,
IMX_SC_R_GPT_8 = 424,
IMX_SC_R_GPT_9 = 425,
IMX_SC_R_GPT_10 = 426,
IMX_SC_R_DMA_2_CH5 = 427,
IMX_SC_R_DMA_2_CH6 = 428,
IMX_SC_R_DMA_2_CH7 = 429,
IMX_SC_R_DMA_2_CH8 = 430,
IMX_SC_R_DMA_2_CH9 = 431,
IMX_SC_R_DMA_2_CH10 = 432,
IMX_SC_R_DMA_2_CH11 = 433,
IMX_SC_R_DMA_2_CH12 = 434,
IMX_SC_R_DMA_2_CH13 = 435,
IMX_SC_R_DMA_2_CH14 = 436,
IMX_SC_R_DMA_2_CH15 = 437,
IMX_SC_R_DMA_2_CH16 = 438,
IMX_SC_R_DMA_2_CH17 = 439,
IMX_SC_R_DMA_2_CH18 = 440,
IMX_SC_R_DMA_2_CH19 = 441,
IMX_SC_R_DMA_2_CH20 = 442,
IMX_SC_R_DMA_2_CH21 = 443,
IMX_SC_R_DMA_2_CH22 = 444,
IMX_SC_R_DMA_2_CH23 = 445,
IMX_SC_R_DMA_2_CH24 = 446,
IMX_SC_R_DMA_2_CH25 = 447,
IMX_SC_R_DMA_2_CH26 = 448,
IMX_SC_R_DMA_2_CH27 = 449,
IMX_SC_R_DMA_2_CH28 = 450,
IMX_SC_R_DMA_2_CH29 = 451,
IMX_SC_R_DMA_2_CH30 = 452,
IMX_SC_R_DMA_2_CH31 = 453,
IMX_SC_R_ASRC_1 = 454,
IMX_SC_R_ESAI_1 = 455,
IMX_SC_R_SAI_6 = 456,
IMX_SC_R_SAI_7 = 457,
IMX_SC_R_AMIX = 458,
IMX_SC_R_MQS_0 = 459,
IMX_SC_R_DMA_3_CH0 = 460,
IMX_SC_R_DMA_3_CH1 = 461,
IMX_SC_R_DMA_3_CH2 = 462,
IMX_SC_R_DMA_3_CH3 = 463,
IMX_SC_R_DMA_3_CH4 = 464,
IMX_SC_R_DMA_3_CH5 = 465,
IMX_SC_R_DMA_3_CH6 = 466,
IMX_SC_R_DMA_3_CH7 = 467,
IMX_SC_R_DMA_3_CH8 = 468,
IMX_SC_R_DMA_3_CH9 = 469,
IMX_SC_R_DMA_3_CH10 = 470,
IMX_SC_R_DMA_3_CH11 = 471,
IMX_SC_R_DMA_3_CH12 = 472,
IMX_SC_R_DMA_3_CH13 = 473,
IMX_SC_R_DMA_3_CH14 = 474,
IMX_SC_R_DMA_3_CH15 = 475,
IMX_SC_R_DMA_3_CH16 = 476,
IMX_SC_R_DMA_3_CH17 = 477,
IMX_SC_R_DMA_3_CH18 = 478,
IMX_SC_R_DMA_3_CH19 = 479,
IMX_SC_R_DMA_3_CH20 = 480,
IMX_SC_R_DMA_3_CH21 = 481,
IMX_SC_R_DMA_3_CH22 = 482,
IMX_SC_R_DMA_3_CH23 = 483,
IMX_SC_R_DMA_3_CH24 = 484,
IMX_SC_R_DMA_3_CH25 = 485,
IMX_SC_R_DMA_3_CH26 = 486,
IMX_SC_R_DMA_3_CH27 = 487,
IMX_SC_R_DMA_3_CH28 = 488,
IMX_SC_R_DMA_3_CH29 = 489,
IMX_SC_R_DMA_3_CH30 = 490,
IMX_SC_R_DMA_3_CH31 = 491,
IMX_SC_R_AUDIO_PLL_1 = 492,
IMX_SC_R_AUDIO_CLK_0 = 493,
IMX_SC_R_AUDIO_CLK_1 = 494,
IMX_SC_R_MCLK_OUT_0 = 495,
IMX_SC_R_MCLK_OUT_1 = 496,
IMX_SC_R_PMIC_0 = 497,
IMX_SC_R_PMIC_1 = 498,
IMX_SC_R_SECO = 499,
IMX_SC_R_CAAM_JR1 = 500,
IMX_SC_R_CAAM_JR2 = 501,
IMX_SC_R_CAAM_JR3 = 502,
IMX_SC_R_SECO_MU_2 = 503,
IMX_SC_R_SECO_MU_3 = 504,
IMX_SC_R_SECO_MU_4 = 505,
IMX_SC_R_HDMI_RX_PWM_0 = 506,
IMX_SC_R_A35 = 507,
IMX_SC_R_A35_0 = 508,
IMX_SC_R_A35_1 = 509,
IMX_SC_R_A35_2 = 510,
IMX_SC_R_A35_3 = 511,
IMX_SC_R_DSP = 512,
IMX_SC_R_DSP_RAM = 513,
IMX_SC_R_CAAM_JR1_OUT = 514,
IMX_SC_R_CAAM_JR2_OUT = 515,
IMX_SC_R_CAAM_JR3_OUT = 516,
IMX_SC_R_VPU_DEC_0 = 517,
IMX_SC_R_VPU_ENC_0 = 518,
IMX_SC_R_CAAM_JR0 = 519,
IMX_SC_R_CAAM_JR0_OUT = 520,
IMX_SC_R_PMIC_2 = 521,
IMX_SC_R_DBLOGIC = 522,
IMX_SC_R_HDMI_PLL_1 = 523,
IMX_SC_R_BOARD_R0 = 524,
IMX_SC_R_BOARD_R1 = 525,
IMX_SC_R_BOARD_R2 = 526,
IMX_SC_R_BOARD_R3 = 527,
IMX_SC_R_BOARD_R4 = 528,
IMX_SC_R_BOARD_R5 = 529,
IMX_SC_R_BOARD_R6 = 530,
IMX_SC_R_BOARD_R7 = 531,
IMX_SC_R_MJPEG_DEC_MP = 532,
IMX_SC_R_MJPEG_ENC_MP = 533,
IMX_SC_R_VPU_TS_0 = 534,
IMX_SC_R_VPU_MU_0 = 535,
IMX_SC_R_VPU_MU_1 = 536,
IMX_SC_R_VPU_MU_2 = 537,
IMX_SC_R_VPU_MU_3 = 538,
IMX_SC_R_VPU_ENC_1 = 539,
IMX_SC_R_VPU = 540,
IMX_SC_R_LAST
};
/* NOTE - please add new entries by replacing one of the UNUSED entries above! */
/*
* This type is used to indicate a control.
*/
enum imx_sc_ctrl {
IMX_SC_C_TEMP = 0,
IMX_SC_C_TEMP_HI = 1,
IMX_SC_C_TEMP_LOW = 2,
IMX_SC_C_PXL_LINK_MST1_ADDR = 3,
IMX_SC_C_PXL_LINK_MST2_ADDR = 4,
IMX_SC_C_PXL_LINK_MST_ENB = 5,
IMX_SC_C_PXL_LINK_MST1_ENB = 6,
IMX_SC_C_PXL_LINK_MST2_ENB = 7,
IMX_SC_C_PXL_LINK_SLV1_ADDR = 8,
IMX_SC_C_PXL_LINK_SLV2_ADDR = 9,
IMX_SC_C_PXL_LINK_MST_VLD = 10,
IMX_SC_C_PXL_LINK_MST1_VLD = 11,
IMX_SC_C_PXL_LINK_MST2_VLD = 12,
IMX_SC_C_SINGLE_MODE = 13,
IMX_SC_C_ID = 14,
IMX_SC_C_PXL_CLK_POLARITY = 15,
IMX_SC_C_LINESTATE = 16,
IMX_SC_C_PCIE_G_RST = 17,
IMX_SC_C_PCIE_BUTTON_RST = 18,
IMX_SC_C_PCIE_PERST = 19,
IMX_SC_C_PHY_RESET = 20,
IMX_SC_C_PXL_LINK_RATE_CORRECTION = 21,
IMX_SC_C_PANIC = 22,
IMX_SC_C_PRIORITY_GROUP = 23,
IMX_SC_C_TXCLK = 24,
IMX_SC_C_CLKDIV = 25,
IMX_SC_C_DISABLE_50 = 26,
IMX_SC_C_DISABLE_125 = 27,
IMX_SC_C_SEL_125 = 28,
IMX_SC_C_MODE = 29,
IMX_SC_C_SYNC_CTRL0 = 30,
IMX_SC_C_KACHUNK_CNT = 31,
IMX_SC_C_KACHUNK_SEL = 32,
IMX_SC_C_SYNC_CTRL1 = 33,
IMX_SC_C_DPI_RESET = 34,
IMX_SC_C_MIPI_RESET = 35,
IMX_SC_C_DUAL_MODE = 36,
IMX_SC_C_VOLTAGE = 37,
IMX_SC_C_PXL_LINK_SEL = 38,
IMX_SC_C_OFS_SEL = 39,
IMX_SC_C_OFS_AUDIO = 40,
IMX_SC_C_OFS_PERIPH = 41,
IMX_SC_C_OFS_IRQ = 42,
IMX_SC_C_RST0 = 43,
IMX_SC_C_RST1 = 44,
IMX_SC_C_SEL0 = 45,
IMX_SC_C_LAST
};
#endif /* _SC_TYPES_H */


@ -17,6 +17,7 @@ enum {
SM_EFUSE_READ,
SM_EFUSE_WRITE,
SM_EFUSE_USER_MAX,
SM_GET_CHIP_ID,
};
struct meson_sm_firmware;


@ -0,0 +1,116 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2018 Xilinx
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
* Rajan Vaja <rajanv@xilinx.com>
*/
#ifndef __FIRMWARE_ZYNQMP_H__
#define __FIRMWARE_ZYNQMP_H__
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
#define ZYNQMP_PM_VERSION ((ZYNQMP_PM_VERSION_MAJOR << 16) | \
ZYNQMP_PM_VERSION_MINOR)
#define ZYNQMP_TZ_VERSION_MAJOR 1
#define ZYNQMP_TZ_VERSION_MINOR 0
#define ZYNQMP_TZ_VERSION ((ZYNQMP_TZ_VERSION_MAJOR << 16) | \
ZYNQMP_TZ_VERSION_MINOR)
/* SMC SIP service Call Function Identifier Prefix */
#define PM_SIP_SVC 0xC2000000
#define PM_GET_TRUSTZONE_VERSION 0xa03
/* Number of 32bits values in payload */
#define PAYLOAD_ARG_CNT 4U
enum pm_api_id {
PM_GET_API_VERSION = 1,
PM_IOCTL = 34,
PM_QUERY_DATA,
PM_CLOCK_ENABLE,
PM_CLOCK_DISABLE,
PM_CLOCK_GETSTATE,
PM_CLOCK_SETDIVIDER,
PM_CLOCK_GETDIVIDER,
PM_CLOCK_SETRATE,
PM_CLOCK_GETRATE,
PM_CLOCK_SETPARENT,
PM_CLOCK_GETPARENT,
};
/* PMU-FW return status codes */
enum pm_ret_status {
XST_PM_SUCCESS = 0,
XST_PM_INTERNAL = 2000,
XST_PM_CONFLICT,
XST_PM_NO_ACCESS,
XST_PM_INVALID_NODE,
XST_PM_DOUBLE_REQ,
XST_PM_ABORT_SUSPEND,
};
enum pm_ioctl_id {
IOCTL_SET_PLL_FRAC_MODE = 8,
IOCTL_GET_PLL_FRAC_MODE,
IOCTL_SET_PLL_FRAC_DATA,
IOCTL_GET_PLL_FRAC_DATA,
};
enum pm_query_id {
PM_QID_INVALID,
PM_QID_CLOCK_GET_NAME,
PM_QID_CLOCK_GET_TOPOLOGY,
PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
PM_QID_CLOCK_GET_PARENTS,
PM_QID_CLOCK_GET_ATTRIBUTES,
PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
};
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
* @arg1: Argument 1 of query data
* @arg2: Argument 2 of query data
* @arg3: Argument 3 of query data
*/
struct zynqmp_pm_query_data {
u32 qid;
u32 arg1;
u32 arg2;
u32 arg3;
};
struct zynqmp_eemi_ops {
int (*get_api_version)(u32 *version);
int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
int (*clock_enable)(u32 clock_id);
int (*clock_disable)(u32 clock_id);
int (*clock_getstate)(u32 clock_id, u32 *state);
int (*clock_setdivider)(u32 clock_id, u32 divider);
int (*clock_getdivider)(u32 clock_id, u32 *divider);
int (*clock_setrate)(u32 clock_id, u64 rate);
int (*clock_getrate)(u32 clock_id, u64 *rate);
int (*clock_setparent)(u32 clock_id, u32 parent_id);
int (*clock_getparent)(u32 clock_id, u32 *parent_id);
int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out);
};
#if IS_REACHABLE(CONFIG_ARCH_ZYNQMP)
const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void);
#else
static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
{
return NULL;
}
#endif
#endif /* __FIRMWARE_ZYNQMP_H__ */
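
A short consumer sketch of the ops table (ret and the printout are illustrative; error handling abbreviated):

	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
	u32 pm_api_version;
	int ret;

	if (!eemi_ops || !eemi_ops->get_api_version)
		return -ENXIO;

	ret = eemi_ops->get_api_version(&pm_api_version);
	if (!ret)
		pr_info("PMU-FW v%u.%u\n",
			pm_api_version >> 16, pm_api_version & 0xFFFF);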


@ -403,24 +403,40 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
/**
* struct address_space - Contents of a cacheable, mappable object.
* @host: Owner, either the inode or the block_device.
* @i_pages: Cached pages.
* @gfp_mask: Memory allocation flags to use for allocating pages.
* @i_mmap_writable: Number of VM_SHARED mappings.
* @i_mmap: Tree of private and shared mappings.
* @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
* @nrpages: Number of page entries, protected by the i_pages lock.
* @nrexceptional: Shadow or DAX entries, protected by the i_pages lock.
* @writeback_index: Writeback starts here.
* @a_ops: Methods.
* @flags: Error bits and flags (AS_*).
* @wb_err: The most recent error which has occurred.
* @private_lock: For use by the owner of the address_space.
* @private_list: For use by the owner of the address_space.
* @private_data: For use by the owner of the address_space.
*/
struct address_space {
struct inode *host; /* owner: inode, block_device */
struct radix_tree_root i_pages; /* cached pages */
atomic_t i_mmap_writable;/* count VM_SHARED mappings */
struct rb_root_cached i_mmap; /* tree of private and shared mappings */
struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
/* Protected by the i_pages lock */
unsigned long nrpages; /* number of total pages */
/* number of shadow or DAX exceptional entries */
struct inode *host;
struct xarray i_pages;
gfp_t gfp_mask;
atomic_t i_mmap_writable;
struct rb_root_cached i_mmap;
struct rw_semaphore i_mmap_rwsem;
unsigned long nrpages;
unsigned long nrexceptional;
pgoff_t writeback_index;/* writeback starts here */
const struct address_space_operations *a_ops; /* methods */
unsigned long flags; /* error bits */
spinlock_t private_lock; /* for use by the address_space */
gfp_t gfp_mask; /* implicit gfp mask for allocations */
struct list_head private_list; /* for use by the address_space */
void *private_data; /* ditto */
pgoff_t writeback_index;
const struct address_space_operations *a_ops;
unsigned long flags;
errseq_t wb_err;
spinlock_t private_lock;
struct list_head private_list;
void *private_data;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
@ -467,15 +483,18 @@ struct block_device {
struct mutex bd_fsfreeze_mutex;
} __randomize_layout;
/*
* Radix-tree tags, for tagging dirty and writeback pages within the pagecache
* radix trees
*/
#define PAGECACHE_TAG_DIRTY 0
#define PAGECACHE_TAG_WRITEBACK 1
#define PAGECACHE_TAG_TOWRITE 2
/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
#define PAGECACHE_TAG_DIRTY XA_MARK_0
#define PAGECACHE_TAG_WRITEBACK XA_MARK_1
#define PAGECACHE_TAG_TOWRITE XA_MARK_2
int mapping_tagged(struct address_space *mapping, int tag);
/*
* Returns true if any of the pages in the mapping are marked with the tag.
*/
static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
{
return xa_marked(&mapping->i_pages, tag);
}
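As a usage sketch (the helper name is illustrative), a caller can now test for dirty or writeback pages with a lockless mark check instead of a tagged radix-tree lookup:

static bool example_mapping_needs_writeback(struct address_space *mapping)
{
	/* xa_marked() reads the mark bits without taking the xa_lock */
	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
	       mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}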
static inline void i_mmap_lock_write(struct address_space *mapping)
{
@ -1393,17 +1412,26 @@ struct super_block {
struct sb_writers s_writers;
/*
* Keep s_fs_info, s_time_gran, s_fsnotify_mask, and
* s_fsnotify_marks together for cache efficiency. They are frequently
* accessed and rarely modified.
*/
void *s_fs_info; /* Filesystem private info */
/* Granularity of c/m/atime in ns (cannot be worse than a second) */
u32 s_time_gran;
#ifdef CONFIG_FSNOTIFY
__u32 s_fsnotify_mask;
struct fsnotify_mark_connector __rcu *s_fsnotify_marks;
#endif
char s_id[32]; /* Informational name */
uuid_t s_uuid; /* UUID */
void *s_fs_info; /* Filesystem private info */
unsigned int s_max_links;
fmode_t s_mode;
/* Granularity of c/m/atime in ns.
Cannot be worse than a second */
u32 s_time_gran;
/*
* The next field is for VFS *only*. No filesystems have any business
* even looking at it. You had been warned.
@ -1428,6 +1456,9 @@ struct super_block {
/* Number of inodes with nlink == 0 but still referenced */
atomic_long_t s_remove_count;
/* Pending fsnotify inode refs */
atomic_long_t s_fsnotify_inode_refs;
/* Being remounted read-only */
int s_readonly_remount;

View file

@ -68,15 +68,20 @@
#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM)
/* Events that can be reported to backends */
#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \
FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \
FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
FS_OPEN_PERM | FS_ACCESS_PERM | FS_EXCL_UNLINK | \
FS_ISDIR | FS_IN_ONESHOT | FS_DN_RENAME | \
FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME)
/* Extra flags that may be reported with event or control handling of events */
#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \
FS_DN_MULTISHOT | FS_EVENT_ON_CHILD)
#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS)
struct fsnotify_group;
struct fsnotify_event;
struct fsnotify_mark;
@ -189,10 +194,10 @@ struct fsnotify_group {
/* allows a group to block waiting for a userspace response */
struct list_head access_list;
wait_queue_head_t access_waitq;
int f_flags;
int flags; /* flags from fanotify_init() */
int f_flags; /* event_f_flags from fanotify_init() */
unsigned int max_marks;
struct user_struct *user;
bool audit;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
@ -206,12 +211,14 @@ struct fsnotify_group {
enum fsnotify_obj_type {
FSNOTIFY_OBJ_TYPE_INODE,
FSNOTIFY_OBJ_TYPE_VFSMOUNT,
FSNOTIFY_OBJ_TYPE_SB,
FSNOTIFY_OBJ_TYPE_COUNT,
FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
};
#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE)
#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)
#define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB)
#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1)
static inline bool fsnotify_valid_obj_type(unsigned int type)
@ -255,6 +262,7 @@ static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
FSNOTIFY_ITER_FUNCS(inode, INODE)
FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
FSNOTIFY_ITER_FUNCS(sb, SB)
#define fsnotify_foreach_obj_type(type) \
for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++)
@ -267,8 +275,8 @@ struct fsnotify_mark_connector;
typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
/*
* Inode / vfsmount point to this structure which tracks all marks attached to
* the inode / vfsmount. The reference to inode / vfsmount is held by this
* Inode/vfsmount/sb point to this structure which tracks all marks attached to
* the inode/vfsmount/sb. The reference to inode/vfsmount/sb is held by this
* structure. We destroy this structure when there are no more marks attached
* to it. The structure is protected by fsnotify_mark_srcu.
*/
@ -335,6 +343,7 @@ extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int dat
extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern void fsnotify_sb_delete(struct super_block *sb);
extern u32 fsnotify_get_cookie(void);
static inline int fsnotify_inode_watches_children(struct inode *inode)
@ -455,9 +464,13 @@ static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *gr
{
fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL);
}
/* run all the marks in a group, and clear all of the sb marks */
static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group)
{
fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB_FL);
}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_unmount_inodes(struct super_block *sb);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
@ -484,6 +497,9 @@ static inline void __fsnotify_inode_delete(struct inode *inode)
static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{}
static inline void fsnotify_sb_delete(struct super_block *sb)
{}
static inline void fsnotify_update_flags(struct dentry *dentry)
{}

View file

@ -101,8 +101,8 @@ enum hdmi_extended_colorimetry {
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601,
HDMI_EXTENDED_COLORIMETRY_XV_YCC_709,
HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
HDMI_EXTENDED_COLORIMETRY_OPYCC_601,
HDMI_EXTENDED_COLORIMETRY_OPRGB,
/* The following EC values are only defined in CEA-861-F. */
HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,

View file

@ -11,7 +11,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Authors: Jé´me Glisse <jglisse@redhat.com>
* Authors: Jérôme Glisse <jglisse@redhat.com>
*/
/*
* Heterogeneous Memory Management (HMM)
@ -274,13 +274,28 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
struct hmm_mirror;
/*
* enum hmm_update_type - type of update
* enum hmm_update_event - type of update
* @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
*/
enum hmm_update_type {
enum hmm_update_event {
HMM_UPDATE_INVALIDATE,
};
/*
* struct hmm_update - HMM update information for callback
*
* @start: virtual start address of the range to update
* @end: virtual end address of the range to update
* @event: event triggering the update (what is happening)
* @blockable: can the callback block/sleep ?
*/
struct hmm_update {
unsigned long start;
unsigned long end;
enum hmm_update_event event;
bool blockable;
};
/*
* struct hmm_mirror_ops - HMM mirror device operations callback
*
@ -300,9 +315,9 @@ struct hmm_mirror_ops {
/* sync_cpu_device_pagetables() - synchronize page tables
*
* @mirror: pointer to struct hmm_mirror
* @update_type: type of update that occurred to the CPU page table
* @start: virtual start address of the range to update
* @end: virtual end address of the range to update
* @update: update information (see struct hmm_update)
* Returns: -EAGAIN if update.blockable is false and the callback needs
* to block, 0 otherwise.
*
* This callback ultimately originates from mmu_notifiers when the CPU
* page table is updated. The device driver must update its page table
@ -313,10 +328,8 @@ struct hmm_mirror_ops {
* page tables are completely updated (TLBs flushed, etc); this is a
* synchronous call.
*/
void (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
enum hmm_update_type update_type,
unsigned long start,
unsigned long end);
int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
const struct hmm_update *update);
};
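A driver-side sketch of the new callback contract; the device structure, lock, and invalidation helper are hypothetical:

static int example_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
					      const struct hmm_update *update)
{
	struct example_dev *edev = container_of(mirror, struct example_dev,
						mirror);

	if (update->blockable)
		mutex_lock(&edev->pt_lock);
	else if (!mutex_trylock(&edev->pt_lock))
		/* Caller cannot sleep and the lock is contended */
		return -EAGAIN;

	/* Drop device translations for the invalidated CPU range */
	example_invalidate_range(edev, update->start, update->end);

	mutex_unlock(&edev->pt_lock);
	return 0;
}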
/*

View file

@ -214,8 +214,7 @@ static inline void idr_preload_end(void)
++id, (entry) = idr_get_next((idr), &(id)))
/*
* IDA - IDR based id allocator, use when translation from id to
* pointer isn't necessary.
* IDA - ID Allocator, use when translation from id to pointer isn't necessary.
*/
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
@ -225,14 +224,14 @@ struct ida_bitmap {
unsigned long bitmap[IDA_BITMAP_LONGS];
};
DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);
struct ida {
struct radix_tree_root ida_rt;
struct xarray xa;
};
#define IDA_INIT_FLAGS (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)
#define IDA_INIT(name) { \
.ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT), \
.xa = XARRAY_INIT(name, IDA_INIT_FLAGS) \
}
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
@ -292,7 +291,7 @@ static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
static inline void ida_init(struct ida *ida)
{
INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}
#define ida_simple_get(ida, start, end, gfp) \
@ -301,9 +300,6 @@ static inline void ida_init(struct ida *ida)
static inline bool ida_is_empty(const struct ida *ida)
{
return radix_tree_empty(&ida->ida_rt);
return xa_empty(&ida->xa);
}
/* in lib/radix-tree.c */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
#endif /* __IDR_H__ */
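Callers are unaffected by the XArray conversion; typical usage with the ida_alloc()/ida_free() family shown in the surrounding context remains (sketch, names illustrative):

static DEFINE_IDA(example_ida);

static int example_acquire_id(void)
{
	/* Returns the smallest free non-negative id, or a negative errno */
	return ida_alloc(&example_ida, GFP_KERNEL);
}

static void example_release_id(int id)
{
	ida_free(&example_ida, id);
}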

View file

@ -183,6 +183,29 @@ static inline void list_move_tail(struct list_head *list,
list_add_tail(list, head);
}
/**
* list_bulk_move_tail - move a subsection of a list to its tail
* @head: the head that will follow our entry
* @first: first entry to move
* @last: last entry to move, can be the same as first
*
* Move all entries between @first and including @last before @head.
* All three entries must belong to the same linked list.
*/
static inline void list_bulk_move_tail(struct list_head *head,
struct list_head *first,
struct list_head *last)
{
first->prev->next = last->next;
last->next->prev = first->prev;
head->prev->next = first;
first->prev = head->prev;
last->next = head;
head->prev = last;
}
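A usage sketch (types and names hypothetical): once a contiguous run of entries first..last should age out, splice it to the list tail in O(1) instead of walking the run with repeated list_move_tail() calls:

struct example_buf {
	struct list_head lru_node;
};

static void example_age_out(struct list_head *lru,
			    struct example_buf *first,
			    struct example_buf *last)
{
	/* Entries between first and last keep their relative order */
	list_bulk_move_tail(lru, &first->lru_node, &last->lru_node);
}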
/**
* list_is_last - tests whether @list is the last entry in list @head
* @list: the entry to test

View file

@ -2,7 +2,6 @@
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__
#ifdef CONFIG_HAVE_MEMBLOCK
/*
* Logical memory blocks.
*
@ -16,6 +15,19 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>
extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;
/*
* highest page
*/
extern unsigned long max_pfn;
/*
* highest possible page
*/
extern unsigned long long max_possible_pfn;
#define INIT_MEMBLOCK_REGIONS 128
#define INIT_PHYSMEM_REGIONS 4
@ -120,6 +132,10 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
enum memblock_flags choose_memblock_flags(void);
unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
phys_addr_t base, phys_addr_t size,
@ -301,10 +317,116 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
#endif
phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid);
void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid);
static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_raw(phys_addr_t size,
phys_addr_t align)
{
return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
MEMBLOCK_ALLOC_ACCESSIBLE,
NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_from(phys_addr_t size,
phys_addr_t align,
phys_addr_t min_addr)
{
return memblock_alloc_try_nid(size, align, min_addr,
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_nopanic(phys_addr_t size,
phys_addr_t align)
{
return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
MEMBLOCK_ALLOC_ACCESSIBLE,
NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_low(phys_addr_t size,
phys_addr_t align)
{
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size,
phys_addr_t align)
{
return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
ARCH_LOW_ADDRESS_LIMIT,
NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size,
phys_addr_t align,
phys_addr_t min_addr)
{
return memblock_alloc_try_nid_nopanic(size, align, min_addr,
MEMBLOCK_ALLOC_ACCESSIBLE,
NUMA_NO_NODE);
}
static inline void * __init memblock_alloc_node(phys_addr_t size,
phys_addr_t align, int nid)
{
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size,
int nid)
{
return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
MEMBLOCK_LOW_LIMIT,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
static inline void __init memblock_free_early(phys_addr_t base,
phys_addr_t size)
{
__memblock_free_early(base, size);
}
static inline void __init memblock_free_early_nid(phys_addr_t base,
phys_addr_t size, int nid)
{
__memblock_free_early(base, size);
}
static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
__memblock_free_late(base, size);
}
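A sketch of the resulting call pattern in early boot code; the size and warning text are illustrative, and the _nopanic variant is used so that failure is observable to the caller rather than fatal:

static void * __init example_early_buffer(unsigned long size)
{
	void *buf;

	/* Zeroed, cache-line-aligned memory straight from memblock */
	buf = memblock_alloc_nopanic(size, SMP_CACHE_BYTES);
	if (!buf)
		pr_warn("%s: failed to allocate %lu bytes\n", __func__, size);
	return buf;
}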
/*
* Set the allocation direction to bottom-up or top-down.
@ -324,10 +446,6 @@ static inline bool memblock_bottom_up(void)
return memblock.bottom_up;
}
/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
enum memblock_flags flags);
@ -433,6 +551,31 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
i < memblock_type->cnt; \
i++, rgn = &memblock_type->regions[i])
extern void *alloc_large_system_hash(const char *tablename,
unsigned long bucketsize,
unsigned long numentries,
int scale,
int flags,
unsigned int *_hash_shift,
unsigned int *_hash_mask,
unsigned long low_limit,
unsigned long high_limit);
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
* shift passed via *_hash_shift */
#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
*/
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist; /* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif
#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
@ -440,12 +583,6 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK */
#endif /* __KERNEL__ */

View file

@ -301,6 +301,7 @@ extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
extern void __remove_memory(int nid, u64 start, u64 size);
#else
static inline bool is_mem_section_removable(unsigned long pfn,
@ -317,11 +318,13 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
}
static inline void remove_memory(int nid, u64 start, u64 size) {}
static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
extern void __ref free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource, bool online);
extern int arch_add_memory(int nid, u64 start, u64 size,
@ -330,7 +333,6 @@ extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern void remove_memory(int nid, u64 start, u64 size);
extern int sparse_add_one_section(struct pglist_data *pgdat,
unsigned long start_pfn, struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,

View file

@ -36,7 +36,7 @@
* I2C requires 1 additional byte for requests.
* I2C requires 2 additional bytes for responses.
* SPI requires up to 32 additional bytes for responses.
* */
*/
#define EC_PROTO_VERSION_UNKNOWN 0
#define EC_MAX_REQUEST_OVERHEAD 1
#define EC_MAX_RESPONSE_OVERHEAD 32
@ -58,13 +58,14 @@ enum {
EC_MAX_MSG_BYTES = 64 * 1024,
};
/*
* @version: Command version number (often 0)
* @command: Command to send (EC_CMD_...)
* @outsize: Outgoing length in bytes
* @insize: Max number of bytes to accept from EC
* @result: EC's response to the command (separate from communication failure)
* @data: Where to put the incoming data from EC and outgoing data to EC
/**
* struct cros_ec_command - Information about a ChromeOS EC command.
* @version: Command version number (often 0).
* @command: Command to send (EC_CMD_...).
* @outsize: Outgoing length in bytes.
* @insize: Max number of bytes to accept from the EC.
* @result: EC's response to the command (separate from communication failure).
* @data: Where to put the incoming data from EC and outgoing data to EC.
*/
struct cros_ec_command {
uint32_t version;
@ -76,48 +77,55 @@ struct cros_ec_command {
};
/**
* struct cros_ec_device - Information about a ChromeOS EC device
*
* @phys_name: name of physical comms layer (e.g. 'i2c-4')
* struct cros_ec_device - Information about a ChromeOS EC device.
* @phys_name: Name of physical comms layer (e.g. 'i2c-4').
* @dev: Device pointer for physical comms device
* @was_wake_device: true if this device was set to wake the system from
* sleep at the last suspend
* @cmd_readmem: direct read of the EC memory-mapped region, if supported
* @offset is within EC_LPC_ADDR_MEMMAP region.
* @bytes: number of bytes to read. zero means "read a string" (including
* the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be read.
* Caller must ensure that the buffer is large enough for the result when
* reading a string.
*
* @priv: Private data
* @irq: Interrupt to use
* @id: Device id
* @din: input buffer (for data from EC)
* @dout: output buffer (for data to EC)
* \note
* These two buffers will always be dword-aligned and include enough
* space for up to 7 word-alignment bytes also, so we can ensure that
* the body of the message is always dword-aligned (64-bit).
* We use this alignment to keep ARM and x86 happy. Probably word
* alignment would be OK, there might be a small performance advantage
* to using dword.
* @din_size: size of din buffer to allocate (zero to use static din)
* @dout_size: size of dout buffer to allocate (zero to use static dout)
* @wake_enabled: true if this device can wake the system from sleep
* @suspended: true if this device had been suspended
* @cmd_xfer: send command to EC and get response
* Returns the number of bytes received if the communication succeeded, but
* that doesn't mean the EC was happy with the command. The caller
* should check msg.result for the EC's result code.
* @pkt_xfer: send packet to EC and get response
* @lock: one transaction at a time
* @mkbp_event_supported: true if this EC supports the MKBP event protocol.
* @event_notifier: interrupt event notifier for transport devices.
* @event_data: raw payload transferred with the MKBP event.
* @event_size: size in bytes of the event data.
* @was_wake_device: True if this device was set to wake the system from
* sleep at the last suspend.
* @cros_class: The class structure for this device.
* @cmd_readmem: Direct read of the EC memory-mapped region, if supported.
* @offset: Is within EC_LPC_ADDR_MEMMAP region.
* @bytes: Number of bytes to read. zero means "read a string" (including
* the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be
* read. Caller must ensure that the buffer is large enough for the
* result when reading a string.
* @max_request: Max size of message requested.
* @max_response: Max size of message response.
* @max_passthru: Max size of passthru message.
* @proto_version: The protocol version used for this device.
* @priv: Private data.
* @irq: Interrupt to use.
* @id: Device id.
* @din: Input buffer (for data from EC). This buffer will always be
* dword-aligned and include enough space for up to 7 word-alignment
* bytes also, so we can ensure that the body of the message is always
* dword-aligned (64-bit). We use this alignment to keep ARM and x86
* happy. Probably word alignment would be OK, there might be a small
* performance advantage to using dword.
* @dout: Output buffer (for data to EC). This buffer will always be
* dword-aligned and include enough space for up to 7 word-alignment
* bytes also, so we can ensure that the body of the message is always
* dword-aligned (64-bit). We use this alignment to keep ARM and x86
* happy. Probably word alignment would be OK, there might be a small
* performance advantage to using dword.
* @din_size: Size of din buffer to allocate (zero to use static din).
* @dout_size: Size of dout buffer to allocate (zero to use static dout).
* @wake_enabled: True if this device can wake the system from sleep.
* @suspended: True if this device had been suspended.
* @cmd_xfer: Send command to EC and get response.
* Returns the number of bytes received if the communication
* succeeded, but that doesn't mean the EC was happy with the
* command. The caller should check msg.result for the EC's result
* code.
* @pkt_xfer: Send packet to EC and get response.
* @lock: One transaction at a time.
* @mkbp_event_supported: True if this EC supports the MKBP event protocol.
* @event_notifier: Interrupt event notifier for transport devices.
* @event_data: Raw payload transferred with the MKBP event.
* @event_size: Size in bytes of the event data.
* @host_event_wake_mask: Mask of host events that cause wake from suspend.
*/
struct cros_ec_device {
/* These are used by other drivers that want to talk to the EC */
const char *phys_name;
struct device *dev;
@ -153,20 +161,19 @@ struct cros_ec_device {
};
/**
* struct cros_ec_sensor_platform - ChromeOS EC sensor platform information
*
* struct cros_ec_sensor_platform - ChromeOS EC sensor platform information.
* @sensor_num: Id of the sensor, as reported by the EC.
*/
struct cros_ec_sensor_platform {
u8 sensor_num;
};
/* struct cros_ec_platform - ChromeOS EC platform information
*
* @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
* used in /dev/ and sysfs.
* @cmd_offset: offset to apply for each command. Set when
* registering a devicde behind another one.
/**
* struct cros_ec_platform - ChromeOS EC platform information.
* @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
* used in /dev/ and sysfs.
* @cmd_offset: Offset to apply for each command. Set when
* registering a device behind another one.
*/
struct cros_ec_platform {
const char *ec_name;
@ -175,16 +182,16 @@ struct cros_ec_platform {
struct cros_ec_debugfs;
/*
* struct cros_ec_dev - ChromeOS EC device entry point
*
* @class_dev: Device structure used in sysfs
* @cdev: Character device structure in /dev
* @ec_dev: cros_ec_device structure to talk to the physical device
* @dev: pointer to the platform device
* @debug_info: cros_ec_debugfs structure for debugging information
* @has_kb_wake_angle: true if at least 2 accelerometer are connected to the EC.
* @cmd_offset: offset to apply for each command.
/**
* struct cros_ec_dev - ChromeOS EC device entry point.
* @class_dev: Device structure used in sysfs.
* @cdev: Character device structure in /dev.
* @ec_dev: cros_ec_device structure to talk to the physical device.
* @dev: Pointer to the platform device.
* @debug_info: cros_ec_debugfs structure for debugging information.
* @has_kb_wake_angle: True if at least 2 accelerometers are connected to the EC.
* @cmd_offset: Offset to apply for each command.
* @features: Features supported by the EC.
*/
struct cros_ec_dev {
struct device class_dev;
@ -200,124 +207,129 @@ struct cros_ec_dev {
#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
/**
* cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device
* cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
* @ec_dev: Device to suspend.
*
* This can be called by drivers to handle a suspend event.
*
* ec_dev: Device to suspend
* @return 0 if ok, -ve on error
* Return: 0 on success or negative error code.
*/
int cros_ec_suspend(struct cros_ec_device *ec_dev);
/**
* cros_ec_resume - Handle a resume operation for the ChromeOS EC device
* cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
* @ec_dev: Device to resume.
*
* This can be called by drivers to handle a resume event.
*
* @ec_dev: Device to resume
* @return 0 if ok, -ve on error
* Return: 0 on success or negative error code.
*/
int cros_ec_resume(struct cros_ec_device *ec_dev);
/**
* cros_ec_prepare_tx - Prepare an outgoing message in the output buffer
* cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer.
* @ec_dev: Device to register.
* @msg: Message to write.
*
* This is intended to be used by all ChromeOS EC drivers, but at present
* only SPI uses it. Once LPC uses the same protocol it can start using it.
* I2C could use it now, with a refactor of the existing code.
*
* @ec_dev: Device to register
* @msg: Message to write
* Return: 0 on success or negative error code.
*/
int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
* cros_ec_check_result - Check ec_msg->result
* cros_ec_check_result() - Check ec_msg->result.
* @ec_dev: EC device.
* @msg: Message to check.
*
* This is used by ChromeOS EC drivers to check the ec_msg->result for
* errors and to warn about them.
*
* @ec_dev: EC device
* @msg: Message to check
* Return: 0 on success or negative error code.
*/
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
* cros_ec_cmd_xfer - Send a command to the ChromeOS EC
* cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
* @ec_dev: EC device.
* @msg: Message to write.
*
* Call this to send a command to the ChromeOS EC. This should be used
* instead of calling the EC's cmd_xfer() callback directly.
*
* @ec_dev: EC device
* @msg: Message to write
* Return: 0 on success or negative error code.
*/
int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
* cros_ec_cmd_xfer_status - Send a command to the ChromeOS EC
* cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
* @ec_dev: EC device.
* @msg: Message to write.
*
* This function is identical to cros_ec_cmd_xfer, except it returns success
* status only if both the command was transmitted successfully and the EC
* replied with success status. It's not necessary to check msg->result when
* using this function.
*
* @ec_dev: EC device
* @msg: Message to write
* @return: Num. of bytes transferred on success, <0 on failure
* Return: The number of bytes transferred on success or negative error code.
*/
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
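A sketch of the calling convention, using the hello command defined in cros_ec_commands.h; the on-stack message buffer layout is a common idiom, not something mandated by this header:

static int example_ec_hello(struct cros_ec_device *ec_dev)
{
	struct {
		struct cros_ec_command msg;
		union {
			struct ec_params_hello params;
			struct ec_response_hello resp;
		};
	} __packed buf = { };
	int ret;

	buf.msg.command = EC_CMD_HELLO;
	buf.msg.outsize = sizeof(struct ec_params_hello);
	buf.msg.insize = sizeof(struct ec_response_hello);
	buf.params.in_data = 0xa0b0c0d0;

	/* Negative iff the transfer or the EC result failed, so there is
	 * no need to re-check msg.result afterwards. */
	ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
	if (ret < 0)
		return ret;

	pr_info("EC replied %#x\n", buf.resp.out_data);
	return 0;
}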
/**
* cros_ec_remove - Remove a ChromeOS EC
* cros_ec_remove() - Remove a ChromeOS EC.
* @ec_dev: Device to register.
*
* Call this to deregister a ChromeOS EC, then clean up any private data.
*
* @ec_dev: Device to register
* @return 0 if ok, -ve on error
* Return: 0 on success or negative error code.
*/
int cros_ec_remove(struct cros_ec_device *ec_dev);
/**
* cros_ec_register - Register a new ChromeOS EC, using the provided info
* cros_ec_register() - Register a new ChromeOS EC, using the provided info.
* @ec_dev: Device to register.
*
* Before calling this, allocate a pointer to a new device and then fill
* in all the fields up to the --private-- marker.
*
* @ec_dev: Device to register
* @return 0 if ok, -ve on error
* Return: 0 on success or negative error code.
*/
int cros_ec_register(struct cros_ec_device *ec_dev);
/**
* cros_ec_query_all - Query the protocol version supported by the ChromeOS EC
* cros_ec_query_all() - Query the protocol version supported by the
* ChromeOS EC.
* @ec_dev: Device to register.
*
* @ec_dev: Device to register
* @return 0 if ok, -ve on error
* Return: 0 on success or negative error code.
*/
int cros_ec_query_all(struct cros_ec_device *ec_dev);
/**
* cros_ec_get_next_event - Fetch next event from the ChromeOS EC
*
* @ec_dev: Device to fetch event from
* cros_ec_get_next_event() - Fetch next event from the ChromeOS EC.
* @ec_dev: Device to fetch event from.
* @wake_event: Pointer to a bool set to true upon return if the event might be
* treated as a wake event. Ignored if null.
*
* Returns: 0 on success, Linux error number on failure
* Return: 0 on success or negative error code.
*/
int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event);
/**
* cros_ec_get_host_event - Return a mask of event set by the EC.
* cros_ec_get_host_event() - Return a mask of event set by the ChromeOS EC.
* @ec_dev: Device to fetch event from.
*
* When MKBP is supported, when the EC raises an interrupt,
* We collect the events raised and call the functions in the ec notifier.
* When MKBP is supported and the EC raises an interrupt, we collect the
* events raised and call the functions in the ec notifier. This function
* is a helper to know which events are raised.
*
* This function is a helper to know which events are raised.
* Return: 0 on success or negative error code.
*/
u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);

View file

@ -306,15 +306,18 @@ enum host_event_code {
/* Host event mask */
#define EC_HOST_EVENT_MASK(event_code) (1UL << ((event_code) - 1))
/* Arguments at EC_LPC_ADDR_HOST_ARGS */
/**
* struct ec_lpc_host_args - Arguments at EC_LPC_ADDR_HOST_ARGS
* @flags: The host argument flags.
* @command_version: Command version.
* @data_size: The length of data.
* @checksum: Checksum; sum of command + flags + command_version + data_size +
* all params/response data bytes.
*/
struct ec_lpc_host_args {
uint8_t flags;
uint8_t command_version;
uint8_t data_size;
/*
* Checksum; sum of command + flags + command_version + data_size +
* all params/response data bytes.
*/
uint8_t checksum;
} __packed;
@ -468,54 +471,43 @@ struct ec_lpc_host_args {
#define EC_HOST_REQUEST_VERSION 3
/* Version 3 request from host */
/**
* struct ec_host_request - Version 3 request from host.
* @struct_version: Should be 3. The EC will return EC_RES_INVALID_HEADER if it
* receives a header with a version it doesn't know how to
* parse.
* @checksum: Checksum of request and data; sum of all bytes including checksum
* should total to 0.
* @command: Command to send (EC_CMD_...)
* @command_version: Command version.
* @reserved: Unused byte in current protocol version; set to 0.
* @data_len: Length of data which follows this header.
*/
struct ec_host_request {
/* Struct version (=3)
*
* EC will return EC_RES_INVALID_HEADER if it receives a header with a
* version it doesn't know how to parse.
*/
uint8_t struct_version;
/*
* Checksum of request and data; sum of all bytes including checksum
* should total to 0.
*/
uint8_t checksum;
/* Command code */
uint16_t command;
/* Command version */
uint8_t command_version;
/* Unused byte in current protocol version; set to 0 */
uint8_t reserved;
/* Length of data which follows this header */
uint16_t data_len;
} __packed;
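The checksum rule above ("sum of all bytes including checksum should total to 0") can be met by storing the two's complement of the running sum; a sketch, with a hypothetical helper name:

static uint8_t example_request_checksum(struct ec_host_request *rq,
					const uint8_t *data, uint16_t data_len)
{
	const uint8_t *p = (const uint8_t *)rq;
	uint8_t sum = 0;
	size_t i;

	rq->checksum = 0;
	for (i = 0; i < sizeof(*rq); i++)	/* header bytes */
		sum += p[i];
	for (i = 0; i < data_len; i++)		/* payload bytes */
		sum += data[i];

	/* Adding this byte back makes the grand total wrap to 0 */
	return (uint8_t)-sum;
}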
#define EC_HOST_RESPONSE_VERSION 3
/* Version 3 response from EC */
/**
* struct ec_host_response - Version 3 response from EC.
* @struct_version: Struct version (=3).
* @checksum: Checksum of response and data; sum of all bytes including
* checksum should total to 0.
* @result: EC's response to the command (separate from communication failure)
* @data_len: Length of data which follows this header.
* @reserved: Unused bytes in current protocol version; set to 0.
*/
struct ec_host_response {
/* Struct version (=3) */
uint8_t struct_version;
/*
* Checksum of response and data; sum of all bytes including checksum
* should total to 0.
*/
uint8_t checksum;
/* Result code (EC_RES_*) */
uint16_t result;
/* Length of data which follows this header */
uint16_t data_len;
/* Unused bytes in current protocol version; set to 0 */
uint16_t reserved;
} __packed;
@ -540,6 +532,10 @@ struct ec_host_response {
*/
#define EC_CMD_PROTO_VERSION 0x00
/**
* struct ec_response_proto_version - Response to the proto version command.
* @version: The protocol version.
*/
struct ec_response_proto_version {
uint32_t version;
} __packed;
@ -550,12 +546,20 @@ struct ec_response_proto_version {
*/
#define EC_CMD_HELLO 0x01
/**
* struct ec_params_hello - Parameters to the hello command.
* @in_data: Pass anything here.
*/
struct ec_params_hello {
uint32_t in_data; /* Pass anything here */
uint32_t in_data;
} __packed;
/**
* struct ec_response_hello - Response to the hello command.
* @out_data: Output will be in_data + 0x01020304.
*/
struct ec_response_hello {
uint32_t out_data; /* Output will be in_data + 0x01020304 */
uint32_t out_data;
} __packed;
/* Get version number */
@ -567,22 +571,37 @@ enum ec_current_image {
EC_IMAGE_RW
};
/**
* struct ec_response_get_version - Response to the get version command.
* @version_string_ro: Null-terminated RO firmware version string.
* @version_string_rw: Null-terminated RW firmware version string.
* @reserved: Unused bytes; was previously RW-B firmware version string.
* @current_image: One of ec_current_image.
*/
struct ec_response_get_version {
/* Null-terminated version strings for RO, RW */
char version_string_ro[32];
char version_string_rw[32];
char reserved[32]; /* Was previously RW-B string */
uint32_t current_image; /* One of ec_current_image */
char reserved[32];
uint32_t current_image;
} __packed;
/* Read test */
#define EC_CMD_READ_TEST 0x03
/**
* struct ec_params_read_test - Parameters for the read test command.
* @offset: Starting value for read buffer.
* @size: Size to read in bytes.
*/
struct ec_params_read_test {
uint32_t offset; /* Starting value for read buffer */
uint32_t size; /* Size to read in bytes */
uint32_t offset;
uint32_t size;
} __packed;
/**
* struct ec_response_read_test - Response to the read test command.
* @data: Data returned by the read test command.
*/
struct ec_response_read_test {
uint32_t data[32];
} __packed;
@ -597,18 +616,27 @@ struct ec_response_read_test {
/* Get chip info */
#define EC_CMD_GET_CHIP_INFO 0x05
/**
* struct ec_response_get_chip_info - Response to the get chip info command.
* @vendor: Null-terminated string for chip vendor.
* @name: Null-terminated string for chip name.
* @revision: Null-terminated string for chip mask version.
*/
struct ec_response_get_chip_info {
/* Null-terminated strings */
char vendor[32];
char name[32];
char revision[32]; /* Mask version */
char revision[32];
} __packed;
/* Get board HW version */
#define EC_CMD_GET_BOARD_VERSION 0x06
/**
* struct ec_response_board_version - Response to the board version command.
* @board_version: A monotonously incrementing number.
*/
struct ec_response_board_version {
uint16_t board_version; /* A monotonously incrementing number. */
uint16_t board_version;
} __packed;
/*
@ -621,27 +649,42 @@ struct ec_response_board_version {
*/
#define EC_CMD_READ_MEMMAP 0x07
/**
* struct ec_params_read_memmap - Parameters for the read memory map command.
* @offset: Offset in memmap (EC_MEMMAP_*).
* @size: Size to read in bytes.
*/
struct ec_params_read_memmap {
uint8_t offset; /* Offset in memmap (EC_MEMMAP_*) */
uint8_t size; /* Size to read in bytes */
uint8_t offset;
uint8_t size;
} __packed;
/* Read versions supported for a command */
#define EC_CMD_GET_CMD_VERSIONS 0x08
/**
* struct ec_params_get_cmd_versions - Parameters for the get command versions.
* @cmd: Command to check.
*/
struct ec_params_get_cmd_versions {
uint8_t cmd; /* Command to check */
uint8_t cmd;
} __packed;
/**
* struct ec_params_get_cmd_versions_v1 - Parameters for the get command
* versions (v1)
* @cmd: Command to check.
*/
struct ec_params_get_cmd_versions_v1 {
uint16_t cmd; /* Command to check */
uint16_t cmd;
} __packed;
/**
* struct ec_response_get_cmd_version - Response to the get command versions.
* @version_mask: Mask of supported versions; use EC_VER_MASK() to compare with
* a desired version.
*/
struct ec_response_get_cmd_versions {
/*
* Mask of supported versions; use EC_VER_MASK() to compare with a
* desired version.
*/
uint32_t version_mask;
} __packed;
@ -659,6 +702,11 @@ enum ec_comms_status {
EC_COMMS_STATUS_PROCESSING = 1 << 0, /* Processing cmd */
};
/**
* struct ec_response_get_comms_status - Response to the get comms status
* command.
* @flags: Mask of enum ec_comms_status.
*/
struct ec_response_get_comms_status {
uint32_t flags; /* Mask of enum ec_comms_status */
} __packed;
@ -685,19 +733,19 @@ struct ec_response_test_protocol {
/* EC_RES_IN_PROGRESS may be returned if a command is slow */
#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0)
/**
* struct ec_response_get_protocol_info - Response to the get protocol info.
* @protocol_versions: Bitmask of protocol versions supported (1 << n means
* version n).
* @max_request_packet_size: Maximum request packet size in bytes.
* @max_response_packet_size: Maximum response packet size in bytes.
* @flags: see EC_PROTOCOL_INFO_*
*/
struct ec_response_get_protocol_info {
/* Fields which exist if at least protocol version 3 supported */
/* Bitmask of protocol versions supported (1 << n means version n)*/
uint32_t protocol_versions;
/* Maximum request packet size, in bytes */
uint16_t max_request_packet_size;
/* Maximum response packet size, in bytes */
uint16_t max_response_packet_size;
/* Flags; see EC_PROTOCOL_INFO_* */
uint32_t flags;
} __packed;
@ -708,8 +756,10 @@ struct ec_response_get_protocol_info {
/* The upper byte of .flags tells what to do (nothing means "get") */
#define EC_GSV_SET 0x80000000
/* The lower three bytes of .flags identifies the parameter, if that has
meaning for an individual command. */
/*
* The lower three bytes of .flags identifies the parameter, if that has
* meaning for an individual command.
*/
#define EC_GSV_PARAM_MASK 0x00ffffff
struct ec_params_get_set_value {
@ -810,6 +860,7 @@ enum ec_feature_code {
#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32))
struct ec_response_get_features {
uint32_t flags[2];
} __packed;
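A sketch of consuming the response; the split at bit 32 mirrors the EC_FEATURE_MASK_0/EC_FEATURE_MASK_1 macros above:

static bool example_ec_has_feature(const struct ec_response_get_features *r,
				   unsigned int code)
{
	if (code < 32)
		return r->flags[0] & EC_FEATURE_MASK_0(code);
	return r->flags[1] & EC_FEATURE_MASK_1(code);
}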
@ -820,24 +871,22 @@ struct ec_response_get_features {
/* Get flash info */
#define EC_CMD_FLASH_INFO 0x10
/* Version 0 returns these fields */
/**
* struct ec_response_flash_info - Response to the flash info command.
* @flash_size: Usable flash size in bytes.
* @write_block_size: Write block size. Write offset and size must be a
* multiple of this.
* @erase_block_size: Erase block size. Erase offset and size must be a
* multiple of this.
* @protect_block_size: Protection block size. Protection offset and size
* must be a multiple of this.
*
* Version 0 returns these fields.
*/
struct ec_response_flash_info {
/* Usable flash size, in bytes */
uint32_t flash_size;
/*
* Write block size. Write offset and size must be a multiple
* of this.
*/
uint32_t write_block_size;
/*
* Erase block size. Erase offset and size must be a multiple
* of this.
*/
uint32_t erase_block_size;
/*
* Protection block size. Protection offset and size must be a
* multiple of this.
*/
uint32_t protect_block_size;
} __packed;
@ -845,7 +894,22 @@ struct ec_response_flash_info {
/* EC flash erases bits to 0 instead of 1 */
#define EC_FLASH_INFO_ERASE_TO_0 (1 << 0)
/*
/**
* struct ec_response_flash_info_1 - Response to the flash info v1 command.
* @flash_size: Usable flash size in bytes.
* @write_block_size: Write block size. Write offset and size must be a
* multiple of this.
* @erase_block_size: Erase block size. Erase offset and size must be a
* multiple of this.
* @protect_block_size: Protection block size. Protection offset and size
* must be a multiple of this.
* @write_ideal_size: Ideal write size in bytes. Writes will be fastest if
* size is exactly this and offset is a multiple of this.
* For example, an EC may have a write buffer which can do
* half-page operations if data is aligned, and a slower
* word-at-a-time write mode.
* @flags: Flags; see EC_FLASH_INFO_*
*
* Version 1 returns the same initial fields as version 0, with additional
* fields following.
*
@ -860,15 +924,7 @@ struct ec_response_flash_info_1 {
uint32_t protect_block_size;
/* Version 1 adds these fields: */
/*
* Ideal write size in bytes. Writes will be fastest if size is
* exactly this and offset is a multiple of this. For example, an EC
* may have a write buffer which can do half-page operations if data is
* aligned, and a slower word-at-a-time write mode.
*/
uint32_t write_ideal_size;
/* Flags; see EC_FLASH_INFO_* */
uint32_t flags;
} __packed;
@ -879,9 +935,14 @@ struct ec_response_flash_info_1 {
*/
#define EC_CMD_FLASH_READ 0x11
/**
* struct ec_params_flash_read - Parameters for the flash read command.
* @offset: Byte offset to read.
* @size: Size to read in bytes.
*/
struct ec_params_flash_read {
uint32_t offset; /* Byte offset to read */
uint32_t size; /* Size to read in bytes */
uint32_t offset;
uint32_t size;
} __packed;
/* Write flash */
@ -891,18 +952,28 @@ struct ec_params_flash_read {
/* Version 0 of the flash command supported only 64 bytes of data */
#define EC_FLASH_WRITE_VER0_SIZE 64
/**
* struct ec_params_flash_write - Parameters for the flash write command.
* @offset: Byte offset to write.
* @size: Size to write in bytes.
*/
struct ec_params_flash_write {
uint32_t offset; /* Byte offset to write */
uint32_t size; /* Size to write in bytes */
uint32_t offset;
uint32_t size;
/* Followed by data to write */
} __packed;
/* Erase flash */
#define EC_CMD_FLASH_ERASE 0x13
/**
* struct ec_params_flash_erase - Parameters for the flash erase command.
* @offset: Byte offset to erase.
* @size: Size to erase in bytes.
*/
struct ec_params_flash_erase {
uint32_t offset; /* Byte offset to erase */
uint32_t size; /* Size to erase in bytes */
uint32_t offset;
uint32_t size;
} __packed;
/*
@ -941,21 +1012,28 @@ struct ec_params_flash_erase {
/* Entire flash code protected when the EC boots */
#define EC_FLASH_PROTECT_ALL_AT_BOOT (1 << 6)
/**
* struct ec_params_flash_protect - Parameters for the flash protect command.
* @mask: Bits in flags to apply.
* @flags: New flags to apply.
*/
struct ec_params_flash_protect {
uint32_t mask; /* Bits in flags to apply */
uint32_t flags; /* New flags to apply */
uint32_t mask;
uint32_t flags;
} __packed;
/**
* struct ec_response_flash_protect - Response to the flash protect command.
* @flags: Current value of flash protect flags.
* @valid_flags: Flags which are valid on this platform. This allows the
* caller to distinguish between flags which aren't set vs. flags
* which can't be set on this platform.
* @writable_flags: Flags which can be changed given the current protection
* state.
*/
struct ec_response_flash_protect {
/* Current value of flash protect flags */
uint32_t flags;
/*
* Flags which are valid on this platform. This allows the caller
* to distinguish between flags which aren't set vs. flags which can't
* be set on this platform.
*/
uint32_t valid_flags;
/* Flags which can be changed given the current protection state */
uint32_t writable_flags;
} __packed;
@ -982,8 +1060,13 @@ enum ec_flash_region {
EC_FLASH_REGION_COUNT,
};
/**
* struct ec_params_flash_region_info - Parameters for the flash region info
* command.
* @region: Flash region; see EC_FLASH_REGION_*
*/
struct ec_params_flash_region_info {
uint32_t region; /* enum ec_flash_region */
uint32_t region;
} __packed;
struct ec_response_flash_region_info {
@ -1094,7 +1177,9 @@ struct rgb_s {
};
#define LB_BATTERY_LEVELS 4
/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
/*
* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
* host command, but the alignment is the same regardless. Keep it that way.
*/
struct lightbar_params_v0 {

View file

@ -1,90 +0,0 @@
/*
* cros_ec_lpc_mec - LPC variant I/O for Microchip EC
*
* Copyright (C) 2016 Google, Inc
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This driver uses the Chrome OS EC byte-level message-based protocol for
* communicating the keyboard state (which keys are pressed) from a keyboard EC
* to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
* but everything else (including deghosting) is done here. The main
* motivation for this is to keep the EC firmware as simple as possible, since
* it cannot be easily upgraded and EC flash/IRAM space is relatively
* expensive.
*/
#ifndef __LINUX_MFD_CROS_EC_MEC_H
#define __LINUX_MFD_CROS_EC_MEC_H
#include <linux/mfd/cros_ec_commands.h>
enum cros_ec_lpc_mec_emi_access_mode {
/* 8-bit access */
ACCESS_TYPE_BYTE = 0x0,
/* 16-bit access */
ACCESS_TYPE_WORD = 0x1,
/* 32-bit access */
ACCESS_TYPE_LONG = 0x2,
/*
* 32-bit access, read or write of MEC_EMI_EC_DATA_B3 causes the
* EC data register to be incremented.
*/
ACCESS_TYPE_LONG_AUTO_INCREMENT = 0x3,
};
enum cros_ec_lpc_mec_io_type {
MEC_IO_READ,
MEC_IO_WRITE,
};
/* Access IO ranges 0x800 thru 0x9ff using EMI interface instead of LPC */
#define MEC_EMI_RANGE_START EC_HOST_CMD_REGION0
#define MEC_EMI_RANGE_END (EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE)
/* EMI registers are relative to base */
#define MEC_EMI_BASE 0x800
#define MEC_EMI_HOST_TO_EC (MEC_EMI_BASE + 0)
#define MEC_EMI_EC_TO_HOST (MEC_EMI_BASE + 1)
#define MEC_EMI_EC_ADDRESS_B0 (MEC_EMI_BASE + 2)
#define MEC_EMI_EC_ADDRESS_B1 (MEC_EMI_BASE + 3)
#define MEC_EMI_EC_DATA_B0 (MEC_EMI_BASE + 4)
#define MEC_EMI_EC_DATA_B1 (MEC_EMI_BASE + 5)
#define MEC_EMI_EC_DATA_B2 (MEC_EMI_BASE + 6)
#define MEC_EMI_EC_DATA_B3 (MEC_EMI_BASE + 7)
/*
* cros_ec_lpc_mec_init
*
* Initialize MEC I/O.
*/
void cros_ec_lpc_mec_init(void);
/*
* cros_ec_lpc_mec_destroy
*
* Cleanup MEC I/O.
*/
void cros_ec_lpc_mec_destroy(void);
/**
* cros_ec_lpc_io_bytes_mec - Read / write bytes to MEC EMI port
*
* @io_type: MEC_IO_READ or MEC_IO_WRITE, depending on request
* @offset: Base read / write address
* @length: Number of bytes to read / write
* @buf: Destination / source buffer
*
* @return 8-bit checksum of all bytes read / written
*/
u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type,
unsigned int offset, unsigned int length, u8 *buf);
#endif /* __LINUX_MFD_CROS_EC_MEC_H */

View file

@ -1,61 +0,0 @@
/*
* cros_ec_lpc_reg - LPC access to the Chrome OS Embedded Controller
*
* Copyright (C) 2016 Google, Inc
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This driver uses the Chrome OS EC byte-level message-based protocol for
* communicating the keyboard state (which keys are pressed) from a keyboard EC
* to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
* but everything else (including deghosting) is done here. The main
* motivation for this is to keep the EC firmware as simple as possible, since
* it cannot be easily upgraded and EC flash/IRAM space is relatively
* expensive.
*/
#ifndef __LINUX_MFD_CROS_EC_REG_H
#define __LINUX_MFD_CROS_EC_REG_H
/**
* cros_ec_lpc_read_bytes - Read bytes from a given LPC-mapped address.
* Returns 8-bit checksum of all bytes read.
*
* @offset: Base read address
* @length: Number of bytes to read
* @dest: Destination buffer
*/
u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length, u8 *dest);
/**
* cros_ec_lpc_write_bytes - Write bytes to a given LPC-mapped address.
* Returns 8-bit checksum of all bytes written.
*
* @offset: Base write address
* @length: Number of bytes to write
* @msg: Write data buffer
*/
u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length, u8 *msg);
/**
* cros_ec_lpc_reg_init
*
* Initialize register I/O.
*/
void cros_ec_lpc_reg_init(void);
/**
* cros_ec_lpc_reg_destroy
*
* Cleanup reg I/O.
*/
void cros_ec_lpc_reg_destroy(void);
#endif /* __LINUX_MFD_CROS_EC_REG_H */

View file

@ -2163,7 +2163,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
struct mminit_pfnnid_cache *state);
#endif
#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}

View file

@ -633,9 +633,6 @@ typedef struct pglist_data {
struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
struct bootmem_data *bdata;
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
* Must be held any time you expect node_start_pfn, node_present_pages
@ -869,7 +866,7 @@ static inline int is_highmem_idx(enum zone_type idx)
}
/**
* is_highmem - helper function to quickly check if a struct zone is a
* is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
* to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
* @zone - pointer to struct zone variable

View file

@ -388,6 +388,9 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it,
extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
extern int of_alias_get_id(struct device_node *np, const char *stem);
extern int of_alias_get_highest_id(const char *stem);
extern int of_alias_get_alias_list(const struct of_device_id *matches,
const char *stem, unsigned long *bitmap,
unsigned int nbits);
extern int of_machine_is_compatible(const char *compat);
@ -898,6 +901,13 @@ static inline int of_alias_get_highest_id(const char *stem)
return -ENOSYS;
}
static inline int of_alias_get_alias_list(const struct of_device_id *matches,
const char *stem, unsigned long *bitmap,
unsigned int nbits)
{
return -ENOSYS;
}
static inline int of_machine_is_compatible(const char *compat)
{
return 0;

View file

@ -241,9 +241,9 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x)
typedef int filler_t(void *, struct page *);
pgoff_t page_cache_next_hole(struct address_space *mapping,
pgoff_t page_cache_next_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
pgoff_t page_cache_prev_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
#define FGP_ACCESSED 0x00000001
@ -363,17 +363,17 @@ static inline unsigned find_get_pages(struct address_space *mapping,
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
pgoff_t end, int tag, unsigned int nr_pages,
pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
pgoff_t *index, int tag, unsigned int nr_pages,
pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
struct page **pages)
{
return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
nr_pages, pages);
}
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
int tag, unsigned int nr_entries,
xa_mark_t tag, unsigned int nr_entries,
struct page **entries, pgoff_t *indices);
struct page *grab_cache_page_write_begin(struct address_space *mapping,

View file

@ -9,6 +9,8 @@
#ifndef _LINUX_PAGEVEC_H
#define _LINUX_PAGEVEC_H
#include <linux/xarray.h>
/* 15 pointers + header align the pagevec structure to a power of two */
#define PAGEVEC_SIZE 15
@ -40,12 +42,12 @@ static inline unsigned pagevec_lookup(struct pagevec *pvec,
unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
int tag);
xa_mark_t tag);
unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, pgoff_t end,
int tag, unsigned max_pages);
xa_mark_t tag, unsigned max_pages);
static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
struct address_space *mapping, pgoff_t *index, int tag)
struct address_space *mapping, pgoff_t *index, xa_mark_t tag)
{
return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);
}
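The conversion is mechanical for callers; a typical tagged scan over dirty pages now reads as follows (a sketch, with a hypothetical per-page writeback helper):

static void example_scan_dirty(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	unsigned int i, nr;

	pagevec_init(&pvec);
	while ((nr = pagevec_lookup_tag(&pvec, mapping, &index,
					PAGECACHE_TAG_DIRTY))) {
		for (i = 0; i < nr; i++)
			example_write_one_page(pvec.pages[i]);
		/* Drop the references taken by the lookup */
		pagevec_release(&pvec);
	}
}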

View file

@ -91,8 +91,7 @@
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
extern __PCPU_ATTRS(sec) __typeof__(type) name; \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
__typeof__(type) name
__PCPU_ATTRS(sec) __weak __typeof__(type) name
#else
/*
* Normal declaration and definition macros.
@ -101,8 +100,7 @@
extern __PCPU_ATTRS(sec) __typeof__(type) name
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
__typeof__(type) name
__PCPU_ATTRS(sec) __typeof__(type) name
#endif
/*

View file

@ -24,8 +24,10 @@
#ifndef __ASM_ARCH_OMAP_GPIO_H
#define __ASM_ARCH_OMAP_GPIO_H
#ifndef __ASSEMBLER__
#include <linux/io.h>
#include <linux/platform_device.h>
#endif
#define OMAP1_MPUIO_BASE 0xfffb5000
@ -157,6 +159,7 @@
#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
#ifndef __ASSEMBLER__
struct omap_gpio_reg_offs {
u16 revision;
u16 direction;
@ -205,4 +208,6 @@ struct omap_gpio_platform_data {
int (*get_context_loss_count)(struct device *dev);
};
#endif /* __ASSEMBLER__ */
#endif

View file

@ -1,14 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* shmob_drm.h -- SH Mobile DRM driver
*
* Copyright (C) 2012 Renesas Corporation
*
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef __SHMOB_DRM_H__

View file

@ -46,7 +46,6 @@ struct sysc_regbits {
s8 emufree_shift;
};
#define SYSC_QUIRK_RESOURCE_PROVIDER BIT(9)
#define SYSC_QUIRK_LEGACY_IDLE BIT(8)
#define SYSC_QUIRK_RESET_STATUS BIT(7)
#define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6)

View file

@ -28,34 +28,30 @@
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/xarray.h>
/* Keep unconverted code working */
#define radix_tree_root xarray
#define radix_tree_node xa_node
/*
* The bottom two bits of the slot determine how the remaining bits in the
* slot are interpreted:
*
* 00 - data pointer
* 01 - internal entry
* 10 - exceptional entry
* 11 - this bit combination is currently unused/reserved
* 10 - internal entry
* x1 - value entry
*
* The internal entry may be a pointer to the next level in the tree, a
* sibling entry, or an indicator that the entry in this slot has been moved
* to another location in the tree and the lookup should be restarted. While
* NULL fits the 'data pointer' pattern, it means that there is no entry in
* the tree for this index (no matter what level of the tree it is found at).
* This means that you cannot store NULL in the tree as a value for the index.
* This means that storing a NULL entry in the tree is the same as deleting
* the entry from the tree.
*/
#define RADIX_TREE_ENTRY_MASK 3UL
#define RADIX_TREE_INTERNAL_NODE 1UL
/*
* Most users of the radix tree store pointers but shmem/tmpfs stores swap
* entries in the same tree. They are marked as exceptional entries to
* distinguish them from pointers to struct page.
* EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
*/
#define RADIX_TREE_EXCEPTIONAL_ENTRY 2
#define RADIX_TREE_EXCEPTIONAL_SHIFT 2
#define RADIX_TREE_INTERNAL_NODE 2UL
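The two-bit rule above is easiest to see in code. A minimal sketch, assuming
the xarray value-entry convention (the real helpers are xa_mk_value(),
xa_is_value() and xa_to_value() in <linux/xarray.h>; the sketch_* names are
illustrative):

static inline void *sketch_mk_value(unsigned long v)
{
	return (void *)((v << 1) | 1);		/* "x1" pattern: value entry */
}

static inline bool sketch_is_value(const void *entry)
{
	return (unsigned long)entry & 1;
}

static inline unsigned long sketch_to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}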
static inline bool radix_tree_is_internal_node(void *ptr)
{
@ -65,75 +61,32 @@ static inline bool radix_tree_is_internal_node(void *ptr)
/*** radix-tree API starts here ***/
#define RADIX_TREE_MAX_TAGS 3
#ifndef RADIX_TREE_MAP_SHIFT
#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
#endif
#define RADIX_TREE_MAP_SHIFT XA_CHUNK_SHIFT
#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
#define RADIX_TREE_TAG_LONGS \
((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define RADIX_TREE_MAX_TAGS XA_MAX_MARKS
#define RADIX_TREE_TAG_LONGS XA_MARK_LONGS
#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
RADIX_TREE_MAP_SHIFT))
/*
* @count is the count of every non-NULL element in the ->slots array
* whether that is an exceptional entry, a retry entry, a user pointer,
* a sibling entry or a pointer to the next level of the tree.
* @exceptional is the count of every element in ->slots which is
* either radix_tree_exceptional_entry() or is a sibling entry for an
* exceptional entry.
*/
struct radix_tree_node {
unsigned char shift; /* Bits remaining in each slot */
unsigned char offset; /* Slot offset in parent */
unsigned char count; /* Total entry count */
unsigned char exceptional; /* Exceptional entry count */
struct radix_tree_node *parent; /* Used when ascending tree */
struct radix_tree_root *root; /* The tree we belong to */
union {
struct list_head private_list; /* For tree user */
struct rcu_head rcu_head; /* Used when freeing node */
};
void __rcu *slots[RADIX_TREE_MAP_SIZE];
unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};
/* The IDR tag is stored in the low bits of the GFP flags */
/* The IDR tag is stored in the low bits of xa_flags */
#define ROOT_IS_IDR ((__force gfp_t)4)
/* The top bits of gfp_mask are used to store the root tags */
/* The top bits of xa_flags are used to store the root tags */
#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT)
struct radix_tree_root {
spinlock_t xa_lock;
gfp_t gfp_mask;
struct radix_tree_node __rcu *rnode;
};
#define RADIX_TREE_INIT(name, mask) { \
.xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \
.gfp_mask = (mask), \
.rnode = NULL, \
}
#define RADIX_TREE_INIT(name, mask) XARRAY_INIT(name, mask)
#define RADIX_TREE(name, mask) \
struct radix_tree_root name = RADIX_TREE_INIT(name, mask)
#define INIT_RADIX_TREE(root, mask) \
do { \
spin_lock_init(&(root)->xa_lock); \
(root)->gfp_mask = (mask); \
(root)->rnode = NULL; \
} while (0)
#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask)
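A minimal initialization sketch covering both forms (the tree names and the
function are hypothetical):

static RADIX_TREE(static_tree, GFP_KERNEL);	/* static definition */

static void example_init(struct radix_tree_root *tree)
{
	INIT_RADIX_TREE(tree, GFP_KERNEL);	/* runtime initialization */
}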
static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
return root->rnode == NULL;
return root->xa_head == NULL;
}
/**
@ -143,7 +96,6 @@ static inline bool radix_tree_empty(const struct radix_tree_root *root)
* @next_index: one beyond the last index for this chunk
* @tags: bit-mask for tag-iterating
* @node: node that contains current slot
* @shift: shift for the node that holds our slots
*
* This radix tree iterator works in terms of "chunks" of slots. A chunk is a
* subinterval of slots contained within one radix tree leaf node. It is
@ -157,20 +109,8 @@ struct radix_tree_iter {
unsigned long next_index;
unsigned long tags;
struct radix_tree_node *node;
#ifdef CONFIG_RADIX_TREE_MULTIORDER
unsigned int shift;
#endif
};
static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
return iter->shift;
#else
return 0;
#endif
}
/**
* Radix-tree synchronization
*
@ -194,12 +134,11 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
* radix_tree_lookup_slot
* radix_tree_tag_get
* radix_tree_gang_lookup
* radix_tree_gang_lookup_slot
* radix_tree_gang_lookup_tag
* radix_tree_gang_lookup_tag_slot
* radix_tree_tagged
*
* The first 8 functions are able to be called locklessly, using RCU. The
* The first 7 functions are able to be called locklessly, using RCU. The
* caller must ensure calls to these functions are made within rcu_read_lock()
* regions. Other readers (lock-free or otherwise) and modifications may be
* running concurrently.
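A minimal sketch of a lockless read-side lookup under these rules (my_tree,
index and use_item() are hypothetical):

rcu_read_lock();
entry = radix_tree_lookup(&my_tree, index);
if (entry && !radix_tree_exception(entry))
	use_item(entry);	/* must not sleep inside the RCU section */
rcu_read_unlock();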
@ -268,17 +207,6 @@ static inline int radix_tree_deref_retry(void *arg)
return unlikely(radix_tree_is_internal_node(arg));
}
/**
* radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry?
* @arg: value returned by radix_tree_deref_slot
* Returns: 0 if well-aligned pointer, non-0 if exceptional entry.
*/
static inline int radix_tree_exceptional_entry(void *arg)
{
/* Not unlikely because radix_tree_exception often tested first */
return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}
/**
* radix_tree_exception - radix_tree_deref_slot returned either exception?
* @arg: value returned by radix_tree_deref_slot
@ -289,47 +217,28 @@ static inline int radix_tree_exception(void *arg)
return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}
int __radix_tree_create(struct radix_tree_root *, unsigned long index,
unsigned order, struct radix_tree_node **nodep,
void __rcu ***slotp);
int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
unsigned order, void *);
static inline int radix_tree_insert(struct radix_tree_root *root,
unsigned long index, void *entry)
{
return __radix_tree_insert(root, index, 0, entry);
}
int radix_tree_insert(struct radix_tree_root *, unsigned long index,
void *);
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
unsigned long index);
typedef void (*radix_tree_update_node_t)(struct radix_tree_node *);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
void __rcu **slot, void *entry,
radix_tree_update_node_t update_node);
void __rcu **slot, void *entry);
void radix_tree_iter_replace(struct radix_tree_root *,
const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
void __rcu **slot, void *entry);
void __radix_tree_delete_node(struct radix_tree_root *,
struct radix_tree_node *,
radix_tree_update_node_t update_node);
void radix_tree_iter_delete(struct radix_tree_root *,
struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *,
void __rcu **slot);
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
void **results, unsigned long first_index,
unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
void __rcu ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
unsigned long index, unsigned int tag);
@ -337,8 +246,6 @@ void *radix_tree_tag_clear(struct radix_tree_root *,
unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct radix_tree_root *,
unsigned long index, unsigned int tag);
void radix_tree_iter_tag_set(struct radix_tree_root *,
const struct radix_tree_iter *iter, unsigned int tag);
void radix_tree_iter_tag_clear(struct radix_tree_root *,
const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
@ -354,12 +261,6 @@ static inline void radix_tree_preload_end(void)
preempt_enable();
}
int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
int radix_tree_split(struct radix_tree_root *, unsigned long index,
unsigned new_order);
int radix_tree_join(struct radix_tree_root *, unsigned long index,
unsigned new_order, void *);
void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max);
@ -465,7 +366,7 @@ void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
return iter->index + (slots << iter_shift(iter));
return iter->index + slots;
}
/**
@ -490,21 +391,9 @@ void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
return (iter->next_index - iter->index) >> iter_shift(iter);
return iter->next_index - iter->index;
}
#ifdef CONFIG_RADIX_TREE_MULTIORDER
void __rcu **__radix_tree_next_slot(void __rcu **slot,
struct radix_tree_iter *iter, unsigned flags);
#else
/* Can't happen without sibling entries, but the compiler can't tell that */
static inline void __rcu **__radix_tree_next_slot(void __rcu **slot,
struct radix_tree_iter *iter, unsigned flags)
{
return slot;
}
#endif
/**
* radix_tree_next_slot - find next slot in chunk
*
@ -563,8 +452,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
return NULL;
found:
if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot))))
return __radix_tree_next_slot(slot, iter, flags);
return slot;
}
@ -583,23 +470,6 @@ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
slot || (slot = radix_tree_next_chunk(root, iter, 0)) ; \
slot = radix_tree_next_slot(slot, iter, 0))
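A minimal iteration sketch using the macro above (my_tree is hypothetical;
the retry step handles entries that moved concurrently):

struct radix_tree_iter iter;
void __rcu **slot;

rcu_read_lock();
radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
	void *entry = radix_tree_deref_slot(slot);

	if (radix_tree_deref_retry(entry)) {
		slot = radix_tree_iter_retry(&iter);
		continue;
	}
	/* iter.index is the current index, entry the stored pointer */
}
rcu_read_unlock();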
/**
* radix_tree_for_each_contig - iterate over contiguous slots
*
* @slot: the void** variable for pointer to slot
* @root: the struct radix_tree_root pointer
* @iter: the struct radix_tree_iter pointer
* @start: iteration starting index
*
* @slot points to radix tree slot, @iter->index contains its index.
*/
#define radix_tree_for_each_contig(slot, root, iter, start) \
for (slot = radix_tree_iter_init(iter, start) ; \
slot || (slot = radix_tree_next_chunk(root, iter, \
RADIX_TREE_ITER_CONTIG)) ; \
slot = radix_tree_next_slot(slot, iter, \
RADIX_TREE_ITER_CONTIG))
/**
* radix_tree_for_each_tagged - iterate over tagged slots
*

View file

@ -51,8 +51,8 @@ extern void __rb_insert_augmented(struct rb_node *node,
*
* On insertion, the user must update the augmented information on the path
* leading to the inserted node, then call rb_link_node() as usual and
* rb_augment_inserted() instead of the usual rb_insert_color() call.
* If rb_augment_inserted() rebalances the rbtree, it will callback into
* rb_insert_augmented() instead of the usual rb_insert_color() call.
* If rb_insert_augmented() rebalances the rbtree, it will callback into
* a user provided function to update the augmented information on the
* affected subtrees.
*/
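A minimal insertion sketch under those rules (node, parent, link and
my_callbacks are hypothetical; the augmented data on the path down must
already have been updated):

rb_link_node(&node->rb, parent, link);
rb_insert_augmented(&node->rb, &root, &my_callbacks);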

View file

@ -305,14 +305,22 @@ struct fw_rsc_vdev {
struct fw_rsc_vdev_vring vring[0];
} __packed;
struct rproc;
/**
* struct rproc_mem_entry - memory entry descriptor
* @va: virtual address
* @dma: dma address
* @len: length, in bytes
* @da: device address
* @release: release associated memory
* @priv: associated data
* @name: associated memory region name (optional)
* @node: list node
* @rsc_offset: offset in resource table
* @flags: iommu protection flags
* @of_resm_idx: reserved memory phandle index
* @alloc: specific memory allocator function
*/
struct rproc_mem_entry {
void *va;
@ -320,10 +328,15 @@ struct rproc_mem_entry {
int len;
u32 da;
void *priv;
char name[32];
struct list_head node;
u32 rsc_offset;
u32 flags;
u32 of_resm_idx;
int (*alloc)(struct rproc *rproc, struct rproc_mem_entry *mem);
int (*release)(struct rproc *rproc, struct rproc_mem_entry *mem);
};
struct rproc;
struct firmware;
/**
@ -399,6 +412,9 @@ enum rproc_crash_type {
* @node: list node related to the rproc segment list
* @da: device address of the segment
* @size: size of the segment
* @priv: private data associated with the dump_segment
* @dump: custom dump function to fill device memory segment associated
* with coredump
*/
struct rproc_dump_segment {
struct list_head node;
@ -406,6 +422,9 @@ struct rproc_dump_segment {
dma_addr_t da;
size_t size;
void *priv;
void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment,
void *dest);
loff_t offset;
};
@ -439,7 +458,9 @@ struct rproc_dump_segment {
* @cached_table: copy of the resource table
* @table_sz: size of @cached_table
* @has_iommu: flag to indicate if remote processor is behind an MMU
* @auto_boot: flag to indicate if remote processor should be auto-started
* @dump_segments: list of segments in the firmware
* @nb_vdev: number of vdevs currently handled by the rproc
*/
struct rproc {
struct list_head node;
@ -472,6 +493,7 @@ struct rproc {
bool has_iommu;
bool auto_boot;
struct list_head dump_segments;
int nb_vdev;
};
/**
@ -499,7 +521,6 @@ struct rproc_subdev {
/**
* struct rproc_vring - remoteproc vring state
* @va: virtual address
* @dma: dma address
* @len: length, in bytes
* @da: device address
* @align: vring alignment
@ -509,7 +530,6 @@ struct rproc_subdev {
*/
struct rproc_vring {
void *va;
dma_addr_t dma;
int len;
u32 da;
u32 align;
@ -528,6 +548,7 @@ struct rproc_vring {
* @vdev: the virio device
* @vring: the vrings for this vdev
* @rsc_offset: offset of the vdev's resource entry
* @index: vdev position relative to the other vdevs declared in the resource table
*/
struct rproc_vdev {
struct kref refcount;
@ -540,6 +561,7 @@ struct rproc_vdev {
struct virtio_device vdev;
struct rproc_vring vring[RVDEV_NUM_VRINGS];
u32 rsc_offset;
u32 index;
};
struct rproc *rproc_get_by_phandle(phandle phandle);
@ -553,10 +575,29 @@ int rproc_add(struct rproc *rproc);
int rproc_del(struct rproc *rproc);
void rproc_free(struct rproc *rproc);
void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem);
struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
void *va, dma_addr_t dma, int len, u32 da,
int (*alloc)(struct rproc *, struct rproc_mem_entry *),
int (*release)(struct rproc *, struct rproc_mem_entry *),
const char *name, ...);
struct rproc_mem_entry *
rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
u32 da, const char *name, ...);
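A minimal sketch of registering a carveout built with rproc_mem_entry_init()
(my_alloc, my_release and the region name are hypothetical; the name argument
is printf-like):

struct rproc_mem_entry *mem;

mem = rproc_mem_entry_init(dev, NULL, 0, len, da,
			   my_alloc, my_release, "carveout%d", 0);
if (mem)
	rproc_add_carveout(rproc, mem);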
int rproc_boot(struct rproc *rproc);
void rproc_shutdown(struct rproc *rproc);
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
int rproc_coredump_add_custom_segment(struct rproc *rproc,
dma_addr_t da, size_t size,
void (*dumpfn)(struct rproc *rproc,
struct rproc_dump_segment *segment,
void *dest),
void *priv);
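A minimal sketch of the custom-dump hook (my_dump and the addresses are
hypothetical):

static void my_dump(struct rproc *rproc,
		    struct rproc_dump_segment *segment, void *dest)
{
	/* fill dest with segment->size bytes for device address segment->da */
}

/* during firmware parsing: */
rproc_coredump_add_custom_segment(rproc, da, size, my_dump, NULL);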
static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{

View file

@ -116,7 +116,7 @@ static inline int device_reset_optional(struct device *dev)
* @id: reset line name
*
* Returns a struct reset_control or IS_ERR() condition containing errno.
* If this function is called more then once for the same reset_control it will
* If this function is called more than once for the same reset_control it will
* return -EBUSY.
*
* See reset_control_get_shared for details on shared references to

View file

@ -167,17 +167,12 @@ struct rtc_device {
#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */
#define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */
extern struct rtc_device *rtc_device_register(const char *name,
struct device *dev,
const struct rtc_class_ops *ops,
struct module *owner);
extern struct rtc_device *devm_rtc_device_register(struct device *dev,
const char *name,
const struct rtc_class_ops *ops,
struct module *owner);
struct rtc_device *devm_rtc_allocate_device(struct device *dev);
int __rtc_register_device(struct module *owner, struct rtc_device *rtc);
extern void rtc_device_unregister(struct rtc_device *rtc);
extern void devm_rtc_device_unregister(struct device *dev,
struct rtc_device *rtc);
@ -277,4 +272,20 @@ static inline int rtc_nvmem_register(struct rtc_device *rtc,
static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {}
#endif
#ifdef CONFIG_RTC_INTF_SYSFS
int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp);
int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps);
#else
static inline
int rtc_add_group(struct rtc_device *rtc, const struct attribute_group *grp)
{
return 0;
}
static inline
int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps)
{
return 0;
}
#endif
#endif /* _LINUX_RTC_H_ */

View file

@ -20,7 +20,6 @@ extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
static inline int sched_info_on(void)
{

View file

@ -91,6 +91,8 @@ struct scmi_clk_ops {
* to sustained performance level mapping
* @freq_get: gets the frequency for a given device using sustained frequency
* to sustained performance level mapping
* @est_power_get: gets the estimated power cost for a given performance domain
* at a given frequency
*/
struct scmi_perf_ops {
int (*limits_set)(const struct scmi_handle *handle, u32 domain,
@ -110,6 +112,8 @@ struct scmi_perf_ops {
unsigned long rate, bool poll);
int (*freq_get)(const struct scmi_handle *handle, u32 domain,
unsigned long *rate, bool poll);
int (*est_power_get)(const struct scmi_handle *handle, u32 domain,
unsigned long *rate, unsigned long *power);
};
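A minimal call sketch (handle and domain are hypothetical, and whether the
implementation adjusts the rate to a supported operating point is left to the
SCMI platform):

unsigned long rate = 500000000;	/* Hz */
unsigned long power;
int ret;

ret = handle->perf_ops->est_power_get(handle, domain, &rate, &power);
if (!ret)
	update_energy_model(domain, rate, power);	/* hypothetical consumer */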
/**

View file

@ -144,6 +144,8 @@ struct uart_port {
void (*handle_break)(struct uart_port *);
int (*rs485_config)(struct uart_port *,
struct serial_rs485 *rs485);
int (*iso7816_config)(struct uart_port *,
struct serial_iso7816 *iso7816);
unsigned int irq; /* irq number */
unsigned long irqflags; /* irq flags */
unsigned int uartclk; /* base uart clock */
@ -260,6 +262,7 @@ struct uart_port {
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
struct serial_rs485 rs485;
struct serial_iso7816 iso7816;
void *private_data; /* generic platform data pointer */
};

View file

@ -129,9 +129,11 @@ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
b3 = b->sig[3]; b2 = b->sig[2]; \
r->sig[3] = op(a3, b3); \
r->sig[2] = op(a2, b2); \
/* fall through */ \
case 2: \
a1 = a->sig[1]; b1 = b->sig[1]; \
r->sig[1] = op(a1, b1); \
/* fall through */ \
case 1: \
a0 = a->sig[0]; b0 = b->sig[0]; \
r->sig[0] = op(a0, b0); \
@ -161,7 +163,9 @@ static inline void name(sigset_t *set) \
switch (_NSIG_WORDS) { \
case 4: set->sig[3] = op(set->sig[3]); \
set->sig[2] = op(set->sig[2]); \
/* fall through */ \
case 2: set->sig[1] = op(set->sig[1]); \
/* fall through */ \
case 1: set->sig[0] = op(set->sig[0]); \
break; \
default: \
@ -182,6 +186,7 @@ static inline void sigemptyset(sigset_t *set)
memset(set, 0, sizeof(sigset_t));
break;
case 2: set->sig[1] = 0;
/* fall through */
case 1: set->sig[0] = 0;
break;
}
@ -194,6 +199,7 @@ static inline void sigfillset(sigset_t *set)
memset(set, -1, sizeof(sigset_t));
break;
case 2: set->sig[1] = -1;
/* fall through */
case 1: set->sig[0] = -1;
break;
}
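The fall-throughs above are deliberate: each larger _NSIG_WORDS case also
processes the lower words. With _NSIG_WORDS == 2, for example, the switch in
sigemptyset() reduces to:

set->sig[1] = 0;	/* case 2 ... */
set->sig[0] = 0;	/* ... falling through into case 1 */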

View file

@ -0,0 +1,65 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2018 BayLibre, SAS
*/
#ifndef __SOC_MESON_CANVAS_H
#define __SOC_MESON_CANVAS_H
#include <linux/kernel.h>
#define MESON_CANVAS_WRAP_NONE 0x00
#define MESON_CANVAS_WRAP_X 0x01
#define MESON_CANVAS_WRAP_Y 0x02
#define MESON_CANVAS_BLKMODE_LINEAR 0x00
#define MESON_CANVAS_BLKMODE_32x32 0x01
#define MESON_CANVAS_BLKMODE_64x64 0x02
#define MESON_CANVAS_ENDIAN_SWAP16 0x1
#define MESON_CANVAS_ENDIAN_SWAP32 0x3
#define MESON_CANVAS_ENDIAN_SWAP64 0x7
#define MESON_CANVAS_ENDIAN_SWAP128 0xf
struct meson_canvas;
/**
* meson_canvas_get() - get a canvas provider instance
*
* @dev: consumer device pointer
*/
struct meson_canvas *meson_canvas_get(struct device *dev);
/**
* meson_canvas_alloc() - take ownership of a canvas
*
* @canvas: canvas provider instance retrieved from meson_canvas_get()
* @canvas_index: will be filled with the canvas ID
*/
int meson_canvas_alloc(struct meson_canvas *canvas, u8 *canvas_index);
/**
* meson_canvas_free() - remove ownership from a canvas
*
* @canvas: canvas provider instance retrieved from meson_canvas_get()
* @canvas_index: canvas ID that was obtained via meson_canvas_alloc()
*/
int meson_canvas_free(struct meson_canvas *canvas, u8 canvas_index);
/**
* meson_canvas_config() - configure a canvas
*
* @canvas: canvas provider instance retrieved from meson_canvas_get()
* @canvas_index: canvas ID that was obtained via meson_canvas_alloc()
* @addr: physical address to the pixel buffer
* @stride: width of the buffer
* @height: height of the buffer
* @wrap: undocumented
* @blkmode: block mode (linear, 32x32, 64x64)
* @endian: byte swapping (swap16, swap32, swap64, swap128)
*/
int meson_canvas_config(struct meson_canvas *canvas, u8 canvas_index,
u32 addr, u32 stride, u32 height,
unsigned int wrap, unsigned int blkmode,
unsigned int endian);
#endif
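A minimal consumer sketch, assuming a hypothetical dev, paddr, stride and
height, with error handling trimmed (meson_canvas_get() may return an error
pointer):

struct meson_canvas *canvas = meson_canvas_get(dev);
u8 idx;

meson_canvas_alloc(canvas, &idx);
meson_canvas_config(canvas, idx, paddr, stride, height,
		    MESON_CANVAS_WRAP_NONE,
		    MESON_CANVAS_BLKMODE_LINEAR, 0);
/* ... scan out from the canvas ... */
meson_canvas_free(canvas, idx);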

View file

@ -70,25 +70,51 @@ struct llcc_slice_config {
/**
* llcc_drv_data - Data associated with the llcc driver
* @regmap: regmap associated with the llcc device
* @bcast_regmap: regmap associated with llcc broadcast offset
* @cfg: pointer to the data structure for slice configuration
* @lock: mutex associated with each slice
* @cfg_size: size of the config data table
* @max_slices: max slices as read from device tree
* @bcast_off: Offset of the broadcast bank
* @num_banks: Number of llcc banks
* @bitmap: Bit map to track the active slice ids
* @offsets: Pointer to the bank offsets array
* @ecc_irq: interrupt for llcc cache error detection and reporting
*/
struct llcc_drv_data {
struct regmap *regmap;
struct regmap *bcast_regmap;
const struct llcc_slice_config *cfg;
struct mutex lock;
u32 cfg_size;
u32 max_slices;
u32 bcast_off;
u32 num_banks;
unsigned long *bitmap;
u32 *offsets;
int ecc_irq;
};
/**
* llcc_edac_reg_data - llcc edac registers data for each error type
* @name: Name of the error
* @synd_reg: Syndrome register address
* @count_status_reg: Status register address to read the error count
* @ways_status_reg: Status register address to read the error ways
* @reg_cnt: Number of registers
* @count_mask: Mask value to get the error count
* @ways_mask: Mask value to get the error ways
* @count_shift: Shift value to get the error count
* @ways_shift: Shift value to get the error ways
*/
struct llcc_edac_reg_data {
char *name;
u64 synd_reg;
u64 count_status_reg;
u64 ways_status_reg;
u32 reg_cnt;
u32 count_mask;
u32 ways_mask;
u8 count_shift;
u8 ways_shift;
};
#if IS_ENABLED(CONFIG_QCOM_LLCC)

View file

@ -67,7 +67,7 @@ struct cache_detail {
struct module * owner;
int hash_size;
struct hlist_head * hash_table;
rwlock_t hash_lock;
spinlock_t hash_lock;
char *name;
void (*cache_put)(struct kref *);
@ -168,8 +168,8 @@ extern const struct file_operations content_file_operations_pipefs;
extern const struct file_operations cache_flush_operations_pipefs;
extern struct cache_head *
sunrpc_cache_lookup(struct cache_detail *detail,
struct cache_head *key, int hash);
sunrpc_cache_lookup_rcu(struct cache_detail *detail,
struct cache_head *key, int hash);
extern struct cache_head *
sunrpc_cache_update(struct cache_detail *detail,
struct cache_head *new, struct cache_head *old, int hash);
@ -186,6 +186,12 @@ static inline struct cache_head *cache_get(struct cache_head *h)
return h;
}
static inline struct cache_head *cache_get_rcu(struct cache_head *h)
{
if (kref_get_unless_zero(&h->ref))
return h;
return NULL;
}
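kref_get_unless_zero() is what makes RCU-side lookup safe here: a reader can
race with the final cache_put(), so it takes a reference only while the count
is still nonzero. A sketch of the lookup side (head, key, match() and the
cache_list member name are assumptions):

rcu_read_lock();
hlist_for_each_entry_rcu(h, head, cache_list) {
	if (match(h, key) && cache_get_rcu(h))
		break;		/* stable reference held; h is NULL if not found */
}
rcu_read_unlock();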
static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
{
@ -224,9 +230,9 @@ extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *);
/* Must store cache_detail in seq_file->private if using next three functions */
extern void *cache_seq_start(struct seq_file *file, loff_t *pos);
extern void *cache_seq_next(struct seq_file *file, void *p, loff_t *pos);
extern void cache_seq_stop(struct seq_file *file, void *p);
extern void *cache_seq_start_rcu(struct seq_file *file, loff_t *pos);
extern void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos);
extern void cache_seq_stop_rcu(struct seq_file *file, void *p);
extern void qword_add(char **bpp, int *lp, char *str);
extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);

View file

@ -113,13 +113,14 @@ struct svcxprt_rdma {
/* sc_flags */
#define RDMAXPRT_CONN_PENDING 3
#define RPCRDMA_LISTEN_BACKLOG 10
#define RPCRDMA_MAX_REQUESTS 32
/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our
* current NFSv4.1 implementation supports one backchannel slot.
/*
* Default connection parameters
*/
#define RPCRDMA_MAX_BC_REQUESTS 2
enum {
RPCRDMA_LISTEN_BACKLOG = 10,
RPCRDMA_MAX_REQUESTS = 64,
RPCRDMA_MAX_BC_REQUESTS = 2,
};
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD

View file

@ -82,6 +82,7 @@ struct auth_domain {
struct hlist_node hash;
char *name;
struct auth_ops *flavour;
struct rcu_head rcu_head;
};
/*

View file

@ -300,17 +300,12 @@ void *workingset_eviction(struct address_space *mapping, struct page *page);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);
/* Do not use directly, use workingset_lookup_update */
void workingset_update_node(struct radix_tree_node *node);
/* Returns workingset_update_node() if the mapping has shadow entries. */
#define workingset_lookup_update(mapping) \
({ \
radix_tree_update_node_t __helper = workingset_update_node; \
if (dax_mapping(mapping) || shmem_mapping(mapping)) \
__helper = NULL; \
__helper; \
})
/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
#define mapping_set_update(xas, mapping) do { \
if (!dax_mapping(mapping) && !shmem_mapping(mapping)) \
xas_set_update(xas, workingset_update_node); \
} while (0)
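A minimal call-site sketch, assuming the xarray XA_STATE() helper and a
page-cache mapping/index:

XA_STATE(xas, &mapping->i_pages, index);

mapping_set_update(&xas, mapping);
/* later xas_store() calls will invoke workingset_update_node() */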
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
@ -409,7 +404,7 @@ extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
extern void delete_from_swap_cache(struct page *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
@ -563,7 +558,8 @@ static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
return -1;
}
static inline void __delete_from_swap_cache(struct page *page)
static inline void __delete_from_swap_cache(struct page *page,
swp_entry_t entry)
{
}

View file

@ -18,9 +18,8 @@
*
* swp_entry_t's are *never* stored anywhere in their arch-dependent format.
*/
#define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \
(MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
#define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1)
#define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
/*
* Store a type+offset into a swp_entry_t in an arch-independent format
@ -29,8 +28,7 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
swp_entry_t ret;
ret.val = (type << SWP_TYPE_SHIFT(ret)) |
(offset & SWP_OFFSET_MASK(ret));
ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
return ret;
}
@ -40,7 +38,7 @@ static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
*/
static inline unsigned swp_type(swp_entry_t entry)
{
return (entry.val >> SWP_TYPE_SHIFT(entry));
return (entry.val >> SWP_TYPE_SHIFT);
}
/*
@ -49,7 +47,7 @@ static inline unsigned swp_type(swp_entry_t entry)
*/
static inline pgoff_t swp_offset(swp_entry_t entry)
{
return entry.val & SWP_OFFSET_MASK(entry);
return entry.val & SWP_OFFSET_MASK;
}
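On a 64-bit kernel with MAX_SWAPFILES_SHIFT == 5 this gives SWP_TYPE_SHIFT ==
58, since BITS_PER_XA_VALUE is BITS_PER_LONG - 1 (one bit is reserved for the
xarray value tag). A minimal round-trip sketch:

swp_entry_t e = swp_entry(3, 0x1234);	/* type 3, offset 0x1234 */

BUG_ON(swp_type(e) != 3);
BUG_ON(swp_offset(e) != 0x1234);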
#ifdef CONFIG_MMU
@ -90,16 +88,13 @@ static inline swp_entry_t radix_to_swp_entry(void *arg)
{
swp_entry_t entry;
entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
entry.val = xa_to_value(arg);
return entry;
}
static inline void *swp_to_radix_entry(swp_entry_t entry)
{
unsigned long value;
value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
return xa_mk_value(entry.val);
}
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)

View file

@ -453,6 +453,79 @@ static inline int tee_shm_get_id(struct tee_shm *shm)
*/
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
/**
* tee_client_open_context() - Open a TEE context
* @start: if not NULL, continue search after this context
* @match: function to check TEE device
* @data: data for match function
* @vers: if not NULL, version data of TEE device of the context returned
*
* This function does an operation similar to open("/dev/teeX") in user space.
* A returned context must be released with tee_client_close_context().
*
* Returns a TEE context of the first TEE device matched by the match()
* callback or an ERR_PTR.
*/
struct tee_context *
tee_client_open_context(struct tee_context *start,
int (*match)(struct tee_ioctl_version_data *,
const void *),
const void *data, struct tee_ioctl_version_data *vers);
/**
* tee_client_close_context() - Close a TEE context
* @ctx: TEE context to close
*
* Note that all sessions previously opened with this context will be
* closed when this function is called.
*/
void tee_client_close_context(struct tee_context *ctx);
/**
* tee_client_get_version() - Query version of TEE
* @ctx: TEE context of the TEE to query
* @vers: Pointer to version data
*/
void tee_client_get_version(struct tee_context *ctx,
struct tee_ioctl_version_data *vers);
/**
* tee_client_open_session() - Open a session to a Trusted Application
* @ctx: TEE context
* @arg: Open session arguments, see description of
* struct tee_ioctl_open_session_arg
* @param: Parameters passed to the Trusted Application
*
* Returns < 0 on error else see @arg->ret for result. If @arg->ret
* is TEEC_SUCCESS the session identifier is available in @arg->session.
*/
int tee_client_open_session(struct tee_context *ctx,
struct tee_ioctl_open_session_arg *arg,
struct tee_param *param);
/**
* tee_client_close_session() - Close a session to a Trusted Application
* @ctx: TEE Context
* @session: Session id
*
* Returns < 0 on error, else 0. Regardless of the return value, the session
* will not be valid after this function has returned.
*/
int tee_client_close_session(struct tee_context *ctx, u32 session);
/**
* tee_client_invoke_func() - Invoke a function in a Trusted Application
* @ctx: TEE Context
* @arg: Invoke arguments, see description of
* struct tee_ioctl_invoke_arg
* @param: Parameters passed to the Trusted Application
*
* Returns < 0 on error else see @arg->ret for result.
*/
int tee_client_invoke_func(struct tee_context *ctx,
struct tee_ioctl_invoke_arg *arg,
struct tee_param *param);
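A minimal in-kernel client sketch (the match callback and its OP-TEE check
are illustrative):

static int match_optee(struct tee_ioctl_version_data *ver, const void *data)
{
	return ver->impl_id == TEE_IMPL_ID_OPTEE;
}

struct tee_context *ctx;

ctx = tee_client_open_context(NULL, match_optee, NULL, NULL);
if (IS_ERR(ctx))
	return PTR_ERR(ctx);
/* ... tee_client_open_session() / tee_client_invoke_func() ... */
tee_client_close_context(ctx);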
static inline bool tee_param_is_memref(struct tee_param *param)
{
switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {

View file

@ -575,7 +575,8 @@ extern int bpf_get_kprobe_info(const struct perf_event *event,
bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
extern int perf_uprobe_init(struct perf_event *event, bool is_retprobe);
extern int perf_uprobe_init(struct perf_event *event,
unsigned long ref_ctr_offset, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
u32 *fd_type, const char **filename,

View file

@ -123,6 +123,7 @@ extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_mmap(struct vm_area_struct *vma);
@ -160,6 +161,10 @@ uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
return -ENOSYS;
}
static inline int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
return -ENOSYS;
}
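A minimal registration sketch (my_handler, inode and the offsets are
hypothetical; ref_ctr_offset locates the in-image semaphore that is
incremented while the probe is armed):

static struct uprobe_consumer my_consumer = {
	.handler = my_handler,
};

ret = uprobe_register_refctr(inode, probe_offset, ref_ctr_offset,
			     &my_consumer);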
static inline int
uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add)
{

View file

@ -135,13 +135,6 @@ extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
int deflt);
int vty_init(const struct file_operations *console_fops);
static inline bool vt_force_oops_output(struct vc_data *vc)
{
if (oops_in_progress && vc->vc_panic_force_write && panic_timeout >= 0)
return true;
return false;
}
extern char vt_dont_switch;
extern int default_utf8;
extern int global_cursor_default;

File diff suppressed because it is too large