Merge branch 'linus' into release

Conflicts:
	drivers/acpi/debug.c

Signed-off-by: Len Brown <len.brown@intel.com>
commit 95ee46aa86
Len Brown, 2010-08-15 01:06:31 -04:00
8188 changed files with 515572 additions and 364084 deletions


@@ -39,6 +39,7 @@ header-y += ax25.h
header-y += b1lli.h
header-y += baycom.h
header-y += bfs_fs.h
header-y += blk_types.h
header-y += blkpg.h
header-y += bpqether.h
header-y += bsg.h
@@ -210,6 +211,7 @@ unifdef-y += ethtool.h
unifdef-y += eventpoll.h
unifdef-y += signalfd.h
unifdef-y += ext2_fs.h
unifdef-y += fanotify.h
unifdef-y += fb.h
unifdef-y += fcntl.h
unifdef-y += filter.h
@@ -276,6 +278,7 @@ ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm_para.h \
$(srctree)/include/asm-$(SRCARCH)/kvm_para.h),)
unifdef-y += kvm_para.h
endif
unifdef-y += l2tp.h
unifdef-y += llc.h
unifdef-y += loop.h
unifdef-y += lp.h


@@ -15,11 +15,13 @@
#ifndef _AHCI_PLATFORM_H
#define _AHCI_PLATFORM_H
#include <linux/compiler.h>
struct device;
struct ata_port_info;
struct ahci_platform_data {
int (*init)(struct device *dev);
int (*init)(struct device *dev, void __iomem *addr);
void (*exit)(struct device *dev);
const struct ata_port_info *ata_port_info;
unsigned int force_port_map;


@@ -14,14 +14,19 @@
#ifndef ASMARM_AMBA_H
#define ASMARM_AMBA_H
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/resource.h>
#define AMBA_NR_IRQS 2
struct clk;
struct amba_device {
struct device dev;
struct resource res;
struct clk *pclk;
u64 dma_mask;
unsigned int periphid;
unsigned int irq[AMBA_NR_IRQS];
@@ -59,6 +64,12 @@ struct amba_device *amba_find_device(const char *, struct device *, unsigned int
int amba_request_regions(struct amba_device *, const char *);
void amba_release_regions(struct amba_device *);
#define amba_pclk_enable(d) \
(IS_ERR((d)->pclk) ? 0 : clk_enable((d)->pclk))
#define amba_pclk_disable(d) \
do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
#define amba_config(d) (((d)->periphid >> 24) & 0xff)
#define amba_rev(d) (((d)->periphid >> 20) & 0x0f)
#define amba_manf(d) (((d)->periphid >> 12) & 0xff)
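
As an illustration of the clk handling above, a minimal sketch of a primecell driver's probe path (the foo_* names are invented). Because amba_pclk_enable() evaluates to 0 when no pclk was found, drivers can call the helpers unconditionally:

static int foo_probe(struct amba_device *adev, struct amba_id *id)
{
	int ret;

	ret = amba_pclk_enable(adev);	/* 0 if pclk is an ERR_PTR */
	if (ret)
		return ret;

	/* ... access the peripheral's registers here ... */

	amba_pclk_disable(adev);	/* skipped when pclk is invalid */
	return 0;
}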


@@ -15,9 +15,10 @@
* @ocr_mask: available voltages on the 4 pins from the block, this
* is ignored if a regulator is used, see the MMC_VDD_* masks in
* mmc/host.h
* @translate_vdd: a callback function to translate a MMC_VDD_*
* mask into a value to be binary or:ed and written into the
* MMCIPWR register of the block
* @vdd_handler: a callback function to translate a MMC_VDD_*
* mask into a value to be OR'ed into the MMCIPWR register of the
* block (it may also set other custom bits in MMCIPWR); may also
* control external power based on the power_mode.
* @status: if no GPIO read function was given to the block in
* gpio_wp (below) this function will be called to determine
* whether a card is present in the MMC slot or not
@@ -29,7 +30,8 @@
struct mmci_platform_data {
unsigned int f_max;
unsigned int ocr_mask;
u32 (*translate_vdd)(struct device *, unsigned int);
u32 (*vdd_handler)(struct device *, unsigned int vdd,
unsigned char power_mode);
unsigned int (*status)(struct device *);
int gpio_wp;
int gpio_cd;


@@ -38,10 +38,12 @@
#define UART01x_FR 0x18 /* Flag register (Read only). */
#define UART010_IIR 0x1C /* Interrupt identification register (Read). */
#define UART010_ICR 0x1C /* Interrupt clear register (Write). */
#define ST_UART011_LCRH_RX 0x1C /* Rx line control register. */
#define UART01x_ILPR 0x20 /* IrDA low power counter register. */
#define UART011_IBRD 0x24 /* Integer baud rate divisor register. */
#define UART011_FBRD 0x28 /* Fractional baud rate divisor register. */
#define UART011_LCRH 0x2c /* Line control register. */
#define ST_UART011_LCRH_TX 0x2c /* Tx Line control register. */
#define UART011_CR 0x30 /* Control register. */
#define UART011_IFLS 0x34 /* Interrupt fifo level select. */
#define UART011_IMSC 0x38 /* Interrupt mask. */
@@ -84,6 +86,7 @@
#define UART010_CR_TIE 0x0020
#define UART010_CR_RIE 0x0010
#define UART010_CR_MSIE 0x0008
#define ST_UART011_CR_OVSFACT 0x0008 /* Oversampling factor */
#define UART01x_CR_IIRLP 0x0004 /* SIR low power mode */
#define UART01x_CR_SIREN 0x0002 /* SIR enable */
#define UART01x_CR_UARTEN 0x0001 /* UART enable */


@@ -431,6 +431,14 @@ struct atm_dev *atm_dev_register(const char *type,const struct atmdev_ops *ops,
int number,unsigned long *flags); /* number == -1: pick first available */
struct atm_dev *atm_dev_lookup(int number);
void atm_dev_deregister(struct atm_dev *dev);
/* atm_dev_signal_change
*
* Propagate lower layer signal change in atm_dev->signal to netdevice.
* The event will be sent via a notifier call chain.
*/
void atm_dev_signal_change(struct atm_dev *dev, char signal);
void vcc_insert_socket(struct sock *sk);
@@ -510,6 +518,15 @@ void register_atm_ioctl(struct atm_ioctl *);
*/
void deregister_atm_ioctl(struct atm_ioctl *);
/* register_atmdevice_notifier - register atm_dev notify events
*
* Clients like br2684 will register notify events
* Currently we notify of signal found/lost
*/
int register_atmdevice_notifier(struct notifier_block *nb);
void unregister_atmdevice_notifier(struct notifier_block *nb);
#endif /* __KERNEL__ */
#endif
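
To show how a client might consume these hooks, a minimal sketch follows (foo_* names are invented; that the notifier data pointer is the affected atm_dev is an assumption consistent with the comments above):

static int foo_atm_event(struct notifier_block *nb, unsigned long event,
			 void *ptr)
{
	struct atm_dev *dev = ptr;	/* assumed: the device that changed */

	pr_info("atm%d: signal now %d\n", dev->number, dev->signal);
	return NOTIFY_OK;
}

static struct notifier_block foo_atm_nb = {
	.notifier_call	= foo_atm_event,
};

/* register_atmdevice_notifier(&foo_atm_nb) on init,
 * unregister_atmdevice_notifier(&foo_atm_nb) on exit. */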


@@ -544,7 +544,7 @@ extern int audit_signals;
#define audit_putname(n) do { ; } while (0)
#define __audit_inode(n,d) do { ; } while (0)
#define __audit_inode_child(i,p) do { ; } while (0)
#define audit_inode(n,d) do { ; } while (0)
#define audit_inode(n,d) do { (void)(d); } while (0)
#define audit_inode_child(i,p) do { ; } while (0)
#define audit_core_dumps(i) do { ; } while (0)
#define auditsc_get_stamp(c,t,s) (0)


@@ -79,6 +79,7 @@ struct autofs_packet_expire {
#define AUTOFS_IOC_FAIL _IO(0x93,0x61)
#define AUTOFS_IOC_CATATONIC _IO(0x93,0x62)
#define AUTOFS_IOC_PROTOVER _IOR(0x93,0x63,int)
#define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,compat_ulong_t)
#define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93,0x64,unsigned long)
#define AUTOFS_IOC_EXPIRE _IOR(0x93,0x65,struct autofs_packet_expire)


@@ -31,6 +31,7 @@ enum bdi_state {
BDI_async_congested, /* The async (write) queue is getting full */
BDI_sync_congested, /* The sync queue is getting full */
BDI_registered, /* bdi_register() was done */
BDI_writeback_running, /* Writeback is in progress */
BDI_unused, /* Available bits start here */
};
@@ -45,22 +46,21 @@ enum bdi_stat_item {
#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
struct bdi_writeback {
struct list_head list; /* hangs off the bdi */
struct backing_dev_info *bdi; /* our parent bdi */
struct backing_dev_info *bdi; /* our parent bdi */
unsigned int nr;
unsigned long last_old_flush; /* last old data flush */
unsigned long last_old_flush; /* last old data flush */
unsigned long last_active; /* last time bdi thread was active */
struct task_struct *task; /* writeback task */
struct list_head b_dirty; /* dirty inodes */
struct list_head b_io; /* parked for writeback */
struct list_head b_more_io; /* parked for more writeback */
struct task_struct *task; /* writeback thread */
struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
struct list_head b_dirty; /* dirty inodes */
struct list_head b_io; /* parked for writeback */
struct list_head b_more_io; /* parked for more writeback */
};
struct backing_dev_info {
struct list_head bdi_list;
struct rcu_head rcu_head;
unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
unsigned long state; /* Always use atomic bitops on this */
unsigned int capabilities; /* Device capabilities */
@@ -80,8 +80,7 @@ struct backing_dev_info {
unsigned int max_ratio, max_prop_frac;
struct bdi_writeback wb; /* default writeback info for this bdi */
spinlock_t wb_lock; /* protects update side of wb_list */
struct list_head wb_list; /* the flusher threads hanging off this bdi */
spinlock_t wb_lock; /* protects work_list */
struct list_head work_list;
@@ -105,9 +104,10 @@ void bdi_unregister(struct backing_dev_info *bdi);
int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_task(struct bdi_writeback *wb);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_arm_supers_timer(void);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
extern spinlock_t bdi_lock;
extern struct list_head bdi_list;


@@ -9,7 +9,7 @@
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
*
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
@@ -28,6 +28,9 @@
#include <asm/io.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#define BIO_DEBUG
#ifdef BIO_DEBUG
@@ -40,154 +43,6 @@
#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9)
/*
* was unsigned short, but we might as well be ready for > 64kB I/O pages
*/
struct bio_vec {
struct page *bv_page;
unsigned int bv_len;
unsigned int bv_offset;
};
struct bio_set;
struct bio;
struct bio_integrity_payload;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
*/
struct bio {
sector_t bi_sector; /* device address in 512 byte
sectors */
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
unsigned long bi_flags; /* status, command, etc */
unsigned long bi_rw; /* bottom bits READ/WRITE,
* top bits priority
*/
unsigned short bi_vcnt; /* how many bio_vec's */
unsigned short bi_idx; /* current index into bvl_vec */
/* Number of segments in this BIO after
* physical address coalescing is performed.
*/
unsigned int bi_phys_segments;
unsigned int bi_size; /* residual I/O count */
/*
* To keep track of the max segment size, we account for the
* sizes of the first and last mergeable segments in this bio.
*/
unsigned int bi_seg_front_size;
unsigned int bi_seg_back_size;
unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
unsigned int bi_comp_cpu; /* completion CPU */
atomic_t bi_cnt; /* pin count */
struct bio_vec *bi_io_vec; /* the actual vec list */
bio_end_io_t *bi_end_io;
void *bi_private;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
bio_destructor_t *bi_destructor; /* destructor */
/*
* We can inline a number of vecs at the end of the bio, to avoid
* double allocations for a small number of bio_vecs. This member
* MUST obviously be kept at the very end of the bio.
*/
struct bio_vec bi_inline_vecs[0];
};
/*
* bio flags
*/
#define BIO_UPTODATE 0 /* ok after I/O completion */
#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
#define BIO_EOF 2 /* out-of-bounds error */
#define BIO_SEG_VALID 3 /* bi_phys_segments valid */
#define BIO_CLONED 4 /* doesn't own data */
#define BIO_BOUNCED 5 /* bio is a bounce bio */
#define BIO_USER_MAPPED 6 /* contains user pages */
#define BIO_EOPNOTSUPP 7 /* not supported */
#define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */
#define BIO_NULL_MAPPED 9 /* contains invalid user pages */
#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */
#define BIO_QUIET 11 /* Make BIO Quiet */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
* top 4 bits of bio flags indicate the pool this bio came from
*/
#define BIO_POOL_BITS (4)
#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
/*
* bio bi_rw flags
*
* bit 0 -- data direction
* If not set, bio is a read from device. If set, it's a write to device.
* bit 1 -- fail fast device errors
* bit 2 -- fail fast transport errors
* bit 3 -- fail fast driver errors
* bit 4 -- rw-ahead when set
* bit 5 -- barrier
* Insert a serialization point in the IO queue, forcing previously
* submitted IO to be completed before this one is issued.
* bit 6 -- synchronous I/O hint.
* bit 7 -- Unplug the device immediately after submitting this bio.
* bit 8 -- metadata request
* Used for tracing to differentiate metadata and data IO. May also
* get some preferential treatment in the IO scheduler
* bit 9 -- discard sectors
* Informs the lower level device that this range of sectors is no longer
* used by the file system and may thus be freed by the device. Used
* for flash based storage.
* Don't want driver retries for any fast fail whatever the reason.
* bit 10 -- Tell the IO scheduler not to wait for more requests after this
one has been submitted, even if it is a SYNC request.
*/
enum bio_rw_flags {
BIO_RW,
BIO_RW_FAILFAST_DEV,
BIO_RW_FAILFAST_TRANSPORT,
BIO_RW_FAILFAST_DRIVER,
/* above flags must match REQ_* */
BIO_RW_AHEAD,
BIO_RW_BARRIER,
BIO_RW_SYNCIO,
BIO_RW_UNPLUG,
BIO_RW_META,
BIO_RW_DISCARD,
BIO_RW_NOIDLE,
};
/*
* First four bits must match between bio->bi_rw and rq->cmd_flags, make
* that explicit here.
*/
#define BIO_RW_RQ_MASK 0xf
static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
{
return (bio->bi_rw & (1 << flag)) != 0;
}
/*
* upper 16 bits of bi_rw define the io priority of this bio
*/
@@ -211,7 +66,10 @@ static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
#define bio_offset(bio) bio_iovec((bio))->bv_offset
#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio) ((bio)->bi_size >> 9)
#define bio_empty_barrier(bio) (bio_rw_flagged(bio, BIO_RW_BARRIER) && !bio_has_data(bio) && !bio_rw_flagged(bio, BIO_RW_DISCARD))
#define bio_empty_barrier(bio) \
((bio->bi_rw & REQ_HARDBARRIER) && \
!bio_has_data(bio) && \
!(bio->bi_rw & REQ_DISCARD))
static inline unsigned int bio_cur_bytes(struct bio *bio)
{

include/linux/blk_types.h (new file, 196 lines)

@@ -0,0 +1,196 @@
/*
* Block data types and constants. Directly include this file only to
* break include dependency loop.
*/
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H
#ifdef CONFIG_BLOCK
#include <linux/types.h>
struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);
/*
* was unsigned short, but we might as well be ready for > 64kB I/O pages
*/
struct bio_vec {
struct page *bv_page;
unsigned int bv_len;
unsigned int bv_offset;
};
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
*/
struct bio {
sector_t bi_sector; /* device address in 512 byte
sectors */
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
unsigned long bi_flags; /* status, command, etc */
unsigned long bi_rw; /* bottom bits READ/WRITE,
* top bits priority
*/
unsigned short bi_vcnt; /* how many bio_vec's */
unsigned short bi_idx; /* current index into bvl_vec */
/* Number of segments in this BIO after
* physical address coalescing is performed.
*/
unsigned int bi_phys_segments;
unsigned int bi_size; /* residual I/O count */
/*
* To keep track of the max segment size, we account for the
* sizes of the first and last mergeable segments in this bio.
*/
unsigned int bi_seg_front_size;
unsigned int bi_seg_back_size;
unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
unsigned int bi_comp_cpu; /* completion CPU */
atomic_t bi_cnt; /* pin count */
struct bio_vec *bi_io_vec; /* the actual vec list */
bio_end_io_t *bi_end_io;
void *bi_private;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
bio_destructor_t *bi_destructor; /* destructor */
/*
* We can inline a number of vecs at the end of the bio, to avoid
* double allocations for a small number of bio_vecs. This member
* MUST obviously be kept at the very end of the bio.
*/
struct bio_vec bi_inline_vecs[0];
};
/*
* bio flags
*/
#define BIO_UPTODATE 0 /* ok after I/O completion */
#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
#define BIO_EOF 2 /* out-of-bounds error */
#define BIO_SEG_VALID 3 /* bi_phys_segments valid */
#define BIO_CLONED 4 /* doesn't own data */
#define BIO_BOUNCED 5 /* bio is a bounce bio */
#define BIO_USER_MAPPED 6 /* contains user pages */
#define BIO_EOPNOTSUPP 7 /* not supported */
#define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */
#define BIO_NULL_MAPPED 9 /* contains invalid user pages */
#define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */
#define BIO_QUIET 11 /* Make BIO Quiet */
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*
* top 4 bits of bio flags indicate the pool this bio came from
*/
#define BIO_POOL_BITS (4)
#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
#endif /* CONFIG_BLOCK */
/*
* Request flags. For use in the cmd_flags field of struct request, and in
* bi_rw of struct bio. Note that some flags are only valid in either one.
*/
enum rq_flag_bits {
/* common flags */
__REQ_WRITE, /* not set, read. set, write */
__REQ_FAILFAST_DEV, /* no driver retries of device errors */
__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
__REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
__REQ_HARDBARRIER, /* may not be passed by drive either */
__REQ_SYNC, /* request is sync (sync write or read) */
__REQ_META, /* metadata io request */
__REQ_DISCARD, /* request to discard sectors */
__REQ_NOIDLE, /* don't anticipate more IO after this one */
/* bio only flags */
__REQ_UNPLUG, /* unplug the queue immediately after submission */
__REQ_RAHEAD, /* read ahead, can fail anytime */
/* request only flags */
__REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
__REQ_FUA, /* forced unit access */
__REQ_NOMERGE, /* don't touch this for merging */
__REQ_STARTED, /* drive already may have started this one */
__REQ_DONTPREP, /* don't call prep for this one */
__REQ_QUEUED, /* uses queueing */
__REQ_ELVPRIV, /* elevator private data attached */
__REQ_FAILED, /* set if the request failed */
__REQ_QUIET, /* don't worry about errors */
__REQ_PREEMPT, /* set for "ide_preempt" requests */
__REQ_ORDERED_COLOR, /* is before or after barrier */
__REQ_ALLOCED, /* request came from our alloc pool */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_INTEGRITY, /* integrity metadata has been remapped */
__REQ_FLUSH, /* request for cache flush */
__REQ_IO_STAT, /* account I/O stat */
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
__REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
__REQ_NR_BITS, /* stops here */
};
#define REQ_WRITE (1 << __REQ_WRITE)
#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
#define REQ_SYNC (1 << __REQ_SYNC)
#define REQ_META (1 << __REQ_META)
#define REQ_DISCARD (1 << __REQ_DISCARD)
#define REQ_NOIDLE (1 << __REQ_NOIDLE)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
REQ_META | REQ_DISCARD | REQ_NOIDLE)
#define REQ_UNPLUG (1 << __REQ_UNPLUG)
#define REQ_RAHEAD (1 << __REQ_RAHEAD)
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_FUA (1 << __REQ_FUA)
#define REQ_NOMERGE (1 << __REQ_NOMERGE)
#define REQ_STARTED (1 << __REQ_STARTED)
#define REQ_DONTPREP (1 << __REQ_DONTPREP)
#define REQ_QUEUED (1 << __REQ_QUEUED)
#define REQ_ELVPRIV (1 << __REQ_ELVPRIV)
#define REQ_FAILED (1 << __REQ_FAILED)
#define REQ_QUIET (1 << __REQ_QUIET)
#define REQ_PREEMPT (1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
#define REQ_ALLOCED (1 << __REQ_ALLOCED)
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
#define REQ_FLUSH (1 << __REQ_FLUSH)
#define REQ_IO_STAT (1 << __REQ_IO_STAT)
#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
#define REQ_SECURE (1 << __REQ_SECURE)
#endif /* __LINUX_BLK_TYPES_H */
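
Since bios and requests now share one flag namespace, driver-side checks can apply the REQ_* constants directly to bio->bi_rw. A small hypothetical helper (not part of this patch) makes that concrete:

static inline bool my_bio_is_urgent(struct bio *bio)
{
	/* sync I/O and barriers should be dispatched promptly */
	return (bio->bi_rw & (REQ_SYNC | REQ_HARDBARRIER)) != 0;
}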


@@ -60,7 +60,6 @@ enum rq_cmd_type_bits {
REQ_TYPE_PM_RESUME, /* resume request */
REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
REQ_TYPE_SPECIAL, /* driver defined type */
REQ_TYPE_LINUX_BLOCK, /* generic block layer message */
/*
* for ATA/ATAPI devices. this really doesn't belong here, ide should
* use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
@@ -70,84 +69,6 @@ enum rq_cmd_type_bits {
REQ_TYPE_ATA_PC,
};
/*
* For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
* sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
* SCSI cdb.
*
* 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
* typically to differentiate REQ_TYPE_SPECIAL requests.
*
*/
enum {
REQ_LB_OP_EJECT = 0x40, /* eject request */
REQ_LB_OP_FLUSH = 0x41, /* flush request */
};
/*
* request type modified bits. first four bits match BIO_RW* bits, important
*/
enum rq_flag_bits {
__REQ_RW, /* not set, read. set, write */
__REQ_FAILFAST_DEV, /* no driver retries of device errors */
__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
__REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
/* above flags must match BIO_RW_* */
__REQ_DISCARD, /* request to discard sectors */
__REQ_SORTED, /* elevator knows about this request */
__REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
__REQ_HARDBARRIER, /* may not be passed by drive either */
__REQ_FUA, /* forced unit access */
__REQ_NOMERGE, /* don't touch this for merging */
__REQ_STARTED, /* drive already may have started this one */
__REQ_DONTPREP, /* don't call prep for this one */
__REQ_QUEUED, /* uses queueing */
__REQ_ELVPRIV, /* elevator private data attached */
__REQ_FAILED, /* set if the request failed */
__REQ_QUIET, /* don't worry about errors */
__REQ_PREEMPT, /* set for "ide_preempt" requests */
__REQ_ORDERED_COLOR, /* is before or after barrier */
__REQ_RW_SYNC, /* request is sync (sync write or read) */
__REQ_ALLOCED, /* request came from our alloc pool */
__REQ_RW_META, /* metadata io request */
__REQ_COPY_USER, /* contains copies of user pages */
__REQ_INTEGRITY, /* integrity metadata has been remapped */
__REQ_NOIDLE, /* Don't anticipate more IO after this one */
__REQ_IO_STAT, /* account I/O stat */
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
__REQ_NR_BITS, /* stops here */
};
#define REQ_RW (1 << __REQ_RW)
#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
#define REQ_DISCARD (1 << __REQ_DISCARD)
#define REQ_SORTED (1 << __REQ_SORTED)
#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
#define REQ_FUA (1 << __REQ_FUA)
#define REQ_NOMERGE (1 << __REQ_NOMERGE)
#define REQ_STARTED (1 << __REQ_STARTED)
#define REQ_DONTPREP (1 << __REQ_DONTPREP)
#define REQ_QUEUED (1 << __REQ_QUEUED)
#define REQ_ELVPRIV (1 << __REQ_ELVPRIV)
#define REQ_FAILED (1 << __REQ_FAILED)
#define REQ_QUIET (1 << __REQ_QUIET)
#define REQ_PREEMPT (1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC (1 << __REQ_RW_SYNC)
#define REQ_ALLOCED (1 << __REQ_ALLOCED)
#define REQ_RW_META (1 << __REQ_RW_META)
#define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
#define REQ_NOIDLE (1 << __REQ_NOIDLE)
#define REQ_IO_STAT (1 << __REQ_IO_STAT)
#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
#define REQ_FAILFAST_MASK (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
REQ_FAILFAST_DRIVER)
#define BLK_MAX_CDB 16
/*
@@ -264,6 +185,7 @@ struct request_pm_state
typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);
struct bio_vec;
@@ -275,7 +197,6 @@ struct bvec_merge_data {
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
@@ -346,9 +267,9 @@ struct request_queue
request_fn_proc *request_fn;
make_request_fn *make_request_fn;
prep_rq_fn *prep_rq_fn;
unprep_rq_fn *unprep_rq_fn;
unplug_fn *unplug_fn;
merge_bvec_fn *merge_bvec_fn;
prepare_flush_fn *prepare_flush_fn;
softirq_done_fn *softirq_done_fn;
rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
@@ -467,11 +388,14 @@ struct request_queue
#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_CLUSTER) | \
(1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP))
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_ADD_RANDOM))
static inline int queue_is_locked(struct request_queue *q)
{
@@ -596,38 +520,28 @@ enum {
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_flushing(q) ((q)->ordseq)
#define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
#define blk_failfast_dev(rq) ((rq)->cmd_flags & REQ_FAILFAST_DEV)
#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
#define blk_failfast_driver(rq) ((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
#define blk_noretry_request(rq) (blk_failfast_dev(rq) || \
blk_failfast_transport(rq) || \
blk_failfast_driver(rq))
#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
#define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT)
#define blk_rq_quiet(rq) ((rq)->cmd_flags & REQ_QUIET)
#define blk_account_rq(rq) \
(((rq)->cmd_flags & REQ_STARTED) && \
((rq)->cmd_type == REQ_TYPE_FS || \
((rq)->cmd_flags & REQ_DISCARD)))
#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
#define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq) \
(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
(rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
#define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
@@ -641,7 +555,7 @@ enum {
*/
static inline bool rw_is_sync(unsigned int rw_flags)
{
return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}
static inline bool rq_is_sync(struct request *rq)
@@ -649,9 +563,6 @@ static inline bool rq_is_sync(struct request *rq)
return rw_is_sync(rq->cmd_flags);
}
#define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META)
#define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE)
static inline int blk_queue_full(struct request_queue *q, int sync)
{
if (sync)
@@ -684,7 +595,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq) \
(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
(blk_discard_rq(rq) || blk_fs_request((rq))))
(((rq)->cmd_flags & REQ_DISCARD) || \
(rq)->cmd_type == REQ_TYPE_FS))
/*
* q->prep_rq_fn return values
@@ -709,7 +621,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
#define BLK_BOUNCE_HIGH -1ULL
#endif
#define BLK_BOUNCE_ANY (-1ULL)
#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24))
/*
* default timeout for SG_IO if none specified
@@ -781,6 +693,8 @@ extern struct request *blk_make_request(struct request_queue *, struct bio *,
gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
@@ -915,6 +829,7 @@ extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_unprep_request(struct request *);
/*
* Access functions for manipulating queue properties
@@ -959,6 +874,7 @@ extern int blk_queue_dma_drain(struct request_queue *q,
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
@@ -966,7 +882,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern int blk_queue_ordered(struct request_queue *, unsigned);
extern bool blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
@@ -1005,10 +921,12 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
}
enum {
BLKDEV_WAIT, /* wait for completion */
BLKDEV_BARRIER, /*issue request with barrier */
BLKDEV_BARRIER, /* issue request with barrier */
BLKDEV_SECURE, /* secure discard */
};
#define BLKDEV_IFL_WAIT (1 << BLKDEV_WAIT)
#define BLKDEV_IFL_BARRIER (1 << BLKDEV_BARRIER)
#define BLKDEV_IFL_SECURE (1 << BLKDEV_SECURE)
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
unsigned long);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
@@ -1020,7 +938,7 @@ static inline int sb_issue_discard(struct super_block *sb,
{
block <<= (sb->s_blocksize_bits - 9);
nr_blocks <<= (sb->s_blocksize_bits - 9);
return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL,
return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS,
BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
}
@@ -1333,7 +1251,6 @@ static inline int blk_integrity_rq(struct request *rq)
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
int (*release) (struct gendisk *, fmode_t);
int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*direct_access) (struct block_device *, sector_t,


@@ -5,6 +5,7 @@
#ifdef __KERNEL__
#include <linux/blkdev.h>
#include <linux/relay.h>
#include <linux/compat.h>
#endif
/*
@@ -220,11 +221,26 @@ static inline int blk_trace_init_sysfs(struct device *dev)
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_COMPAT
struct compat_blk_user_trace_setup {
char name[32];
u16 act_mask;
u32 buf_size;
u32 buf_nr;
compat_u64 start_lba;
compat_u64 end_lba;
u32 pid;
};
#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)
#endif
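
A sketch of what a compat_ioctl path is expected to do with this layout: widen the packed 32-bit struct into the native struct blk_user_trace_setup (field names as in blktrace_api.h) and then proceed as for BLKTRACESETUP. The helper name is invented:

static void my_widen_buts(struct blk_user_trace_setup *buts,
			  const struct compat_blk_user_trace_setup *cbuts)
{
	memset(buts, 0, sizeof(*buts));
	memcpy(buts->name, cbuts->name, sizeof(cbuts->name));
	buts->act_mask	= cbuts->act_mask;
	buts->buf_size	= cbuts->buf_size;
	buts->buf_nr	= cbuts->buf_nr;
	buts->start_lba	= cbuts->start_lba;
	buts->end_lba	= cbuts->end_lba;
	buts->pid	= cbuts->pid;
}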
#if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK)
static inline int blk_cmd_buf_len(struct request *rq)
{
return blk_pc_request(rq) ? rq->cmd_len * 3 : 1;
return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1;
}
extern void blk_dump_cmd(char *buf, struct request *rq);


@@ -1,6 +1,13 @@
#define PHY_ID_BCM50610 0x0143bd60
#define PHY_ID_BCM50610M 0x0143bd70
#define PHY_ID_BCM5241 0x0143bc30
#define PHY_ID_BCMAC131 0x0143bc70
#define PHY_ID_BCM5481 0x0143bca0
#define PHY_ID_BCM5482 0x0143bcb0
#define PHY_ID_BCM5411 0x00206070
#define PHY_ID_BCM5421 0x002060e0
#define PHY_ID_BCM5464 0x002060b0
#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM57780 0x03625d90
#define PHY_BCM_OUI_MASK 0xfffffc00


@@ -203,12 +203,10 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
unsigned long from);
int block_write_begin_newtrunc(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page **, void **, get_block_t*);
int block_write_begin(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page **, void **, get_block_t*);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page *, void *);
@@ -217,9 +215,6 @@ int generic_write_end(struct file *, struct address_space *,
struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
int cont_write_begin_newtrunc(struct file *, struct address_space *, loff_t,
unsigned, unsigned, struct page **, void **,
get_block_t *, loff_t *);
int cont_write_begin(struct file *, struct address_space *, loff_t,
unsigned, unsigned, struct page **, void **,
get_block_t *, loff_t *);
@@ -230,12 +225,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
void block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int file_fsync(struct file *, int);
int nobh_write_begin_newtrunc(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
struct page **, void **, get_block_t*);
int nobh_write_begin(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
struct page **, void **, get_block_t*);
int nobh_write_end(struct file *, struct address_space *,
loff_t, unsigned, unsigned,
@@ -314,15 +304,10 @@ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
bh->b_size = sb->s_blocksize;
}
/*
* Calling wait_on_buffer() for a zero-ref buffer is illegal, so we call into
* __wait_on_buffer() just to trip a debug check. Because debug code in inline
* functions is bloaty.
*/
static inline void wait_on_buffer(struct buffer_head *bh)
{
might_sleep();
if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
if (buffer_locked(bh))
__wait_on_buffer(bh);
}


@@ -62,6 +62,7 @@ enum caif_channel_priority {
* @CAIFPROTO_DATAGRAM_LOOP: Datagram loopback channel, used for testing.
* @CAIFPROTO_UTIL: Utility (Psock) channel.
* @CAIFPROTO_RFM: Remote File Manager
* @CAIFPROTO_DEBUG: Debug link
*
* This enum defines the CAIF Channel type to be used. This defines
* the service to connect to on the modem.
@@ -72,6 +73,7 @@ enum caif_protocol_type {
CAIFPROTO_DATAGRAM_LOOP,
CAIFPROTO_UTIL,
CAIFPROTO_RFM,
CAIFPROTO_DEBUG,
_CAIFPROTO_MAX
};
#define CAIFPROTO_MAX _CAIFPROTO_MAX
@@ -83,6 +85,28 @@ enum caif_protocol_type {
enum caif_at_type {
CAIF_ATTYPE_PLAIN = 2
};
/**
* enum caif_debug_type - Content selection for debug connection
* @CAIF_DEBUG_TRACE_INTERACTIVE: Connection will contain
* both trace and interactive debug.
* @CAIF_DEBUG_TRACE: Connection contains trace only.
* @CAIF_DEBUG_INTERACTIVE: Connection to interactive debug.
*/
enum caif_debug_type {
CAIF_DEBUG_TRACE_INTERACTIVE = 0,
CAIF_DEBUG_TRACE,
CAIF_DEBUG_INTERACTIVE,
};
/**
* enum caif_debug_service - Debug Service Endpoint
* @CAIF_RADIO_DEBUG_SERVICE: Debug service on the Radio sub-system
* @CAIF_APP_DEBUG_SERVICE: Debug for the applications sub-system
*/
enum caif_debug_service {
CAIF_RADIO_DEBUG_SERVICE = 1,
CAIF_APP_DEBUG_SERVICE
};
/**
* struct sockaddr_caif - the sockaddr structure for CAIF sockets.
@@ -109,6 +133,12 @@ enum caif_at_type {
*
* @u.rfm.volume: Volume to mount.
*
* @u.dbg: Applies when family = CAIFPROTO_DEBUG.
*
* @u.dbg.type: Type of debug connection to set up
* (caif_debug_type).
*
* @u.dbg.service: Service sub-system to connect (caif_debug_service).
*
* Description:
* This structure holds the connect parameters used for setting up a
* CAIF Channel. It defines the service to connect to on the modem.
@@ -130,6 +160,10 @@ struct sockaddr_caif {
__u32 connection_id;
char volume[16];
} rfm; /* CAIFPROTO_RFM */
struct {
__u8 type; /* type:enum caif_debug_type */
__u8 service; /* service:caif_debug_service */
} dbg; /* CAIFPROTO_DEBUG */
} u;
};
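
For illustration only, a userspace sketch of opening a debug channel with the new fields (assumes <sys/socket.h>, <unistd.h> and this header; error paths trimmed):

static int open_radio_trace(void)
{
	struct sockaddr_caif addr = {
		.family		= AF_CAIF,
		.u.dbg.type	= CAIF_DEBUG_TRACE,
		.u.dbg.service	= CAIF_RADIO_DEBUG_SERVICE,
	};
	int fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_DEBUG);

	if (fd < 0)
		return -1;
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* reads yield trace output from the modem */
}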


@@ -0,0 +1,20 @@
/*
* Copyright (C) 2010 Marc Kleine-Budde <kernel@pengutronix.de>
*
* This file is released under the GPLv2
*
*/
#ifndef __CAN_PLATFORM_FLEXCAN_H
#define __CAN_PLATFORM_FLEXCAN_H
/**
* struct flexcan_platform_data - flex CAN controller platform data
* @transceiver_switch: called to power on/off the transceiver
*
*/
struct flexcan_platform_data {
void (*transceiver_switch)(int enable);
};
#endif /* __CAN_PLATFORM_FLEXCAN_H */
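
A board-file sketch of how this hook is meant to be wired up (the GPIO number and foo_* names are invented):

static void foo_can_xcvr_switch(int enable)
{
	gpio_set_value(42, enable);	/* 42: imaginary transceiver-enable pin */
}

static struct flexcan_platform_data foo_flexcan_pdata = {
	.transceiver_switch	= foo_can_xcvr_switch,
};

/* passed as the platform_data of the flexcan platform device */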


@@ -49,9 +49,6 @@ typedef struct __user_cap_data_struct {
} __user *cap_user_data_t;
#define XATTR_CAPS_SUFFIX "capability"
#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
#define VFS_CAP_REVISION_MASK 0xFF000000
#define VFS_CAP_REVISION_SHIFT 24
#define VFS_CAP_FLAGS_MASK ~VFS_CAP_REVISION_MASK


@@ -578,6 +578,7 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
int cgroup_attach_task_current_cg(struct task_struct *);
/*
* CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -634,6 +635,12 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
return -EINVAL;
}
/* No cgroups - nothing to do */
static inline int cgroup_attach_task_current_cg(struct task_struct *t)
{
return 0;
}
#endif /* !CONFIG_CGROUPS */
#endif /* _LINUX_CGROUP_H */


@@ -292,6 +292,8 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
*/
extern int
__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
extern void
__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq);
static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
@@ -303,6 +305,15 @@ static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
return __clocksource_register_scale(cs, 1000, khz);
}
static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz)
{
__clocksource_updatefreq_scale(cs, 1, hz);
}
static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
{
__clocksource_updatefreq_scale(cs, 1000, khz);
}
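
These wrappers let a timer driver re-register a recalibrated rate without tearing the clocksource down; a minimal sketch, assuming foo_cs was registered earlier with clocksource_register_hz():

static struct clocksource foo_cs;

static void foo_recalibrate(u32 measured_hz)
{
	/* recompute mult/shift for the newly measured rate */
	__clocksource_updatefreq_hz(&foo_cs, measured_hz);
}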
static inline void
clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
@@ -313,11 +324,13 @@ clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void
update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult);
update_vsyscall(struct timespec *ts, struct timespec *wtm,
struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);
#else
static inline void
update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult)
update_vsyscall(struct timespec *ts, struct timespec *wtm,
struct clocksource *c, u32 mult)
{
}


@@ -86,9 +86,9 @@ struct upc_req {
wait_queue_head_t uc_sleep; /* process' wait queue */
};
#define REQ_ASYNC 0x1
#define REQ_READ 0x2
#define REQ_WRITE 0x4
#define REQ_ABORT 0x8
#define CODA_REQ_ASYNC 0x1
#define CODA_REQ_READ 0x2
#define CODA_REQ_WRITE 0x4
#define CODA_REQ_ABORT 0x8
#endif


@@ -331,7 +331,7 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename,
asmlinkage long compat_sys_utimensat(unsigned int dfd, const char __user *filename,
struct compat_timespec __user *t, int flags);
asmlinkage long compat_sys_signalfd(int ufd,
@@ -348,9 +348,9 @@ asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page,
const int __user *nodes,
int __user *status,
int flags);
asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename,
asmlinkage long compat_sys_futimesat(unsigned int dfd, const char __user *filename,
struct compat_timeval __user *t);
asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
asmlinkage long compat_sys_newfstatat(unsigned int dfd, const char __user * filename,
struct compat_stat __user *statbuf,
int flag);
asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,


@@ -35,8 +35,7 @@
(typeof(ptr)) (__ptr + (off)); })
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) \
BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0])))
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
/*
* Force always-inline if the user requests it so via the .config,


@@ -16,6 +16,7 @@
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
# define __rcu
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
@@ -34,6 +35,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
#endif
#ifdef __KERNEL__


@@ -55,6 +55,16 @@ struct consw {
void (*con_invert_region)(struct vc_data *, u16 *, int);
u16 *(*con_screen_pos)(struct vc_data *, int);
unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *);
/*
* Prepare the console for the debugger. This includes, but is not
* limited to, unblanking the console, loading an appropriate
* palette, and allowing debugger generated output.
*/
int (*con_debug_enter)(struct vc_data *);
/*
* Restore the console to its pre-debug state as closely as possible.
*/
int (*con_debug_leave)(struct vc_data *);
};
extern const struct consw *conswitchp;
@@ -69,6 +79,14 @@ int register_con_driver(const struct consw *csw, int first, int last);
int unregister_con_driver(const struct consw *csw);
int take_over_console(const struct consw *sw, int first, int last, int deflt);
void give_up_console(const struct consw *sw);
#ifdef CONFIG_HW_CONSOLE
int con_debug_enter(struct vc_data *vc);
int con_debug_leave(void);
#else
#define con_debug_enter(vc) (0)
#define con_debug_leave() (0)
#endif
/* scroll */
#define SM_UP (1)
#define SM_DOWN (2)


@@ -21,6 +21,8 @@ struct vt_struct;
#define NPAR 16
struct vc_data {
struct tty_port port; /* Upper level data */
unsigned short vc_num; /* Console number */
unsigned int vc_cols; /* [#] Console size */
unsigned int vc_rows;
@@ -56,7 +58,6 @@ struct vc_data {
/* VT terminal data */
unsigned int vc_state; /* Escape sequence parser state */
unsigned int vc_npar,vc_par[NPAR]; /* Parameters of current escape sequence */
struct tty_struct *vc_tty; /* TTY we are attached to */
/* data for manual vt switching */
struct vt_mode vt_mode;
struct pid *vt_pid;
@@ -105,6 +106,7 @@ struct vc_data {
struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
unsigned long vc_uni_pagedir;
unsigned long *vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
/* additional information is in vt_kern.h */
};


@@ -48,6 +48,33 @@ extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;
/*
* CPU notifier priorities.
*/
enum {
/*
* SCHED_ACTIVE marks a cpu which is coming up active during
* CPU_ONLINE and CPU_DOWN_FAILED and must be the first
* notifier. CPUSET_ACTIVE adjusts cpuset according to
* cpu_active mask right after SCHED_ACTIVE. During
* CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
* ordered in the similar way.
*
* This ordering guarantees consistent cpu_active mask and
* migration behavior to all cpu notifiers.
*/
CPU_PRI_SCHED_ACTIVE = INT_MAX,
CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1,
CPU_PRI_SCHED_INACTIVE = INT_MIN + 1,
CPU_PRI_CPUSET_INACTIVE = INT_MIN,
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
/* prepare workqueues for other notifiers */
CPU_PRI_WORKQUEUE = 5,
};
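
A sketch of how a subsystem would opt into this ordering through the existing notifier interface (foo_* names are invented):

static int foo_cpu_callback(struct notifier_block *nb,
			    unsigned long action, void *hcpu)
{
	/* react to CPU_ONLINE, CPU_DOWN_PREPARE, ... here */
	return NOTIFY_OK;
}

static struct notifier_block foo_cpu_nb = {
	.notifier_call	= foo_cpu_callback,
	.priority	= CPU_PRI_WORKQUEUE,	/* slot chosen for illustration */
};

/* register_cpu_notifier(&foo_cpu_nb); */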
#ifdef CONFIG_SMP
/* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)


@@ -196,11 +196,6 @@ extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
int lock_policy_rwsem_read(int cpu);
int lock_policy_rwsem_write(int cpu);
void unlock_policy_rwsem_read(int cpu);
void unlock_policy_rwsem_write(int cpu);
/*********************************************************************
* CPUFREQ DRIVER INTERFACE *


@@ -52,6 +52,7 @@ struct cpuidle_state {
#define CPUIDLE_FLAG_SHALLOW (0x20) /* low latency, minimal savings */
#define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */
#define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */
#define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
@@ -84,6 +85,7 @@ struct cpuidle_state_kobj {
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
unsigned int power_specified:1;
unsigned int cpu;
int last_residency;
@@ -97,6 +99,8 @@ struct cpuidle_device {
struct completion kobj_unregister;
void *governor_data;
struct cpuidle_state *safe_state;
int (*prepare) (struct cpuidle_device *dev);
};
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);


@@ -20,6 +20,7 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -132,6 +133,11 @@ static inline void set_mems_allowed(nodemask_t nodemask)
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
static inline void cpuset_update_active_cpus(void)
{
partition_sched_domains(1, NULL, NULL);
}
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{


@@ -315,6 +315,8 @@ extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
extern char *__d_path(const struct path *path, struct path *root, char *, int);
extern char *d_path(const struct path *, char *, int);
extern char *d_path_with_unreachable(const struct path *, char *, int);
extern char *__dentry_path(struct dentry *, char *, int);
extern char *dentry_path(struct dentry *, char *, int);
/* Allocation counts.. */


@@ -45,6 +45,7 @@ extern unsigned long lpj_fine;
void calibrate_delay(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
void usleep_range(unsigned long min, unsigned long max);
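
usleep_range() gives drivers an hrtimer-backed sleep for delays too short for msleep()'s jiffy granularity; a sketch of the intended call pattern (the settling delay is invented):

static void foo_settle(void)
{
	/* wait 100-200 us for the device to settle: better than
	 * busy-waiting in udelay() or oversleeping in msleep(1) */
	usleep_range(100, 200);
}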
static inline void ssleep(unsigned int seconds)
{


@@ -22,7 +22,7 @@ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
union map_info {
void *ptr;
unsigned long long ll;
unsigned flush_request;
unsigned target_request_nr;
};
/*
@@ -174,12 +174,18 @@ struct dm_target {
* A number of zero-length barrier requests that will be submitted
* to the target for the purpose of flushing cache.
*
* The request number will be placed in union map_info->flush_request.
* The request number will be placed in union map_info->target_request_nr.
* It is a responsibility of the target driver to remap these requests
* to the real underlying devices.
*/
unsigned num_flush_requests;
/*
* The number of discard requests that will be submitted to the
* target. map_info->request_nr is used just like num_flush_requests.
*/
unsigned num_discard_requests;
/* target specific data */
void *private;
@@ -392,6 +398,12 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
#define dm_array_too_big(fixed, obj, num) \
((num) > (UINT_MAX - (fixed)) / (obj))
/*
* Sector offset taken relative to the start of the target instead of
* relative to the start of the device.
*/
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
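
To see the macro in action, a map sketch in the style of a linear target (struct foo_ctx and its fields are invented):

struct foo_ctx {
	struct dm_dev *dev;	/* backing device */
	sector_t start;		/* offset into it */
};

static int foo_map(struct dm_target *ti, struct bio *bio,
		   union map_info *map_context)
{
	struct foo_ctx *fc = ti->private;

	bio->bi_bdev = fc->dev->bdev;
	/* rebase from target-relative to device-relative sectors */
	bio->bi_sector = fc->start + dm_target_offset(ti, bio->bi_sector);
	return DM_MAPIO_REMAPPED;
}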
static inline sector_t to_sector(unsigned long n)
{
return (n >> SECTOR_SHIFT);


@@ -84,9 +84,8 @@ struct device *bus_find_device_by_name(struct bus_type *bus,
struct device *start,
const char *name);
int __must_check bus_for_each_drv(struct bus_type *bus,
struct device_driver *start, void *data,
int (*fn)(struct device_driver *, void *));
int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
void *data, int (*fn)(struct device_driver *, void *));
void bus_sort_breadthfirst(struct bus_type *bus,
int (*compare)(const struct device *a,
@@ -110,10 +109,12 @@ extern int bus_unregister_notifier(struct bus_type *bus,
*/
#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */
#define BUS_NOTIFY_BOUND_DRIVER 0x00000003 /* driver bound to device */
#define BUS_NOTIFY_UNBIND_DRIVER 0x00000004 /* driver about to be
#define BUS_NOTIFY_BIND_DRIVER 0x00000003 /* driver about to be
bound */
#define BUS_NOTIFY_BOUND_DRIVER 0x00000004 /* driver bound to device */
#define BUS_NOTIFY_UNBIND_DRIVER 0x00000005 /* driver about to be
unbound */
#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000005 /* driver is unbound
#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000006 /* driver is unbound
from the device */
extern struct kset *bus_get_kset(struct bus_type *bus);
@@ -551,7 +552,7 @@ extern int device_for_each_child(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
extern struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
extern int device_rename(struct device *dev, char *new_name);
extern int device_rename(struct device *dev, const char *new_name);
extern int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
extern const char *device_get_devnode(struct device *dev,
@@ -638,43 +639,103 @@ extern void sysdev_shutdown(void);
/* debugging and troubleshooting/diagnostic helpers. */
extern const char *dev_driver_string(const struct device *dev);
#define dev_printk(level, dev, format, arg...) \
printk(level "%s %s: " format , dev_driver_string(dev) , \
dev_name(dev) , ## arg)
#define dev_emerg(dev, format, arg...) \
dev_printk(KERN_EMERG , dev , format , ## arg)
#define dev_alert(dev, format, arg...) \
dev_printk(KERN_ALERT , dev , format , ## arg)
#define dev_crit(dev, format, arg...) \
dev_printk(KERN_CRIT , dev , format , ## arg)
#define dev_err(dev, format, arg...) \
dev_printk(KERN_ERR , dev , format , ## arg)
#define dev_warn(dev, format, arg...) \
dev_printk(KERN_WARNING , dev , format , ## arg)
#define dev_notice(dev, format, arg...) \
dev_printk(KERN_NOTICE , dev , format , ## arg)
#define dev_info(dev, format, arg...) \
dev_printk(KERN_INFO , dev , format , ## arg)
#ifdef CONFIG_PRINTK
extern int dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
__attribute__ ((format (printf, 3, 4)));
extern int dev_emerg(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int dev_alert(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int dev_crit(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int dev_err(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int dev_warn(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int dev_notice(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int _dev_info(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
#else
static inline int dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
__attribute__ ((format (printf, 3, 4)));
static inline int dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
{ return 0; }
static inline int dev_emerg(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
static inline int dev_emerg(const struct device *dev, const char *fmt, ...)
{ return 0; }
static inline int dev_crit(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
static inline int dev_crit(const struct device *dev, const char *fmt, ...)
{ return 0; }
static inline int dev_alert(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
static inline int dev_alert(const struct device *dev, const char *fmt, ...)
{ return 0; }
static inline int dev_err(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
static inline int dev_err(const struct device *dev, const char *fmt, ...)
{ return 0; }
static inline int dev_warn(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
static inline int dev_warn(const struct device *dev, const char *fmt, ...)
{ return 0; }
static inline int dev_notice(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
static inline int dev_notice(const struct device *dev, const char *fmt, ...)
{ return 0; }
static inline int _dev_info(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
static inline int _dev_info(const struct device *dev, const char *fmt, ...)
{ return 0; }
#endif
/*
* Stupid hackaround for existing non-printk uses of dev_info
*
* Note that the definition of dev_info below is actually _dev_info
* and a macro is used to avoid redefining dev_info
*/
#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
#if defined(DEBUG)
#define dev_dbg(dev, format, arg...) \
dev_printk(KERN_DEBUG , dev , format , ## arg)
dev_printk(KERN_DEBUG, dev, format, ##arg)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define dev_dbg(dev, format, ...) do { \
#define dev_dbg(dev, format, ...) \
do { \
dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
} while (0)
} while (0)
#else
#define dev_dbg(dev, format, arg...) \
({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
#define dev_dbg(dev, format, arg...) \
({ \
if (0) \
dev_printk(KERN_DEBUG, dev, format, ##arg); \
0; \
})
#endif
#ifdef VERBOSE_DEBUG
#define dev_vdbg dev_dbg
#else
#define dev_vdbg(dev, format, arg...) \
({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
#define dev_vdbg(dev, format, arg...) \
({ \
if (0) \
dev_printk(KERN_DEBUG, dev, format, ##arg); \
0; \
})
#endif
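
As a quick illustration, here is a minimal, hypothetical driver sketch showing how this family of helpers is normally used; foo_probe() and its failure path are invented, and only the macros and functions declared above are assumed.

#include <linux/device.h>

/* Hypothetical sketch: typical use of the dev_* helpers in a probe path. */
static int foo_probe(struct device *dev)
{
	int err = -ENODEV;		/* pretend some setup step failed */

	dev_dbg(dev, "probing\n");	/* compiled out unless DEBUG/dynamic debug */

	if (err) {
		dev_err(dev, "setup failed: %d\n", err);
		return err;
	}

	dev_info(dev, "device ready\n");	/* expands to _dev_info() */
	return 0;
}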
/*

View file

@@ -11,6 +11,7 @@
#include <linux/types.h>
#define DM_DIR "mapper" /* Slashes not supported */
#define DM_CONTROL_NODE "control"
#define DM_MAX_TYPE_NAME 16
#define DM_NAME_LEN 128
#define DM_UUID_LEN 129
@@ -266,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
#define DM_VERSION_MINOR 17
#define DM_VERSION_MINOR 18
#define DM_VERSION_PATCHLEVEL 0
#define DM_VERSION_EXTRA "-ioctl (2010-03-05)"
#define DM_VERSION_EXTRA "-ioctl (2010-06-29)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */

View file

@@ -142,6 +142,16 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
return -EIO;
}
#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
return ARCH_DMA_MINALIGN;
#endif
return 1;
}
#endif
/* flags for the coherent memory api */
#define DMA_MEMORY_MAP 0x01
#define DMA_MEMORY_IO 0x02

View file

@@ -114,11 +114,17 @@ enum dma_ctrl_flags {
* @DMA_TERMINATE_ALL: terminate all ongoing transfers
* @DMA_PAUSE: pause ongoing transfers
* @DMA_RESUME: resume paused transfer
* @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
* that need to reconfigure the slave channels at runtime (as opposed to
* passing configuration data in statically from the platform). An additional
* argument of struct dma_slave_config must be passed in with this
* command.
*/
enum dma_ctrl_cmd {
DMA_TERMINATE_ALL,
DMA_PAUSE,
DMA_RESUME,
DMA_SLAVE_CONFIG,
};
/**
@@ -199,6 +205,71 @@ struct dma_chan_dev {
atomic_t *idr_ref;
};
/**
* enum dma_slave_buswidth - defines bus width of the DMA slave
* device, source or target buses
*/
enum dma_slave_buswidth {
DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
};
/**
* struct dma_slave_config - dma slave channel runtime config
* @direction: whether the data shall go in or out on this slave
* channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
* legal values, DMA_BIDIRECTIONAL is not acceptable since we
* need to differentiate source and target addresses.
* @src_addr: this is the physical address where DMA slave data
* should be read (RX), if the source is memory this argument is
* ignored.
* @dst_addr: this is the physical address where DMA slave data
* should be written (TX), if the source is memory this argument
* is ignored.
* @src_addr_width: this is the width in bytes of the source (RX)
* register where DMA data shall be read. If the source
* is memory this may be ignored depending on architecture.
* Legal values: 1, 2, 4, 8.
* @dst_addr_width: same as src_addr_width but for destination
* target (TX) mutatis mutandis.
* @src_maxburst: the maximum number of words (note: words, as in
* units of the src_addr_width member, not bytes) that can be sent
* in one burst to the device. Typically something like half the
* FIFO depth on I/O peripherals so you don't overflow it. This
* may or may not be applicable on memory sources.
* @dst_maxburst: same as src_maxburst but for destination target
* mutatis mutandis.
*
* This struct is passed in as configuration data to a DMA engine
* in order to set up a certain channel for DMA transport at runtime.
* The DMA device/engine has to provide support for an additional
* command in the channel config interface, DMA_SLAVE_CONFIG
* and this struct will then be passed in as an argument to the
* DMA engine device_control() function.
*
* The rationale for adding configuration information to this struct
* is as follows: if it is likely that most DMA slave controllers in
* the world will support the configuration option, then make it
* generic. If not: if it is fixed so that it can be sent in statically from
* the platform data, then prefer to do that. Else, if it is neither
* fixed at runtime, nor generic enough (such as bus mastership on
* some CPU family and whatnot) then create a custom slave config
* struct and pass that, then make this config a member of that
* struct, if applicable.
*/
struct dma_slave_config {
enum dma_data_direction direction;
dma_addr_t src_addr;
dma_addr_t dst_addr;
enum dma_slave_buswidth src_addr_width;
enum dma_slave_buswidth dst_addr_width;
u32 src_maxburst;
u32 dst_maxburst;
};
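
To make the calling convention concrete, here is a minimal, hypothetical sketch of a slave driver issuing DMA_SLAVE_CONFIG through device_control(); the channel, FIFO address, and burst size are invented, and only the interfaces declared in this header are assumed.

#include <linux/dmaengine.h>

/* Hypothetical sketch: configure a slave channel for 4-byte-wide RX from
 * a peripheral FIFO. "chan" was obtained earlier, e.g. via
 * dma_request_channel(); the FIFO address and burst size are made up. */
static int foo_setup_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,	/* e.g. half a 16-word FIFO */
	};

	/* DMA_SLAVE_CONFIG takes the struct as the opaque argument */
	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&cfg);
}

A driver would typically issue this once per transfer direction before preparing its slave scatter-gather descriptors.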
static inline const char *dma_chan_name(struct dma_chan *chan)
{
return dev_name(&chan->dev->device);

View file

@@ -20,6 +20,7 @@ enum dmi_device_type {
DMI_DEV_TYPE_SAS,
DMI_DEV_TYPE_IPMI = -1,
DMI_DEV_TYPE_OEM_STRING = -2,
DMI_DEV_TYPE_DEV_ONBOARD = -3,
};
struct dmi_header {
@@ -37,6 +38,14 @@ struct dmi_device {
#ifdef CONFIG_DMI
struct dmi_dev_onboard {
struct dmi_device dev;
int instance;
int segment;
int bus;
int devfn;
};
extern int dmi_check_system(const struct dmi_system_id *list);
const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list);
extern const char * dmi_get_system_info(int field);

View file

@@ -28,6 +28,7 @@ struct dnotify_struct {
FS_CREATE | FS_DN_RENAME |\
FS_MOVED_FROM | FS_MOVED_TO)
extern int dir_notify_enable;
extern void dnotify_flush(struct file *, fl_owner_t);
extern int fcntl_dirnotify(int, struct file *, unsigned long);

View file

@@ -0,0 +1,34 @@
/*
* DNS Resolver upcall management for CIFS DFS and AFS
* Handles host name to IP address resolution and DNS query for AFSDB RR.
*
* Copyright (c) International Business Machines Corp., 2008
* Author(s): Steve French (sfrench@us.ibm.com)
* Wang Lei (wang840925@gmail.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _LINUX_DNS_RESOLVER_H
#define _LINUX_DNS_RESOLVER_H
#ifdef __KERNEL__
extern int dns_query(const char *type, const char *name, size_t namelen,
const char *options, char **_result, time_t *_expiry);
#endif /* KERNEL */
#endif /* _LINUX_DNS_RESOLVER_H */
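
A rough, hypothetical in-kernel usage sketch (not part of this patch set): the foo_resolve() name is invented; on success dns_query() is expected to return the length of a resolver-allocated result string, which the caller frees.

#include <linux/dns_resolver.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical sketch: resolve a host name from kernel code. Passing a
 * NULL type asks for a plain host-to-address lookup; the result buffer
 * is allocated by the resolver and must be freed by the caller. */
static int foo_resolve(const char *hostname)
{
	char *ip = NULL;
	time_t expiry;
	int len;

	len = dns_query(NULL, hostname, strlen(hostname), NULL, &ip, &expiry);
	if (len < 0)
		return len;	/* e.g. no upcall result available */

	pr_info("%s -> %*.*s\n", hostname, len, len, ip);
	kfree(ip);
	return 0;
}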

View file

@@ -49,7 +49,7 @@
#define FS_DQUOT_VERSION 1 /* fs_disk_quota.d_version */
typedef struct fs_disk_quota {
__s8 d_version; /* version of this structure */
__s8 d_flags; /* XFS_{USER,PROJ,GROUP}_QUOTA */
__s8 d_flags; /* FS_{USER,PROJ,GROUP}_QUOTA */
__u16 d_fieldmask; /* field specifier */
__u32 d_id; /* user, project, or group ID */
__u64 d_blk_hardlimit;/* absolute limit on disk blks */
@@ -119,18 +119,18 @@ typedef struct fs_disk_quota {
#define FS_DQ_ACCT_MASK (FS_DQ_BCOUNT | FS_DQ_ICOUNT | FS_DQ_RTBCOUNT)
/*
* Various flags related to quotactl(2). Only relevant to XFS filesystems.
* Various flags related to quotactl(2).
*/
#define XFS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */
#define XFS_QUOTA_UDQ_ENFD (1<<1) /* user quota limits enforcement */
#define XFS_QUOTA_GDQ_ACCT (1<<2) /* group quota accounting */
#define XFS_QUOTA_GDQ_ENFD (1<<3) /* group quota limits enforcement */
#define XFS_QUOTA_PDQ_ACCT (1<<4) /* project quota accounting */
#define XFS_QUOTA_PDQ_ENFD (1<<5) /* project quota limits enforcement */
#define FS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */
#define FS_QUOTA_UDQ_ENFD (1<<1) /* user quota limits enforcement */
#define FS_QUOTA_GDQ_ACCT (1<<2) /* group quota accounting */
#define FS_QUOTA_GDQ_ENFD (1<<3) /* group quota limits enforcement */
#define FS_QUOTA_PDQ_ACCT (1<<4) /* project quota accounting */
#define FS_QUOTA_PDQ_ENFD (1<<5) /* project quota limits enforcement */
#define XFS_USER_QUOTA (1<<0) /* user quota type */
#define XFS_PROJ_QUOTA (1<<1) /* project quota type */
#define XFS_GROUP_QUOTA (1<<2) /* group quota type */
#define FS_USER_QUOTA (1<<0) /* user quota type */
#define FS_PROJ_QUOTA (1<<1) /* project quota type */
#define FS_GROUP_QUOTA (1<<2) /* group quota type */
/*
* fs_quota_stat is the struct returned in Q_XGETQSTAT for a given file system.
@@ -151,7 +151,7 @@ typedef struct fs_qfilestat {
typedef struct fs_quota_stat {
__s8 qs_version; /* version number for future changes */
__u16 qs_flags; /* XFS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */
__u16 qs_flags; /* FS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */
__s8 qs_pad; /* unused */
fs_qfilestat_t qs_uquota; /* user quota storage information */
fs_qfilestat_t qs_gquota; /* group quota storage information */

View file

@@ -53,7 +53,7 @@
extern const char *drbd_buildtag(void);
#define REL_VERSION "8.3.8"
#define REL_VERSION "8.3.8.1"
#define API_VERSION 88
#define PRO_VERSION_MIN 86
#define PRO_VERSION_MAX 94

View file

@@ -78,10 +78,11 @@ NL_PACKET(syncer_conf, 8,
NL_INTEGER( 30, T_MAY_IGNORE, rate)
NL_INTEGER( 31, T_MAY_IGNORE, after)
NL_INTEGER( 32, T_MAY_IGNORE, al_extents)
NL_INTEGER( 71, T_MAY_IGNORE, dp_volume)
NL_INTEGER( 72, T_MAY_IGNORE, dp_interval)
NL_INTEGER( 73, T_MAY_IGNORE, throttle_th)
NL_INTEGER( 74, T_MAY_IGNORE, hold_off_th)
/* NL_INTEGER( 71, T_MAY_IGNORE, dp_volume)
* NL_INTEGER( 72, T_MAY_IGNORE, dp_interval)
* NL_INTEGER( 73, T_MAY_IGNORE, throttle_th)
* NL_INTEGER( 74, T_MAY_IGNORE, hold_off_th)
* feature will be reimplemented differently with 8.3.9 */
NL_STRING( 52, T_MAY_IGNORE, verify_alg, SHARED_SECRET_MAX)
NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32)
NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX)

View file

@@ -62,6 +62,7 @@ typedef enum fe_caps {
FE_CAN_8VSB = 0x200000,
FE_CAN_16VSB = 0x400000,
FE_HAS_EXTENDED_CAPS = 0x800000, /* We need more bitspace for newer APIs, indicate this. */
FE_CAN_TURBO_FEC = 0x8000000, /* frontend supports "turbo fec modulation" */
FE_CAN_2G_MODULATION = 0x10000000, /* frontend supports "2nd generation modulation" (DVB-S2) */
FE_NEEDS_BENDING = 0x20000000, /* not supported anymore, don't use (frontend requires frequency bending) */
FE_CAN_RECOVER = 0x40000000, /* frontend can recover from a cable unplug automatically */

View file

@@ -24,6 +24,6 @@
#define _DVBVERSION_H_
#define DVB_API_VERSION 5
#define DVB_API_VERSION_MINOR 1
#define DVB_API_VERSION_MINOR 2
#endif /*_DVBVERSION_H_*/

View file

@@ -30,6 +30,7 @@
#define PCI_EEPROM_WIDTH_93C46 6
#define PCI_EEPROM_WIDTH_93C56 8
#define PCI_EEPROM_WIDTH_93C66 8
#define PCI_EEPROM_WIDTH_93C86 8
#define PCI_EEPROM_WIDTH_OPCODE 3
#define PCI_EEPROM_WRITE_OPCODE 0x05
#define PCI_EEPROM_READ_OPCODE 0x06

View file

@@ -126,6 +126,20 @@ static inline void random_ether_addr(u8 *addr)
addr [0] |= 0x02; /* set local assignment bit (IEEE802) */
}
/**
* dev_hw_addr_random - Create random MAC and set device flag
* @dev: pointer to net_device structure
* @hwaddr: Pointer to a six-byte array containing the Ethernet address
*
* Generate random MAC to be used by a device and set addr_assign_type
* so the state can be read by sysfs and be used by udev.
*/
static inline void dev_hw_addr_random(struct net_device *dev, u8 *hwaddr)
{
dev->addr_assign_type |= NET_ADDR_RANDOM;
random_ether_addr(hwaddr);
}
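
A short, hypothetical sketch of the intended use: a network driver falling back to a random address when the EEPROM holds nothing usable. The foo name is invented; is_valid_ether_addr() and ETH_ALEN come from this same header family.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Hypothetical sketch: prefer the EEPROM MAC, otherwise generate a
 * random one and let dev_hw_addr_random() flag it as NET_ADDR_RANDOM. */
static void foo_init_mac(struct net_device *ndev, const u8 *eeprom_mac)
{
	if (is_valid_ether_addr(eeprom_mac))
		memcpy(ndev->dev_addr, eeprom_mac, ETH_ALEN);
	else
		dev_hw_addr_random(ndev, ndev->dev_addr);
}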
/**
* compare_ether_addr - Compare two Ethernet addresses
* @addr1: Pointer to a six-byte array containing the Ethernet address

View file

@@ -386,6 +386,15 @@ struct ethtool_rxnfc {
__u32 rule_locs[0];
};
struct ethtool_rxfh_indir {
__u32 cmd;
/* On entry, this is the array size of the user buffer. On
* return from ETHTOOL_GRXFHINDIR, this is the array size of
* the hardware indirection table. */
__u32 size;
__u32 ring_index[0]; /* ring/queue index for each hash value */
};
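
A hypothetical userspace sketch of the two-step pattern the comment above implies: probe for the table size, then fetch the table. Error behaviour of the probe call is driver-dependent, so the sketch only relies on @size being written back.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

/* Hypothetical sketch: fd is an AF_INET socket, ifname e.g. "eth0". */
static int show_indir_table(int fd, const char *ifname)
{
	struct ethtool_rxfh_indir hdr = { .cmd = ETHTOOL_GRXFHINDIR, .size = 0 };
	struct ethtool_rxfh_indir *indir;
	struct ifreq ifr;
	unsigned int i;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	ifr.ifr_data = (void *)&hdr;
	ioctl(fd, SIOCETHTOOL, &ifr);	/* learn the hardware table size */
	if (hdr.size == 0)
		return -1;

	indir = calloc(1, sizeof(*indir) +
			  hdr.size * sizeof(indir->ring_index[0]));
	if (!indir)
		return -1;
	indir->cmd = ETHTOOL_GRXFHINDIR;
	indir->size = hdr.size;
	ifr.ifr_data = (void *)indir;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		free(indir);
		return -1;
	}

	for (i = 0; i < indir->size; i++)
		printf("hash %u -> ring %u\n", i, indir->ring_index[i]);
	free(indir);
	return 0;
}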
struct ethtool_rx_ntuple_flow_spec {
__u32 flow_type;
union {
@@ -459,7 +468,7 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data);
u32 ethtool_op_get_ufo(struct net_device *dev);
int ethtool_op_set_ufo(struct net_device *dev, u32 data);
u32 ethtool_op_get_flags(struct net_device *dev);
int ethtool_op_set_flags(struct net_device *dev, u32 data);
int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
void ethtool_ntuple_flush(struct net_device *dev);
/**
@@ -578,6 +587,10 @@ struct ethtool_ops {
int (*set_rx_ntuple)(struct net_device *,
struct ethtool_rx_ntuple *);
int (*get_rx_ntuple)(struct net_device *, u32 stringset, void *);
int (*get_rxfh_indir)(struct net_device *,
struct ethtool_rxfh_indir *);
int (*set_rxfh_indir)(struct net_device *,
const struct ethtool_rxfh_indir *);
};
#endif /* __KERNEL__ */
@@ -588,29 +601,29 @@ struct ethtool_ops {
#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */
#define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */
#define ETHTOOL_SWOL 0x00000006 /* Set wake-on-lan options. */
#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */
#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */
#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */
#define ETHTOOL_GLINK 0x0000000a /* Get link status (ethtool_value) */
#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */
#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */
#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters. */
#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
* (ethtool_value) */
#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
* (ethtool_value). */
#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test. */
#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
@@ -621,8 +634,8 @@ struct ethtool_ops {
#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */
#define ETHTOOL_GFLAGS 0x00000025 /* Get flags bitmap(ethtool_value) */
#define ETHTOOL_SFLAGS 0x00000026 /* Set flags bitmap(ethtool_value) */
#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */
#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */
#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */
#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */
#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */
#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */
@@ -639,6 +652,8 @@ struct ethtool_ops {
#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */
#define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */
#define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */
#define ETHTOOL_GRXFHINDIR 0x00000038 /* Get RX flow hash indir'n table */
#define ETHTOOL_SRXFHINDIR 0x00000039 /* Set RX flow hash indir'n table */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
@@ -647,18 +662,18 @@ struct ethtool_ops {
/* Indicates what features are supported by the interface. */
#define SUPPORTED_10baseT_Half (1 << 0)
#define SUPPORTED_10baseT_Full (1 << 1)
#define SUPPORTED_100baseT_Half (1 << 2)
#define SUPPORTED_100baseT_Full (1 << 3)
#define SUPPORTED_100baseT_Half (1 << 2)
#define SUPPORTED_100baseT_Full (1 << 3)
#define SUPPORTED_1000baseT_Half (1 << 4)
#define SUPPORTED_1000baseT_Full (1 << 5)
#define SUPPORTED_Autoneg (1 << 6)
#define SUPPORTED_TP (1 << 7)
#define SUPPORTED_AUI (1 << 8)
#define SUPPORTED_MII (1 << 9)
#define SUPPORTED_FIBRE (1 << 10)
#define SUPPORTED_FIBRE (1 << 10)
#define SUPPORTED_BNC (1 << 11)
#define SUPPORTED_10000baseT_Full (1 << 12)
#define SUPPORTED_Pause (1 << 13)
#define SUPPORTED_Pause (1 << 13)
#define SUPPORTED_Asym_Pause (1 << 14)
#define SUPPORTED_2500baseX_Full (1 << 15)
#define SUPPORTED_Backplane (1 << 16)
@@ -668,8 +683,8 @@ struct ethtool_ops {
#define SUPPORTED_10000baseR_FEC (1 << 20)
/* Indicates what features are advertised by the interface. */
#define ADVERTISED_10baseT_Half (1 << 0)
#define ADVERTISED_10baseT_Full (1 << 1)
#define ADVERTISED_10baseT_Half (1 << 0)
#define ADVERTISED_10baseT_Full (1 << 1)
#define ADVERTISED_100baseT_Half (1 << 2)
#define ADVERTISED_100baseT_Full (1 << 3)
#define ADVERTISED_1000baseT_Half (1 << 4)
@@ -708,12 +723,12 @@ struct ethtool_ops {
#define DUPLEX_FULL 0x01
/* Which connector port. */
#define PORT_TP 0x00
#define PORT_TP 0x00
#define PORT_AUI 0x01
#define PORT_MII 0x02
#define PORT_FIBRE 0x03
#define PORT_BNC 0x04
#define PORT_DA 0x05
#define PORT_DA 0x05
#define PORT_NONE 0xef
#define PORT_OTHER 0xff
@@ -727,7 +742,7 @@ struct ethtool_ops {
/* Enable or disable autonegotiation. If this is set to enable,
* the forced link modes above are completely ignored.
*/
#define AUTONEG_DISABLE 0x00
#define AUTONEG_DISABLE 0x00
#define AUTONEG_ENABLE 0x01
/* Mode MDI or MDI-X */

View file

@@ -400,7 +400,6 @@ struct ext3_inode {
#define EXT3_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */
#define EXT3_MOUNT_RESERVATION 0x10000 /* Preallocation */
#define EXT3_MOUNT_BARRIER 0x20000 /* Use block barriers */
#define EXT3_MOUNT_NOBH 0x40000 /* No bufferheads */
#define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */
#define EXT3_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
#define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
@@ -896,7 +895,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
extern struct inode *ext3_iget(struct super_block *, unsigned long);
extern int ext3_write_inode (struct inode *, struct writeback_control *);
extern int ext3_setattr (struct dentry *, struct iattr *);
extern void ext3_delete_inode (struct inode *);
extern void ext3_evict_inode (struct inode *);
extern int ext3_sync_inode (handle_t *, struct inode *);
extern void ext3_discard_reservation (struct inode *);
extern void ext3_dirty_inode(struct inode *);

include/linux/fanotify.h Normal file
View file

@@ -0,0 +1,105 @@
#ifndef _LINUX_FANOTIFY_H
#define _LINUX_FANOTIFY_H
#include <linux/types.h>
/* the following are events that user-space can register for */
#define FAN_ACCESS 0x00000001 /* File was accessed */
#define FAN_MODIFY 0x00000002 /* File was modified */
#define FAN_CLOSE_WRITE 0x00000008 /* Writable file closed */
#define FAN_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
#define FAN_OPEN 0x00000020 /* File was opened */
#define FAN_EVENT_ON_CHILD 0x08000000 /* interested in child events */
/* FIXME currently Q's have no limit.... */
#define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */
#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */
/* helper events */
#define FAN_CLOSE (FAN_CLOSE_WRITE | FAN_CLOSE_NOWRITE) /* close */
/* flags used for fanotify_init() */
#define FAN_CLOEXEC 0x00000001
#define FAN_NONBLOCK 0x00000002
#define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK)
/* flags used for fanotify_modify_mark() */
#define FAN_MARK_ADD 0x00000001
#define FAN_MARK_REMOVE 0x00000002
#define FAN_MARK_DONT_FOLLOW 0x00000004
#define FAN_MARK_ONLYDIR 0x00000008
#define FAN_MARK_MOUNT 0x00000010
#define FAN_MARK_IGNORED_MASK 0x00000020
#define FAN_MARK_IGNORED_SURV_MODIFY 0x00000040
#define FAN_MARK_FLUSH 0x00000080
#define FAN_ALL_MARK_FLAGS (FAN_MARK_ADD |\
FAN_MARK_REMOVE |\
FAN_MARK_DONT_FOLLOW |\
FAN_MARK_ONLYDIR |\
FAN_MARK_MOUNT |\
FAN_MARK_IGNORED_MASK |\
FAN_MARK_IGNORED_SURV_MODIFY)
/*
* All of the events - we build the list by hand so that we can add flags in
* the future and not break backward compatibility. Apps will get only the
* events that they originally wanted. Be sure to add new events here!
*/
#define FAN_ALL_EVENTS (FAN_ACCESS |\
FAN_MODIFY |\
FAN_CLOSE |\
FAN_OPEN)
/*
* All events which require a permission response from userspace
*/
#define FAN_ALL_PERM_EVENTS (FAN_OPEN_PERM |\
FAN_ACCESS_PERM)
#define FAN_ALL_OUTGOING_EVENTS (FAN_ALL_EVENTS |\
FAN_ALL_PERM_EVENTS |\
FAN_Q_OVERFLOW)
#define FANOTIFY_METADATA_VERSION 1
struct fanotify_event_metadata {
__u32 event_len;
__u32 vers;
__s32 fd;
__u64 mask;
__s64 pid;
} __attribute__ ((packed));
struct fanotify_response {
__s32 fd;
__u32 response;
} __attribute__ ((packed));
/* Legit userspace responses to a _PERM event */
#define FAN_ALLOW 0x01
#define FAN_DENY 0x02
/* Helper functions to deal with fanotify_event_metadata buffers */
#define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata))
#define FAN_EVENT_NEXT(meta, len) ((len) -= (meta)->event_len, \
(struct fanotify_event_metadata*)(((char *)(meta)) + \
(meta)->event_len))
#define FAN_EVENT_OK(meta, len) ((long)(len) >= (long)FAN_EVENT_METADATA_LEN && \
(long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \
(long)(meta)->event_len <= (long)(len))
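
To show how these helpers fit together, a hypothetical userspace sketch that drains one batch of events and acknowledges permission events; the fanotify fd is assumed to come from the fanotify_init() syscall (no libc wrapper existed when this header was merged).

#include <stdio.h>
#include <unistd.h>
#include <linux/fanotify.h>

/* Hypothetical sketch: read one buffer of events, answer any permission
 * events with FAN_ALLOW, and close the per-event file descriptors. */
static void handle_events(int fanotify_fd)
{
	char buf[4096];
	struct fanotify_event_metadata *meta;
	ssize_t len;

	len = read(fanotify_fd, buf, sizeof(buf));
	if (len <= 0)
		return;

	meta = (struct fanotify_event_metadata *)buf;
	while (FAN_EVENT_OK(meta, len)) {
		printf("mask 0x%llx from pid %lld on fd %d\n",
		       (unsigned long long)meta->mask,
		       (long long)meta->pid, meta->fd);

		if (meta->mask & FAN_ALL_PERM_EVENTS) {
			struct fanotify_response resp = {
				.fd = meta->fd,
				.response = FAN_ALLOW,
			};
			write(fanotify_fd, &resp, sizeof(resp));
		}

		if (meta->fd >= 0)
			close(meta->fd);	/* each event carries an open fd */
		meta = FAN_EVENT_NEXT(meta, len);	/* also decrements len */
	}
}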
#ifdef __KERNEL__
struct fanotify_wait {
struct fsnotify_event *event;
__s32 fd;
};
#endif /* __KERNEL__ */
#endif /* _LINUX_FANOTIFY_H */

View file

@@ -3,6 +3,9 @@
#include <linux/types.h>
#include <linux/i2c.h>
#ifdef __KERNEL__
#include <linux/kgdb.h>
#endif /* __KERNEL__ */
/* Definitions of frame buffers */
@@ -607,6 +610,12 @@ struct fb_deferred_io {
* LOCKING NOTE: those functions must _ALL_ be called with the console
* semaphore held, this is the only suitable locking mechanism we have
* in 2.6. Some may be called at interrupt time at this point though.
*
* The exception to this rule is the debug-related hooks. Putting the fb
* into a debug state (e.g. flipping to the kernel console) and restoring
* it must be done in a lock-free manner, so low level drivers should
* keep track of the initial console (if applicable) and may need to
* perform direct, unlocked hardware writes in these hooks.
*/
struct fb_ops {
@@ -676,6 +685,10 @@ struct fb_ops {
/* teardown any resources to do with this framebuffer */
void (*fb_destroy)(struct fb_info *info);
/* called at KDB enter and leave time to prepare the console */
int (*fb_debug_enter)(struct fb_info *info);
int (*fb_debug_leave)(struct fb_info *info);
};
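
A skeletal, hypothetical driver wiring for these hooks, following the lock-free rule from the comment above (the actual hardware pokes are elided):

#include <linux/fb.h>
#include <linux/module.h>

/* Hypothetical sketch: KDB debug hooks in a driver's fb_ops. These run
 * without the console semaphore, so the bodies may only do direct,
 * unlocked hardware writes. */
static int foofb_debug_enter(struct fb_info *info)
{
	/* unlocked: flip scanout back to the console framebuffer */
	return 0;
}

static int foofb_debug_leave(struct fb_info *info)
{
	/* unlocked: restore the interrupted user mode */
	return 0;
}

static struct fb_ops foofb_ops = {
	.owner		= THIS_MODULE,
	/* ...fb_check_var, fb_set_par, etc. would go here... */
	.fb_debug_enter	= foofb_debug_enter,
	.fb_debug_leave	= foofb_debug_leave,
};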
#ifdef CONFIG_FB_TILEBLITTING
@@ -812,6 +825,10 @@ struct fb_tile_ops {
*/
#define FBINFO_BE_MATH 0x100000
/* report to the VT layer that this fb driver can accept forced console
output like oopses */
#define FBINFO_CAN_FORCE_OUTPUT 0x200000
struct fb_info {
int node;
int flags;

View file

@@ -11,6 +11,7 @@
#include <linux/rcupdate.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <asm/atomic.h>

View file

@@ -91,6 +91,54 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
#define BPF_TAX 0x00
#define BPF_TXA 0x80
enum {
BPF_S_RET_K = 0,
BPF_S_RET_A,
BPF_S_ALU_ADD_K,
BPF_S_ALU_ADD_X,
BPF_S_ALU_SUB_K,
BPF_S_ALU_SUB_X,
BPF_S_ALU_MUL_K,
BPF_S_ALU_MUL_X,
BPF_S_ALU_DIV_X,
BPF_S_ALU_AND_K,
BPF_S_ALU_AND_X,
BPF_S_ALU_OR_K,
BPF_S_ALU_OR_X,
BPF_S_ALU_LSH_K,
BPF_S_ALU_LSH_X,
BPF_S_ALU_RSH_K,
BPF_S_ALU_RSH_X,
BPF_S_ALU_NEG,
BPF_S_LD_W_ABS,
BPF_S_LD_H_ABS,
BPF_S_LD_B_ABS,
BPF_S_LD_W_LEN,
BPF_S_LD_W_IND,
BPF_S_LD_H_IND,
BPF_S_LD_B_IND,
BPF_S_LD_IMM,
BPF_S_LDX_W_LEN,
BPF_S_LDX_B_MSH,
BPF_S_LDX_IMM,
BPF_S_MISC_TAX,
BPF_S_MISC_TXA,
BPF_S_ALU_DIV_K,
BPF_S_LD_MEM,
BPF_S_LDX_MEM,
BPF_S_ST,
BPF_S_STX,
BPF_S_JMP_JA,
BPF_S_JMP_JEQ_K,
BPF_S_JMP_JEQ_X,
BPF_S_JMP_JGE_K,
BPF_S_JMP_JGE_X,
BPF_S_JMP_JGT_K,
BPF_S_JMP_JGT_X,
BPF_S_JMP_JSET_K,
BPF_S_JMP_JSET_X,
};
#ifndef BPF_MAXINSNS
#define BPF_MAXINSNS 4096
#endif
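
The enum above is the kernel's internal decode of classic BPF instructions; userspace still writes the raw BPF_* opcodes from earlier in this header. As a hedged sketch, a minimal filter accepting only IPv4 frames, attached with SO_ATTACH_FILTER:

#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

/* Hypothetical sketch: accept only IPv4 frames on a packet socket.
 * The filter loads the EtherType half-word at offset 12 and returns
 * 0 (drop) or ~0 (keep the whole packet). */
static int attach_ipv4_filter(int sock)
{
	struct sock_filter insns[] = {
		{ BPF_LD  | BPF_H   | BPF_ABS, 0, 0, 12 },	   /* A = EtherType */
		{ BPF_JMP | BPF_JEQ | BPF_K,   0, 1, ETH_P_IP },   /* IPv4? */
		{ BPF_RET | BPF_K,             0, 0, 0xffffffff }, /* keep */
		{ BPF_RET | BPF_K,             0, 0, 0 },	   /* drop */
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	return setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}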

View file

@@ -30,12 +30,18 @@
#include <linux/types.h>
#include <linux/firewire-constants.h>
#define FW_CDEV_EVENT_BUS_RESET 0x00
#define FW_CDEV_EVENT_RESPONSE 0x01
#define FW_CDEV_EVENT_REQUEST 0x02
#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04
#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05
#define FW_CDEV_EVENT_BUS_RESET 0x00
#define FW_CDEV_EVENT_RESPONSE 0x01
#define FW_CDEV_EVENT_REQUEST 0x02
#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04
#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05
/* available since kernel version 2.6.36 */
#define FW_CDEV_EVENT_REQUEST2 0x06
#define FW_CDEV_EVENT_PHY_PACKET_SENT 0x07
#define FW_CDEV_EVENT_PHY_PACKET_RECEIVED 0x08
#define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09
/**
* struct fw_cdev_event_common - Common part of all fw_cdev_event_ types
@@ -68,6 +74,10 @@ struct fw_cdev_event_common {
* This event is sent when the bus the device belongs to goes through a bus
* reset. It provides information about the new bus configuration, such as
* new node ID for this device, new root ID, and others.
*
* If @bm_node_id is 0xffff right after bus reset it can be reread by an
* %FW_CDEV_IOC_GET_INFO ioctl after bus manager selection was finished.
* Kernels with ABI version < 4 do not set @bm_node_id.
*/
struct fw_cdev_event_bus_reset {
__u64 closure;
@@ -82,8 +92,9 @@ struct fw_cdev_event_bus_reset {
/**
* struct fw_cdev_event_response - Sent when a response packet was received
* @closure: See &fw_cdev_event_common;
* set by %FW_CDEV_IOC_SEND_REQUEST ioctl
* @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_REQUEST
* or %FW_CDEV_IOC_SEND_BROADCAST_REQUEST
* or %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE
* @rcode: Response code returned by the remote node
* @length: Data length, i.e. the response's payload size in bytes
@@ -93,6 +104,11 @@ struct fw_cdev_event_bus_reset {
* sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses
* carrying data (read and lock responses) follows immediately and can be
* accessed through the @data field.
*
* The event is also generated after conclusions of transactions that do not
* involve response packets. This includes unified write transactions,
* broadcast write transactions, and transmission of asynchronous stream
* packets. @rcode indicates success or failure of such transmissions.
*/
struct fw_cdev_event_response {
__u64 closure;
@@ -103,11 +119,46 @@ struct fw_cdev_event_response {
};
/**
* struct fw_cdev_event_request - Sent on incoming request to an address region
* struct fw_cdev_event_request - Old version of &fw_cdev_event_request2
* @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST
* @tcode: See &fw_cdev_event_request2
* @offset: See &fw_cdev_event_request2
* @handle: See &fw_cdev_event_request2
* @length: See &fw_cdev_event_request2
* @data: See &fw_cdev_event_request2
*
* This event is sent instead of &fw_cdev_event_request2 if the kernel or
* the client implements ABI version <= 3.
*
* Unlike &fw_cdev_event_request2, the sender identity cannot be established,
* broadcast write requests cannot be distinguished from unicast writes, and
* @tcode of lock requests is %TCODE_LOCK_REQUEST.
*
* Requests to the FCP_REQUEST or FCP_RESPONSE register are responded to as
* with &fw_cdev_event_request2, except in kernel 2.6.32 and older which send
* the response packet of the client's %FW_CDEV_IOC_SEND_RESPONSE ioctl.
*/
struct fw_cdev_event_request {
__u64 closure;
__u32 type;
__u32 tcode;
__u64 offset;
__u32 handle;
__u32 length;
__u32 data[0];
};
/**
* struct fw_cdev_event_request2 - Sent on incoming request to an address region
* @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST2
* @tcode: Transaction code of the incoming request
* @offset: The offset into the 48-bit per-node address space
* @source_node_id: Sender node ID
* @destination_node_id: Destination node ID
* @card: The index of the card from which the request came
* @generation: Bus generation in which the request is valid
* @handle: Reference to the kernel-side pending request
* @length: Data length, i.e. the request's payload size in bytes
* @data: Incoming data, if any
@@ -120,12 +171,42 @@ struct fw_cdev_event_response {
*
* The payload data for requests carrying data (write and lock requests)
* follows immediately and can be accessed through the @data field.
*
* Unlike &fw_cdev_event_request, @tcode of lock requests is one of the
* firewire-core specific %TCODE_LOCK_MASK_SWAP...%TCODE_LOCK_VENDOR_DEPENDENT,
* i.e. encodes the extended transaction code.
*
* @card may differ from &fw_cdev_get_info.card because requests are received
* from all cards of the Linux host. @source_node_id, @destination_node_id, and
* @generation pertain to that card. Destination node ID and bus generation may
* therefore differ from the corresponding fields of the last
* &fw_cdev_event_bus_reset.
*
* @destination_node_id may also differ from the current node ID because of a
* non-local bus ID part or in case of a broadcast write request. Note, a
* client must call an %FW_CDEV_IOC_SEND_RESPONSE ioctl even in case of a
* broadcast write request; the kernel will then release the kernel-side pending
* request but will not actually send a response packet.
*
* In case of a write request to FCP_REQUEST or FCP_RESPONSE, the kernel already
* sent a write response immediately after the request was received; in this
* case the client must still call an %FW_CDEV_IOC_SEND_RESPONSE ioctl to
* release the kernel-side pending request, though another response won't be
* sent.
*
* If the client subsequently needs to initiate requests to the sender node of
* an &fw_cdev_event_request2, it needs to use a device file with matching
* card index, node ID, and generation for outbound requests.
*/
struct fw_cdev_event_request {
struct fw_cdev_event_request2 {
__u64 closure;
__u32 type;
__u32 tcode;
__u64 offset;
__u32 source_node_id;
__u32 destination_node_id;
__u32 card;
__u32 generation;
__u32 handle;
__u32 length;
__u32 data[0];
@@ -141,26 +222,43 @@ struct fw_cdev_event_request {
* @header: Stripped headers, if any
*
* This event is sent when the controller has completed an &fw_cdev_iso_packet
* with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers
* stripped of all packets up until and including the interrupt packet are
* returned in the @header field. The amount of header data per packet is as
* specified at iso context creation by &fw_cdev_create_iso_context.header_size.
* with the %FW_CDEV_ISO_INTERRUPT bit set.
*
* In version 1 of this ABI, header data consisted of the 1394 isochronous
* packet header, followed by quadlets from the packet payload if
* &fw_cdev_create_iso_context.header_size > 4.
* Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT):
*
* In version 2 of this ABI, header data consist of the 1394 isochronous
* packet header, followed by a timestamp quadlet if
* &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the
* packet payload if &fw_cdev_create_iso_context.header_size > 8.
* In version 3 and some implementations of version 2 of the ABI, &header_length
* is a multiple of 4 and &header contains timestamps of all packets up until
* the interrupt packet. The format of the timestamps is as described below for
* isochronous reception. In version 1 of the ABI, &header_length was 0.
*
* Isochronous receive events (context type %FW_CDEV_ISO_CONTEXT_RECEIVE):
*
* The headers stripped of all packets up until and including the interrupt
* packet are returned in the @header field. The amount of header data per
* packet is as specified at iso context creation by
* &fw_cdev_create_iso_context.header_size.
*
* Hence, _interrupt.header_length / _context.header_size is the number of
* packets received in this interrupt event. The client can now iterate
* through the mmap()'ed DMA buffer according to this number of packets and
* to the buffer sizes as the client specified in &fw_cdev_queue_iso.
*
* Since version 2 of this ABI, the portion for each packet in _interrupt.header
* consists of the 1394 isochronous packet header, followed by a timestamp
* quadlet if &fw_cdev_create_iso_context.header_size > 4, followed by quadlets
* from the packet payload if &fw_cdev_create_iso_context.header_size > 8.
*
* Format of 1394 iso packet header: 16 bits data_length, 2 bits tag, 6 bits
* channel, 4 bits tcode, 4 bits sy, in big endian byte order.
* data_length is the actual received size of the packet without the four
* 1394 iso packet header bytes.
*
* Format of timestamp: 16 bits invalid, 3 bits cycleSeconds, 13 bits
* cycleCount, in big endian byte order.
*
* In version 1 of the ABI, no timestamp quadlet was inserted; instead, payload
* data followed directly after the 1394 iso header if header_size > 4.
* Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2.
*
* Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel,
* 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp:
* 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte
* order.
*/
struct fw_cdev_event_iso_interrupt {
__u64 closure;
@@ -170,6 +268,43 @@ struct fw_cdev_event_iso_interrupt {
__u32 header[0];
};
/**
* struct fw_cdev_event_iso_interrupt_mc - An iso buffer chunk was completed
* @closure: See &fw_cdev_event_common;
* set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl
* @type: %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL
* @completed: Offset into the receive buffer; data before this offset is valid
*
* This event is sent in multichannel contexts (context type
* %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer
* chunks that have the %FW_CDEV_ISO_INTERRUPT bit set. Whether this happens
* when a packet is completed and/or when a buffer chunk is completed depends
* on the hardware implementation.
*
* The buffer is continuously filled with the following data, per packet:
* - the 1394 iso packet header as described at &fw_cdev_event_iso_interrupt,
* but in little endian byte order,
* - packet payload (as many bytes as specified in the data_length field of
* the 1394 iso packet header) in big endian byte order,
* - 0...3 padding bytes as needed to align the following trailer quadlet,
* - trailer quadlet, containing the reception timestamp as described at
* &fw_cdev_event_iso_interrupt, but in little endian byte order.
*
* Hence the per-packet size is data_length (rounded up to a multiple of 4) + 8.
* When processing the data, stop before a packet that would cross the
* @completed offset.
*
* A packet near the end of a buffer chunk will typically spill over into the
* next queued buffer chunk. It is the responsibility of the client to check
* for this condition, assemble a broken-up packet from its parts, and not to
* re-queue any buffer chunks in which as yet unread packet parts reside.
*/
struct fw_cdev_event_iso_interrupt_mc {
__u64 closure;
__u32 type;
__u32 completed;
};
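
A hypothetical parser for the layout just described, walking one chunk up to @completed; spill-over reassembly and the buffer-queueing side are left out.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sketch: walk packets in a multichannel receive buffer up
 * to the "completed" offset. The iso header quadlet is stored little
 * endian here; data_length is its upper 16 bits, the channel sits in
 * bits 13..8, per the layout described above. */
static void parse_mc_chunk(const uint8_t *buf, uint32_t start,
			   uint32_t completed)
{
	uint32_t pos = start;

	while (pos + 8 <= completed) {
		uint32_t header = buf[pos] | buf[pos + 1] << 8 |
				  buf[pos + 2] << 16 |
				  (uint32_t)buf[pos + 3] << 24;
		uint32_t data_length = header >> 16;
		/* payload padded to a quadlet, then one trailer quadlet */
		uint32_t pkt_size = 4 + ((data_length + 3) & ~3u) + 4;

		if (pos + pkt_size > completed)
			break;	/* packet spills into the next chunk */

		printf("packet: %u payload bytes, channel %u\n",
		       data_length, (header >> 8) & 0x3f);
		pos += pkt_size;
	}
}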
/**
* struct fw_cdev_event_iso_resource - Iso resources were allocated or freed
* @closure: See &fw_cdev_event_common;
@@ -199,16 +334,46 @@ struct fw_cdev_event_iso_resource {
__s32 bandwidth;
};
/**
* struct fw_cdev_event_phy_packet - A PHY packet was transmitted or received
* @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_PHY_PACKET
* or %FW_CDEV_IOC_RECEIVE_PHY_PACKETS ioctl
* @type: %FW_CDEV_EVENT_PHY_PACKET_SENT or %..._RECEIVED
* @rcode: %RCODE_..., indicates success or failure of transmission
* @length: Data length in bytes
* @data: Incoming data
*
* If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT, @length is 0 and @data empty,
* except in case of a ping packet: Then, @length is 4, and @data[0] is the
* ping time in 49.152MHz clocks if @rcode is %RCODE_COMPLETE.
*
* If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED, @length is 8 and @data
* consists of the two PHY packet quadlets, in host byte order.
*/
struct fw_cdev_event_phy_packet {
__u64 closure;
__u32 type;
__u32 rcode;
__u32 length;
__u32 data[0];
};
/**
* union fw_cdev_event - Convenience union of fw_cdev_event_ types
* @common: Valid for all types
* @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
* @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
* @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
* @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
* @iso_resource: Valid if @common.type ==
* @common: Valid for all types
* @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET
* @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE
* @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
* @request2: Valid if @common.type == %FW_CDEV_EVENT_REQUEST2
* @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
* @iso_interrupt_mc: Valid if @common.type ==
* %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL
* @iso_resource: Valid if @common.type ==
* %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or
* %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED
* @phy_packet: Valid if @common.type ==
* %FW_CDEV_EVENT_PHY_PACKET_SENT or
* %FW_CDEV_EVENT_PHY_PACKET_RECEIVED
*
* Convenience union for userspace use. Events could be read(2) into an
* appropriately aligned char buffer and then cast to this union for further
@@ -223,8 +388,11 @@ union fw_cdev_event {
struct fw_cdev_event_bus_reset bus_reset;
struct fw_cdev_event_response response;
struct fw_cdev_event_request request;
struct fw_cdev_event_request2 request2; /* added in 2.6.36 */
struct fw_cdev_event_iso_interrupt iso_interrupt;
struct fw_cdev_event_iso_resource iso_resource;
struct fw_cdev_event_iso_interrupt_mc iso_interrupt_mc; /* added in 2.6.36 */
struct fw_cdev_event_iso_resource iso_resource; /* added in 2.6.30 */
struct fw_cdev_event_phy_packet phy_packet; /* added in 2.6.36 */
};
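
A minimal, hypothetical sketch of the read-and-cast pattern described above, for a /dev/fw* descriptor opened elsewhere:

#include <stdio.h>
#include <unistd.h>
#include <linux/firewire-cdev.h>

/* Hypothetical sketch: read one event into an aligned char buffer and
 * dispatch on the common type field. A large buffer leaves room for
 * payloads that follow the fixed-size event structs. */
static void read_one_event(int fd)
{
	char buf[16 * 1024] __attribute__((aligned(8)));
	union fw_cdev_event *event = (union fw_cdev_event *)buf;

	if (read(fd, buf, sizeof(buf)) < 0)
		return;

	switch (event->common.type) {
	case FW_CDEV_EVENT_BUS_RESET:
		printf("bus reset, node id %u\n", event->bus_reset.node_id);
		break;
	case FW_CDEV_EVENT_RESPONSE:
		printf("response, rcode %u\n", event->response.rcode);
		break;
	default:
		break;
	}
}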
/* available since kernel version 2.6.22 */
@@ -256,23 +424,46 @@ union fw_cdev_event {
/* available since kernel version 2.6.34 */
#define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2)
/* available since kernel version 2.6.36 */
#define FW_CDEV_IOC_SEND_PHY_PACKET _IOWR('#', 0x15, struct fw_cdev_send_phy_packet)
#define FW_CDEV_IOC_RECEIVE_PHY_PACKETS _IOW('#', 0x16, struct fw_cdev_receive_phy_packets)
#define FW_CDEV_IOC_SET_ISO_CHANNELS _IOW('#', 0x17, struct fw_cdev_set_iso_channels)
/*
* FW_CDEV_VERSION History
* ABI version history
* 1 (2.6.22) - initial version
* (2.6.24) - added %FW_CDEV_IOC_GET_CYCLE_TIMER
* 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if
* &fw_cdev_create_iso_context.header_size is 8 or more
* - added %FW_CDEV_IOC_*_ISO_RESOURCE*,
* %FW_CDEV_IOC_GET_SPEED, %FW_CDEV_IOC_SEND_BROADCAST_REQUEST,
* %FW_CDEV_IOC_SEND_STREAM_PACKET
* (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt
* (2.6.33) - IR has always packet-per-buffer semantics now, not one of
* dual-buffer or packet-per-buffer depending on hardware
* - shared use and auto-response for FCP registers
* 3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable
* - added %FW_CDEV_IOC_GET_CYCLE_TIMER2
* 4 (2.6.36) - added %FW_CDEV_EVENT_REQUEST2, %FW_CDEV_EVENT_PHY_PACKET_*,
* and &fw_cdev_allocate.region_end
* - implemented &fw_cdev_event_bus_reset.bm_node_id
* - added %FW_CDEV_IOC_SEND_PHY_PACKET, _RECEIVE_PHY_PACKETS
* - added %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL,
* %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL, and
* %FW_CDEV_IOC_SET_ISO_CHANNELS
*/
#define FW_CDEV_VERSION 3
#define FW_CDEV_VERSION 3 /* Meaningless; don't use this macro. */
/**
* struct fw_cdev_get_info - General purpose information ioctl
* @version: The version field is just a running serial number.
* We never break backwards compatibility, but may add more
* structs and ioctls in later revisions.
* @version: The version field is just a running serial number. Both an
* input parameter (ABI version implemented by the client) and
* output parameter (ABI version implemented by the kernel).
* A client must not fill in an %FW_CDEV_VERSION defined from an
* included kernel header file but the actual version for which
* the client was implemented. This is necessary for forward
* compatibility. We never break backwards compatibility, but
* may add more structs, events, and ioctls in later revisions.
* @rom_length: If @rom is non-zero, at most rom_length bytes of configuration
* ROM will be copied into that user space address. In either
* case, @rom_length is updated with the actual length of the
@@ -339,28 +530,48 @@ struct fw_cdev_send_response {
};
/**
* struct fw_cdev_allocate - Allocate a CSR address range
* struct fw_cdev_allocate - Allocate a CSR in an address range
* @offset: Start offset of the address range
* @closure: To be passed back to userspace in request events
* @length: Length of the address range, in bytes
* @length: Length of the CSR, in bytes
* @handle: Handle to the allocation, written by the kernel
* @region_end: First address above the address range (added in ABI v4, 2.6.36)
*
* Allocate an address range in the 48-bit address space on the local node
* (the controller). This allows userspace to listen for requests with an
* offset within that address range. When the kernel receives a request
* within the range, an &fw_cdev_event_request event will be written back.
* The @closure field is passed back to userspace in the response event.
* offset within that address range. Every time the kernel receives a
* request within the range, an &fw_cdev_event_request2 event will be emitted.
* (If the kernel or the client implements ABI version <= 3, an
* &fw_cdev_event_request will be generated instead.)
*
* The @closure field is passed back to userspace in these request events.
* The @handle field is an out parameter, returning a handle to the allocated
* range to be used for later deallocation of the range.
*
* The address range is allocated on all local nodes. The address allocation
* is exclusive except for the FCP command and response registers.
* is exclusive except for the FCP command and response registers. If an
* exclusive address region is already in use, the ioctl fails with errno set
* to %EBUSY.
*
* If kernel and client implement ABI version >= 4, the kernel looks up a free
* spot of size @length inside [@offset..@region_end) and, if found, writes
* the start address of the new CSR back in @offset. I.e. @offset is an
* in and out parameter. If this automatic placement of a CSR in a bigger
* address range is not desired, the client simply needs to set @region_end
* = @offset + @length.
*
* If the kernel or the client implements ABI version <= 3, @region_end is
* ignored and effectively assumed to be @offset + @length.
*
* @region_end is only present in a kernel header >= 2.6.36. If necessary,
* this can for example be tested by #ifdef FW_CDEV_EVENT_REQUEST2.
*/
struct fw_cdev_allocate {
__u64 offset;
__u64 closure;
__u32 length;
__u32 handle;
__u64 region_end; /* available since kernel version 2.6.36 */
};
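
To make the v4 auto-placement concrete, a hypothetical sketch; the search-window addresses are invented and error handling is abbreviated.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Hypothetical sketch: let the kernel place a 64-byte CSR anywhere in
 * the window [offset, region_end), as described above for ABI v4. */
static int allocate_csr(int fd)
{
	struct fw_cdev_allocate req = {
		.offset		= 0xfffff0000900ULL,	/* made-up window start */
		.closure	= 0,
		.length		= 64,
		.region_end	= 0xfffff0000c00ULL,	/* made-up window end */
	};

	if (ioctl(fd, FW_CDEV_IOC_ALLOCATE, &req) < 0)
		return -1;

	/* req.offset now holds the address the kernel actually chose */
	printf("CSR at 0x%012llx, handle %u\n",
	       (unsigned long long)req.offset, req.handle);
	return 0;
}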
/**
@@ -382,9 +593,14 @@ struct fw_cdev_deallocate {
* Initiate a bus reset for the bus this device is on. The bus reset can be
* either the original (long) bus reset or the arbitrated (short) bus reset
* introduced in 1394a-2000.
*
* The ioctl returns immediately. A subsequent &fw_cdev_event_bus_reset
* indicates when the reset actually happened. Since ABI v4, this may be
* considerably later than the ioctl because the kernel ensures a grace period
* between subsequent bus resets as per IEEE 1394 bus management specification.
*/
struct fw_cdev_initiate_bus_reset {
__u32 type; /* FW_CDEV_SHORT_RESET or FW_CDEV_LONG_RESET */
__u32 type;
};
/**
@@ -408,9 +624,10 @@ struct fw_cdev_initiate_bus_reset {
*
* @immediate, @key, and @data array elements are CPU-endian quadlets.
*
* If successful, the kernel adds the descriptor and writes back a handle to the
* kernel-side object to be used for later removal of the descriptor block and
* immediate key.
* If successful, the kernel adds the descriptor and writes back a @handle to
* the kernel-side object to be used for later removal of the descriptor block
* and immediate key. The kernel will also generate a bus reset to signal the
* change of the configuration ROM to other nodes.
*
* This ioctl affects the configuration ROMs of all local nodes.
* The ioctl only succeeds on device files which represent a local node.
@@ -429,38 +646,50 @@ struct fw_cdev_add_descriptor {
* descriptor was added
*
* Remove a descriptor block and accompanying immediate key from the local
* nodes' configuration ROMs.
* nodes' configuration ROMs. The kernel will also generate a bus reset to
* signal the change of the configuration ROM to other nodes.
*/
struct fw_cdev_remove_descriptor {
__u32 handle;
};
#define FW_CDEV_ISO_CONTEXT_TRANSMIT 0
#define FW_CDEV_ISO_CONTEXT_RECEIVE 1
#define FW_CDEV_ISO_CONTEXT_TRANSMIT 0
#define FW_CDEV_ISO_CONTEXT_RECEIVE 1
#define FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 /* added in 2.6.36 */
/**
* struct fw_cdev_create_iso_context - Create a context for isochronous IO
* @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE
* @header_size: Header size to strip for receive contexts
* @channel: Channel to bind to
* @speed: Speed for transmit contexts
* @closure: To be returned in &fw_cdev_event_iso_interrupt
* struct fw_cdev_create_iso_context - Create a context for isochronous I/O
* @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE or
* %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL
* @header_size: Header size to strip in single-channel reception
* @channel: Channel to bind to in single-channel reception or transmission
* @speed: Transmission speed
* @closure: To be returned in &fw_cdev_event_iso_interrupt or
* &fw_cdev_event_iso_interrupt_multichannel
* @handle: Handle to context, written back by kernel
*
* Prior to sending or receiving isochronous I/O, a context must be created.
* The context records information about the transmit or receive configuration
* and typically maps to an underlying hardware resource. A context is set up
* for either sending or receiving. It is bound to a specific isochronous
* channel.
* @channel.
*
* In case of multichannel reception, @header_size and @channel are ignored
* and the channels are selected by %FW_CDEV_IOC_SET_ISO_CHANNELS.
*
* For %FW_CDEV_ISO_CONTEXT_RECEIVE contexts, @header_size must be at least 4
* and must be a multiple of 4. It is ignored in other context types.
*
* @speed is ignored in receive context types.
*
* If a context was successfully created, the kernel writes back a handle to the
* context, which must be passed in for subsequent operations on that context.
*
* For receive contexts, @header_size must be at least 4 and must be a multiple
* of 4.
*
* Note that the effect of a @header_size > 4 depends on
* &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
* Limitations:
* No more than one iso context can be created per fd.
* The total number of contexts that all userspace and kernelspace drivers can
* create on a card at a time is a hardware limit, typically 4 or 8 contexts per
* direction, and of them at most one multichannel receive context.
*/
struct fw_cdev_create_iso_context {
__u32 type;
@@ -471,6 +700,22 @@ struct fw_cdev_create_iso_context {
__u32 handle;
};
/**
* struct fw_cdev_set_iso_channels - Select channels in multichannel reception
* @channels: Bitmask of channels to listen to
* @handle: Handle of the multichannel receive context
*
* @channels is the bitwise or of 1ULL << n for each channel n to listen to.
*
* The ioctl fails with errno %EBUSY if there is already another receive context
* on a channel in @channels. In that case, the bitmask of all unoccupied
* channels is returned in @channels.
*/
struct fw_cdev_set_iso_channels {
__u64 channels;
__u32 handle;
};
#define FW_CDEV_ISO_PAYLOAD_LENGTH(v) (v)
#define FW_CDEV_ISO_INTERRUPT (1 << 16)
#define FW_CDEV_ISO_SKIP (1 << 17)
@@ -481,42 +726,72 @@ struct fw_cdev_create_iso_context {
/**
* struct fw_cdev_iso_packet - Isochronous packet
* @control: Contains the header length (8 uppermost bits), the sy field
* (4 bits), the tag field (2 bits), a sync flag (1 bit),
* a skip flag (1 bit), an interrupt flag (1 bit), and the
* @control: Contains the header length (8 uppermost bits),
* the sy field (4 bits), the tag field (2 bits), a sync flag
* or a skip flag (1 bit), an interrupt flag (1 bit), and the
* payload length (16 lowermost bits)
* @header: Header and payload
* @header: Header and payload in case of a transmit context.
*
* &struct fw_cdev_iso_packet is used to describe isochronous packet queues.
*
* Use the FW_CDEV_ISO_ macros to fill in @control.
* The @header array is empty in case of receive contexts.
*
* For transmit packets, the header length must be a multiple of 4 and specifies
* the numbers of bytes in @header that will be prepended to the packet's
* payload; these bytes are copied into the kernel and will not be accessed
* after the ioctl has returned. The sy and tag fields are copied to the iso
* packet header (these fields are specified by IEEE 1394a and IEC 61883-1).
* The skip flag specifies that no packet is to be sent in a frame; when using
* this, all other fields except the interrupt flag must be zero.
* Context type %FW_CDEV_ISO_CONTEXT_TRANSMIT:
*
* For receive packets, the header length must be a multiple of the context's
* header size; if the header length is larger than the context's header size,
* multiple packets are queued for this entry. The sy and tag fields are
* ignored. If the sync flag is set, the context drops all packets until
* a packet with a matching sy field is received (the sync value to wait for is
* specified in the &fw_cdev_start_iso structure). The payload length defines
* how many payload bytes can be received for one packet (in addition to payload
* quadlets that have been defined as headers and are stripped and returned in
* the &fw_cdev_event_iso_interrupt structure). If more bytes are received, the
* additional bytes are dropped. If less bytes are received, the remaining
* bytes in this part of the payload buffer will not be written to, not even by
* the next packet, i.e., packets received in consecutive frames will not
* necessarily be consecutive in memory. If an entry has queued multiple
* packets, the payload length is divided equally among them.
* @control.HEADER_LENGTH must be a multiple of 4. It specifies the numbers of
* bytes in @header that will be prepended to the packet's payload. These bytes
* are copied into the kernel and will not be accessed after the ioctl has
* returned.
*
* When a packet with the interrupt flag set has been completed, the
* The @control.SY and TAG fields are copied to the iso packet header. These
* fields are specified by IEEE 1394a and IEC 61883-1.
*
* The @control.SKIP flag specifies that no packet is to be sent in a frame.
* When using this, all other fields except @control.INTERRUPT must be zero.
*
* When a packet with the @control.INTERRUPT flag set has been completed, an
* &fw_cdev_event_iso_interrupt event will be sent.
*
* Context type %FW_CDEV_ISO_CONTEXT_RECEIVE:
*
* @control.HEADER_LENGTH must be a multiple of the context's header_size.
* If the HEADER_LENGTH is larger than the context's header_size, multiple
* packets are queued for this entry.
*
* The @control.SY and TAG fields are ignored.
*
* If the @control.SYNC flag is set, the context drops all packets until a
* packet with a sy field is received which matches &fw_cdev_start_iso.sync.
*
* @control.PAYLOAD_LENGTH defines how many payload bytes can be received for
* one packet (in addition to payload quadlets that have been defined as headers
* and are stripped and returned in the &fw_cdev_event_iso_interrupt structure).
* If more bytes are received, the additional bytes are dropped. If fewer bytes
* are received, the remaining bytes in this part of the payload buffer will not
* be written to, not even by the next packet. I.e., packets received in
* consecutive frames will not necessarily be consecutive in memory. If an
* entry has queued multiple packets, the PAYLOAD_LENGTH is divided equally
* among them.
*
* When a packet with the @control.INTERRUPT flag set has been completed, an
* &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued
* multiple receive packets is completed when its last packet is completed.
*
* Context type %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
*
* Here, &fw_cdev_iso_packet would be more aptly named _iso_buffer_chunk since
* it specifies a chunk of the mmap()'ed buffer, while the number and alignment
* of packets to be placed into the buffer chunk is not known beforehand.
*
* @control.PAYLOAD_LENGTH is the size of the buffer chunk and specifies room
* for header, payload, padding, and trailer bytes of one or more packets.
* It must be a multiple of 4.
*
* @control.HEADER_LENGTH, TAG and SY are ignored. SYNC is treated as described
* for single-channel reception.
*
* When a buffer chunk with the @control.INTERRUPT flag set has been filled
* entirely, an &fw_cdev_event_iso_interrupt_mc event will be sent.
*/
struct fw_cdev_iso_packet {
__u32 control;
@@ -525,9 +800,9 @@ struct fw_cdev_iso_packet {
/**
* struct fw_cdev_queue_iso - Queue isochronous packets for I/O
* @packets: Userspace pointer to packet data
* @packets: Userspace pointer to an array of &fw_cdev_iso_packet
* @data: Pointer into mmap()'ed payload buffer
* @size: Size of packet data in bytes
* @size: Size of the @packets array, in bytes
* @handle: Isochronous context handle
*
* Queue a number of isochronous packets for reception or transmission.
@@ -540,6 +815,9 @@ struct fw_cdev_iso_packet {
* The kernel may or may not queue all packets, but will write back updated
* values of the @packets, @data and @size fields, so the ioctl can be
* resubmitted easily.
*
* In case of a multichannel receive context, @data must be quadlet-aligned
* relative to the buffer start.
*/
struct fw_cdev_queue_iso {
__u64 packets;
@@ -698,4 +976,39 @@ struct fw_cdev_send_stream_packet {
__u32 speed;
};
/**
* struct fw_cdev_send_phy_packet - send a PHY packet
* @closure: Passed back to userspace in the PHY-packet-sent event
* @data: First and second quadlet of the PHY packet
* @generation: The bus generation in which the packet is valid
*
* The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes
* on the same card as this device. After transmission, an
* %FW_CDEV_EVENT_PHY_PACKET_SENT event is generated.
*
* The payload @data[] shall be specified in host byte order. Usually,
* @data[1] needs to be the bitwise inverse of @data[0]. VersaPHY packets
* are an exception to this rule.
*
* The ioctl is only permitted on device files which represent a local node.
*/
struct fw_cdev_send_phy_packet {
__u64 closure;
__u32 data[2];
__u32 generation;
};
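
A tiny hypothetical sketch of the inversion rule described above; the quadlet and generation values would come from the caller's view of the bus.

#include <sys/ioctl.h>
#include <linux/firewire-cdev.h>

/* Hypothetical sketch: transmit one PHY packet. Per the rule above,
 * the second quadlet is the bitwise inverse of the first (VersaPHY
 * packets excepted). */
static int send_phy(int fd, __u32 quadlet, __u32 generation)
{
	struct fw_cdev_send_phy_packet req = {
		.closure	= 0,
		.data		= { quadlet, ~quadlet },
		.generation	= generation,
	};

	return ioctl(fd, FW_CDEV_IOC_SEND_PHY_PACKET, &req);
}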
/**
* struct fw_cdev_receive_phy_packets - start reception of PHY packets
* @closure: Passed back to userspace in phy packet events
*
* This ioctl activates issuing of %FW_CDEV_EVENT_PHY_PACKET_RECEIVED due to
* incoming PHY packets from any node on the same bus as the device.
*
* The ioctl is only permitted on device files which represent a local node.
*/
struct fw_cdev_receive_phy_packets {
__u64 closure;
};
#endif /* _LINUX_FIREWIRE_CDEV_H */

View file

@ -32,11 +32,13 @@
#define CSR_CYCLE_TIME 0x200
#define CSR_BUS_TIME 0x204
#define CSR_BUSY_TIMEOUT 0x210
#define CSR_PRIORITY_BUDGET 0x218
#define CSR_BUS_MANAGER_ID 0x21c
#define CSR_BANDWIDTH_AVAILABLE 0x220
#define CSR_CHANNELS_AVAILABLE 0x224
#define CSR_CHANNELS_AVAILABLE_HI 0x224
#define CSR_CHANNELS_AVAILABLE_LO 0x228
#define CSR_MAINT_UTILITY 0x230
#define CSR_BROADCAST_CHANNEL 0x234
#define CSR_CONFIG_ROM 0x400
#define CSR_CONFIG_ROM_END 0x800
@ -89,6 +91,11 @@ struct fw_card {
struct list_head transaction_list;
unsigned long reset_jiffies;
u32 split_timeout_hi;
u32 split_timeout_lo;
unsigned int split_timeout_cycles;
unsigned int split_timeout_jiffies;
unsigned long long guid;
unsigned max_receive;
int link_speed;
@ -104,18 +111,28 @@ struct fw_card {
bool beta_repeaters_present;
int index;
struct list_head link;
/* Work struct for BM duties. */
struct delayed_work work;
struct list_head phy_receiver_list;
struct delayed_work br_work; /* bus reset job */
bool br_short;
struct delayed_work bm_work; /* bus manager job */
int bm_retries;
int bm_generation;
__be32 bm_transaction_data[2];
int bm_node_id;
bool bm_abdicate;
bool priority_budget_implemented; /* controller feature */
bool broadcast_channel_auto_allocated; /* controller feature */
bool broadcast_channel_allocated;
u32 broadcast_channel;
__be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
__be32 maint_utility_register;
};
struct fw_attribute_group {
@ -252,7 +269,7 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
typedef void (*fw_address_callback_t)(struct fw_card *card,
struct fw_request *request,
int tcode, int destination, int source,
int generation, int speed,
int generation,
unsigned long long offset,
void *data, size_t length,
void *callback_data);
@ -269,10 +286,10 @@ struct fw_packet {
u32 timestamp;
/*
* This callback is called when the packet transmission has
* completed; for successful transmission, the status code is
* the ack received from the destination, otherwise it's a
* negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
* This callback is called when the packet transmission has completed.
* For successful transmission, the status code is the ack received
* from the destination. Otherwise it is one of the juju-specific
* rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK.
* The callback can be called from tasklet context and thus
* must never block.
*/
@ -355,17 +372,19 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc);
* scatter-gather streaming (e.g. assembling video frames automatically).
*/
struct fw_iso_packet {
u16 payload_length; /* Length of indirect payload. */
u32 interrupt:1; /* Generate interrupt on this packet */
u32 skip:1; /* Set to not send packet at all. */
u32 tag:2;
u32 sy:4;
u32 header_length:8; /* Length of immediate header. */
u32 header[0];
u16 payload_length; /* Length of indirect payload */
u32 interrupt:1; /* Generate interrupt on this packet */
u32 skip:1; /* tx: Set to not send packet at all */
/* rx: Sync bit, wait for matching sy */
u32 tag:2; /* tx: Tag in packet header */
u32 sy:4; /* tx: Sy in packet header */
u32 header_length:8; /* Length of immediate header */
u32 header[0]; /* tx: Top of 1394 isoch. data_block */
};
#define FW_ISO_CONTEXT_TRANSMIT 0
#define FW_ISO_CONTEXT_RECEIVE 1
#define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2
#define FW_ISO_CONTEXT_MATCH_TAG0 1
#define FW_ISO_CONTEXT_MATCH_TAG1 2
@ -389,24 +408,31 @@ struct fw_iso_buffer {
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
int page_count, enum dma_data_direction direction);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed);
struct fw_iso_context;
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
u32 cycle, size_t header_length,
void *header, void *data);
typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context,
dma_addr_t completed, void *data);
struct fw_iso_context {
struct fw_card *card;
int type;
int channel;
int speed;
size_t header_size;
fw_iso_callback_t callback;
union {
fw_iso_callback_t sc;
fw_iso_mc_callback_t mc;
} callback;
void *callback_data;
};
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
fw_iso_callback_t callback, void *callback_data);
int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels);
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,

View file

@ -70,4 +70,9 @@ int flex_array_clear(struct flex_array *fa, unsigned int element_nr);
void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
int flex_array_shrink(struct flex_array *fa);
#define flex_array_put_ptr(fa, nr, src, gfp) \
	flex_array_put(fa, nr, (void *)&(src), gfp)
void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr);
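A minimal usage sketch of the pointer helpers (not part of the header; struct foo is an assumption, error handling elided). Note that the src argument must be an lvalue, since the macro takes its address:

struct flex_array *fa = flex_array_alloc(sizeof(void *), 128, GFP_KERNEL);
struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

flex_array_put_ptr(fa, 0, p, GFP_KERNEL);	/* stores the pointer value */
p = flex_array_get_ptr(fa, 0);			/* reads it back */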
#endif /* _FLEX_ARRAY_H */

View file

@ -8,6 +8,7 @@
#include <linux/limits.h>
#include <linux/ioctl.h>
#include <linux/blk_types.h>
/*
* It's silly to have NR_OPEN bigger than NR_FILE, but you can change
@ -53,6 +54,7 @@ struct inodes_stat_t {
#define MAY_APPEND 8
#define MAY_ACCESS 16
#define MAY_OPEN 32
#define MAY_CHDIR 64
/*
* flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond
@ -90,6 +92,9 @@ struct inodes_stat_t {
/* Expect random access pattern */
#define FMODE_RANDOM ((__force fmode_t)0x1000)
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
/*
* The below are the various read and write types that we support. Some of
* them include behavioral modifiers that send information down to the
@ -117,7 +122,7 @@ struct inodes_stat_t {
* immediately wait on this read without caring about
* unplugging.
* READA Used for read-ahead operations. Lower priority, and the
* block layer could (in theory) choose to ignore this
* request if it runs into resource problems.
* WRITE A normal async write. Device will be plugged.
* SWRITE Like WRITE, but a special case for ll_rw_block() that
@ -136,7 +141,7 @@ struct inodes_stat_t {
* SWRITE_SYNC
* SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer.
* See SWRITE.
* WRITE_BARRIER Like WRITE, but tells the block layer that all
* WRITE_BARRIER Like WRITE_SYNC, but tells the block layer that all
* previously submitted writes must be safely on storage
* before this one is started. Also guarantees that when
* this write is complete, it itself is also safely on
@ -144,29 +149,32 @@ struct inodes_stat_t {
* of this IO.
*
*/
#define RW_MASK 1
#define RWA_MASK 2
#define READ 0
#define WRITE 1
#define READA 2 /* read-ahead - don't block if no resources */
#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */
#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
#define READ_META (READ | (1 << BIO_RW_META))
#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
#define WRITE_ODIRECT_PLUG (WRITE | (1 << BIO_RW_SYNCIO))
#define WRITE_META (WRITE | (1 << BIO_RW_META))
#define SWRITE_SYNC_PLUG \
(SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
#define RW_MASK REQ_WRITE
#define RWA_MASK REQ_RAHEAD
#define READ 0
#define WRITE RW_MASK
#define READA RWA_MASK
#define SWRITE (WRITE | READA)
#define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG)
#define READ_META (READ | REQ_META)
#define WRITE_SYNC_PLUG (WRITE | REQ_SYNC | REQ_NOIDLE)
#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
#define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC)
#define WRITE_META (WRITE | REQ_META)
#define WRITE_BARRIER (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
REQ_HARDBARRIER)
#define SWRITE_SYNC_PLUG (SWRITE | REQ_SYNC | REQ_NOIDLE)
#define SWRITE_SYNC (SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
/*
* These aren't really reads or writes; they pass down information about
* parts of the device that are now unused by the file system.
*/
#define DISCARD_NOBARRIER (WRITE | (1 << BIO_RW_DISCARD))
#define DISCARD_BARRIER (DISCARD_NOBARRIER | (1 << BIO_RW_BARRIER))
#define DISCARD_NOBARRIER (WRITE | REQ_DISCARD)
#define DISCARD_BARRIER (WRITE | REQ_DISCARD | REQ_HARDBARRIER)
#define DISCARD_SECURE (DISCARD_NOBARRIER | REQ_SECURE)
#define SEL_IN 1
#define SEL_OUT 2
@ -209,6 +217,7 @@ struct inodes_stat_t {
#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
#define MS_I_VERSION (1<<23) /* Update inode I_version field */
#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
#define MS_BORN (1<<29)
#define MS_ACTIVE (1<<30)
#define MS_NOUSER (1<<31)
@ -309,6 +318,7 @@ struct inodes_stat_t {
#define BLKALIGNOFF _IO(0x12,122)
#define BLKPBSZGET _IO(0x12,123)
#define BLKDISCARDZEROES _IO(0x12,124)
#define BLKSECDISCARD _IO(0x12,125)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
@ -407,15 +417,13 @@ extern int get_max_files(void);
extern int sysctl_nr_open;
extern struct inodes_stat_t inodes_stat;
extern int leases_enable, lease_break_time;
#ifdef CONFIG_DNOTIFY
extern int dir_notify_enable;
#endif
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
ssize_t bytes, void *private);
ssize_t bytes, void *private, int ret,
bool is_async);
/*
* Attribute flags. These should be or-ed together to figure out what
@ -685,6 +693,7 @@ struct block_device {
*/
#define PAGECACHE_TAG_DIRTY 0
#define PAGECACHE_TAG_WRITEBACK 1
#define PAGECACHE_TAG_TOWRITE 2
int mapping_tagged(struct address_space *mapping, int tag);
@ -768,12 +777,7 @@ struct inode {
#ifdef CONFIG_FSNOTIFY
__u32 i_fsnotify_mask; /* all events this inode cares about */
struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */
#endif
#ifdef CONFIG_INOTIFY
struct list_head inotify_watches; /* watches on this inode */
struct mutex inotify_mutex; /* protects the watches list */
struct hlist_head i_fsnotify_marks;
#endif
unsigned long i_state;
@ -1479,8 +1483,8 @@ struct block_device_operations;
/*
* NOTE:
* read, write, poll, fsync, readv, writev, unlocked_ioctl and compat_ioctl
* can be called without the big kernel lock held in all filesystems.
* all file operations except setlease can be called without
* the big kernel lock held in all filesystems.
*/
struct file_operations {
struct module *owner;
@ -1491,7 +1495,6 @@ struct file_operations {
ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
int (*readdir) (struct file *, void *, filldir_t);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
@ -1561,8 +1564,8 @@ struct super_operations {
void (*dirty_inode) (struct inode *);
int (*write_inode) (struct inode *, struct writeback_control *wbc);
void (*drop_inode) (struct inode *);
void (*delete_inode) (struct inode *);
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
void (*write_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
@ -1570,7 +1573,6 @@ struct super_operations {
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
void (*clear_inode) (struct inode *);
void (*umount_begin) (struct super_block *);
int (*show_options)(struct seq_file *, struct vfsmount *);
@ -1615,8 +1617,8 @@ struct super_operations {
* I_FREEING Set when inode is about to be freed but still has dirty
* pages or buffers attached or the inode itself is still
* dirty.
* I_CLEAR Set by clear_inode(). In this state the inode is clean
* and can be destroyed.
* I_CLEAR Added by end_writeback(). In this state the inode is clean
* and can be destroyed. Inode keeps I_FREEING.
*
* Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
* prohibited for many purposes. iget() must wait for
@ -1813,7 +1815,8 @@ extern struct vfsmount *collect_mounts(struct path *);
extern void drop_collected_mounts(struct vfsmount *);
extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
struct vfsmount *);
extern int vfs_statfs(struct dentry *, struct kstatfs *);
extern int vfs_statfs(struct path *, struct kstatfs *);
extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
@ -2163,9 +2166,8 @@ extern void iput(struct inode *);
extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
extern int inode_needs_sync(struct inode *inode);
extern void generic_delete_inode(struct inode *inode);
extern void generic_drop_inode(struct inode *inode);
extern int generic_detach_inode(struct inode *inode);
extern int generic_delete_inode(struct inode *inode);
extern int generic_drop_inode(struct inode *inode);
extern struct inode *ilookup5_nowait(struct super_block *sb,
unsigned long hashval, int (*test)(struct inode *, void *),
@ -2182,7 +2184,7 @@ extern void unlock_new_inode(struct inode *);
extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void end_writeback(struct inode *);
extern void destroy_inode(struct inode *);
extern void __destroy_inode(struct inode *);
extern struct inode *new_inode(struct super_block *);
@ -2198,7 +2200,6 @@ static inline void insert_inode_hash(struct inode *inode) {
extern void file_move(struct file *f, struct list_head *list);
extern void file_kill(struct file *f);
#ifdef CONFIG_BLOCK
struct bio;
extern void submit_bio(int, struct bio *);
extern int bdev_read_only(struct block_device *);
#endif
@ -2265,19 +2266,8 @@ static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
#endif
#ifdef CONFIG_BLOCK
struct bio;
typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
loff_t file_offset);
void dio_end_io(struct bio *bio, int error);
ssize_t __blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int lock_type);
ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int lock_type);
enum {
/* need locking between buffered and direct access */
@ -2287,24 +2277,13 @@ enum {
DIO_SKIP_HOLES = 0x02,
};
static inline ssize_t blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb,
struct inode *inode, struct block_device *bdev, const struct iovec *iov,
loff_t offset, unsigned long nr_segs, get_block_t get_block,
dio_iodone_t end_io)
{
return __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov, offset,
nr_segs, get_block, end_io, NULL,
DIO_LOCKING | DIO_SKIP_HOLES);
}
void dio_end_io(struct bio *bio, int error);
ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int flags);
static inline ssize_t blockdev_direct_IO_no_locking_newtrunc(int rw, struct kiocb *iocb,
struct inode *inode, struct block_device *bdev, const struct iovec *iov,
loff_t offset, unsigned long nr_segs, get_block_t get_block,
dio_iodone_t end_io)
{
return __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov, offset,
nr_segs, get_block, end_io, NULL, 0);
}
static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
struct inode *inode, struct block_device *bdev, const struct iovec *iov,
loff_t offset, unsigned long nr_segs, get_block_t get_block,
@ -2314,15 +2293,6 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
nr_segs, get_block, end_io, NULL,
DIO_LOCKING | DIO_SKIP_HOLES);
}
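For context (a sketch, not from this file): a filesystem's ->direct_IO method typically wraps this helper, passing its own get_block_t; myfs_get_block and myfs_direct_IO are assumed names.

static ssize_t myfs_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* DIO_LOCKING | DIO_SKIP_HOLES semantics, per the wrapper above */
	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				  iov, offset, nr_segs, myfs_get_block, NULL);
}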
static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
struct inode *inode, struct block_device *bdev, const struct iovec *iov,
loff_t offset, unsigned long nr_segs, get_block_t get_block,
dio_iodone_t end_io)
{
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
nr_segs, get_block, end_io, NULL, 0);
}
#endif
extern const struct file_operations generic_ro_fops;
@ -2349,10 +2319,10 @@ void inode_set_bytes(struct inode *inode, loff_t bytes);
extern int vfs_readdir(struct file *, filldir_t, void *);
extern int vfs_stat(char __user *, struct kstat *);
extern int vfs_lstat(char __user *, struct kstat *);
extern int vfs_stat(const char __user *, struct kstat *);
extern int vfs_lstat(const char __user *, struct kstat *);
extern int vfs_fstat(unsigned int, struct kstat *);
extern int vfs_fstatat(int , char __user *, struct kstat *, int);
extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
unsigned long arg);
@ -2384,7 +2354,6 @@ extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
extern int simple_setsize(struct inode *, loff_t);
extern int noop_fsync(struct file *, int);
extern int simple_empty(struct dentry *);
extern int simple_readpage(struct file *file, struct page *page);
@ -2421,8 +2390,7 @@ extern int buffer_migrate_page(struct address_space *,
extern int inode_change_ok(const struct inode *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
extern int __must_check inode_setattr(struct inode *, const struct iattr *);
extern void generic_setattr(struct inode *inode, const struct iattr *attr);
extern void setattr_copy(struct inode *inode, const struct iattr *attr);
extern void file_update_time(struct file *file);
@ -2513,7 +2481,8 @@ int proc_nr_files(struct ctl_table *table, int write,
int __init get_filesystem_list(char *buf);
#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE))
#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
(flag & FMODE_NONOTIFY)))
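Worked example of the mapping (a sketch of the arithmetic, using the FMODE_* values from this header):

/*
 *   O_RDONLY (0) -> 0 + 1 = 1 -> FMODE_READ
 *   O_WRONLY (1) -> 1 + 1 = 2 -> FMODE_WRITE
 *   O_RDWR   (2) -> 2 + 1 = 3 -> FMODE_READ | FMODE_WRITE
 * FMODE_NONOTIFY is passed through unchanged, so files opened by
 * fanotify itself do not generate further fanotify events.
 */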
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */

View file

@ -21,4 +21,31 @@ extern void free_fs_struct(struct fs_struct *);
extern void daemonize_fs_struct(void);
extern int unshare_fs_struct(void);
static inline void get_fs_root(struct fs_struct *fs, struct path *root)
{
read_lock(&fs->lock);
*root = fs->root;
path_get(root);
read_unlock(&fs->lock);
}
static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
{
read_lock(&fs->lock);
*pwd = fs->pwd;
path_get(pwd);
read_unlock(&fs->lock);
}
static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
struct path *pwd)
{
read_lock(&fs->lock);
*root = fs->root;
path_get(root);
*pwd = fs->pwd;
path_get(pwd);
read_unlock(&fs->lock);
}
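Usage sketch (not part of the header): callers receive counted references and drop them with path_put() when done.

struct path pwd;

get_fs_pwd(current->fs, &pwd);
/* ... use pwd.mnt / pwd.dentry ... */
path_put(&pwd);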
#endif /* _LINUX_FS_STRUCT_H */

View file

@ -20,7 +20,7 @@
#include <linux/fscache.h>
#include <linux/sched.h>
#include <linux/slow-work.h>
#include <linux/workqueue.h>
#define NR_MAXCACHES BITS_PER_LONG
@ -76,18 +76,14 @@ typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
struct fscache_operation {
union {
struct work_struct fast_work; /* record for fast ops */
struct slow_work slow_work; /* record for (very) slow ops */
};
struct work_struct work; /* record for async ops */
struct list_head pend_link; /* link in object->pending_ops */
struct fscache_object *object; /* object to be operated upon */
unsigned long flags;
#define FSCACHE_OP_TYPE 0x000f /* operation type */
#define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */
#define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */
#define FSCACHE_OP_MYTHREAD 0x0003 /* - processing is done by issuing thread, not pool */
#define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */
#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done by issuing thread, not pool */
#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
#define FSCACHE_OP_DEAD 6 /* op is now dead */
@ -105,7 +101,8 @@ struct fscache_operation {
/* operation releaser */
fscache_operation_release_t release;
#ifdef CONFIG_SLOW_WORK_DEBUG
#ifdef CONFIG_WORKQUEUE_DEBUGFS
struct work_struct put_work; /* work to delay operation put */
const char *name; /* operation name */
const char *state; /* operation state */
#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
@ -117,7 +114,7 @@ struct fscache_operation {
};
extern atomic_t fscache_op_debug_id;
extern const struct slow_work_ops fscache_op_slow_work_ops;
extern void fscache_op_work_func(struct work_struct *work);
extern void fscache_enqueue_operation(struct fscache_operation *);
extern void fscache_put_operation(struct fscache_operation *);
@ -128,33 +125,21 @@ extern void fscache_put_operation(struct fscache_operation *);
* @release: The release function to assign
*
* Do basic initialisation of an operation. The caller must still set flags,
* object, either fast_work or slow_work if necessary, and processor if needed.
* object and processor if needed.
*/
static inline void fscache_operation_init(struct fscache_operation *op,
fscache_operation_release_t release)
fscache_operation_processor_t processor,
fscache_operation_release_t release)
{
INIT_WORK(&op->work, fscache_op_work_func);
atomic_set(&op->usage, 1);
op->debug_id = atomic_inc_return(&fscache_op_debug_id);
op->processor = processor;
op->release = release;
INIT_LIST_HEAD(&op->pend_link);
fscache_set_op_state(op, "Init");
}
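Sketch of backend usage under the workqueue scheme (not from this file; my_processor and my_release are assumed backend callbacks):

struct fscache_operation *op = kzalloc(sizeof(*op), GFP_NOIO);

if (op) {
	fscache_operation_init(op, my_processor, my_release);
	op->flags = FSCACHE_OP_ASYNC;	/* processor may sleep for disk */
	/* the caller still sets op->object before enqueueing */
}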
/**
* fscache_operation_init_slow - Do additional initialisation of a slow op
* @op: The operation to initialise
* @processor: The processor function to assign
*
* Do additional initialisation of an operation as required for slow work.
*/
static inline
void fscache_operation_init_slow(struct fscache_operation *op,
fscache_operation_processor_t processor)
{
op->processor = processor;
slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
}
/*
* data read operation
*/
@ -389,7 +374,7 @@ struct fscache_object {
struct fscache_cache *cache; /* cache that supplied this object */
struct fscache_cookie *cookie; /* netfs's file/index object */
struct fscache_object *parent; /* parent object */
struct slow_work work; /* attention scheduling record */
struct work_struct work; /* attention scheduling record */
struct list_head dependents; /* FIFO of dependent objects */
struct list_head dep_link; /* link in parent's dependents list */
struct list_head pending_ops; /* unstarted operations on this object */
@ -411,7 +396,7 @@ extern const char *fscache_object_states[];
(test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
(obj)->state >= FSCACHE_OBJECT_DYING)
extern const struct slow_work_ops fscache_object_slow_work_ops;
extern void fscache_object_work_func(struct work_struct *work);
/**
* fscache_object_init - Initialise a cache object description
@ -433,7 +418,7 @@ void fscache_object_init(struct fscache_object *object,
spin_lock_init(&object->lock);
INIT_LIST_HEAD(&object->cache_link);
INIT_HLIST_NODE(&object->cookie_link);
vslow_work_init(&object->work, &fscache_object_slow_work_ops);
INIT_WORK(&object->work, fscache_object_work_func);
INIT_LIST_HEAD(&object->dependents);
INIT_LIST_HEAD(&object->dep_link);
INIT_LIST_HEAD(&object->pending_ops);
@ -534,6 +519,8 @@ extern void fscache_io_error(struct fscache_cache *cache);
extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
struct pagevec *pagevec);
extern bool fscache_object_sleep_till_congested(signed long *timeoutp);
extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
const void *data,
uint16_t datalen);

View file

@ -85,7 +85,7 @@ struct fscache_cookie_def {
/* get an index key
* - should store the key data in the buffer
* - should return the amount of amount stored
* - should return the amount of data stored
* - not permitted to return an error
* - the netfs data from the cookie being used as the source is
* presented
@ -454,6 +454,7 @@ int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
* @cookie: The cookie representing the cache object
* @mapping: The netfs inode mapping to which the pages will be attached
* @pages: A list of potential netfs pages to be filled
* @nr_pages: Number of pages to be read and/or allocated
* @end_io_func: The callback to invoke when and if each page is filled
* @context: An arbitrary piece of data to pass on to end_io_func()
* @gfp: The conditions under which memory allocation should be made

include/linux/fsl-diu-fb.h (new file, 223 lines)
View file

@ -0,0 +1,223 @@
/*
* Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
*
* Freescale DIU Frame Buffer device driver
*
* Authors: Hongjun Chen <hong-jun.chen@freescale.com>
* Paul Widmer <paul.widmer@freescale.com>
* Srikanth Srinivasan <srikanth.srinivasan@freescale.com>
* York Sun <yorksun@freescale.com>
*
* Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef __FSL_DIU_FB_H__
#define __FSL_DIU_FB_H__
/* Arbitrary threshold to determine the allocation method
* See mpc8610fb_set_par(), map_video_memory(), and unmap_video_memory()
*/
#define MEM_ALLOC_THRESHOLD (1024*768*4+32)
/* Minimum value that the pixel clock can be set to in picoseconds.
* This is determined by platform clock/3, where the minimum platform
* clock is 533 MHz. This gives 5629 picoseconds.
*/
#define MIN_PIX_CLK 5629
#define MAX_PIX_CLK 96096
#include <linux/types.h>
struct mfb_alpha {
int enable;
int alpha;
};
struct mfb_chroma_key {
int enable;
__u8 red_max;
__u8 green_max;
__u8 blue_max;
__u8 red_min;
__u8 green_min;
__u8 blue_min;
};
struct aoi_display_offset {
int x_aoi_d;
int y_aoi_d;
};
#define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key)
#define MFB_WAIT_FOR_VSYNC _IOW('F', 0x20, u_int32_t)
#define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8)
#define MFB_SET_ALPHA 0x80014d00
#define MFB_GET_ALPHA 0x40014d00
#define MFB_SET_AOID 0x80084d04
#define MFB_GET_AOID 0x40084d04
#define MFB_SET_PIXFMT 0x80014d08
#define MFB_GET_PIXFMT 0x40014d08
#define FBIOGET_GWINFO 0x46E0
#define FBIOPUT_GWINFO 0x46E1
#ifdef __KERNEL__
#include <linux/spinlock.h>
/*
* These are the fields of the area descriptor (in DDR memory) for every plane
*/
struct diu_ad {
/* Word 0(32-bit) in DDR memory */
/* __u16 comp; */
/* __u16 pixel_s:2; */
/* __u16 pallete:1; */
/* __u16 red_c:2; */
/* __u16 green_c:2; */
/* __u16 blue_c:2; */
/* __u16 alpha_c:3; */
/* __u16 byte_f:1; */
/* __u16 res0:3; */
__be32 pix_fmt; /* hard coding pixel format */
/* Word 1(32-bit) in DDR memory */
__le32 addr;
/* Word 2(32-bit) in DDR memory */
/* __u32 delta_xs:11; */
/* __u32 res1:1; */
/* __u32 delta_ys:11; */
/* __u32 res2:1; */
/* __u32 g_alpha:8; */
__le32 src_size_g_alpha;
/* Word 3(32-bit) in DDR memory */
/* __u32 delta_xi:11; */
/* __u32 res3:5; */
/* __u32 delta_yi:11; */
/* __u32 res4:3; */
/* __u32 flip:2; */
__le32 aoi_size;
/* Word 4(32-bit) in DDR memory */
/*__u32 offset_xi:11;
__u32 res5:5;
__u32 offset_yi:11;
__u32 res6:5;
*/
__le32 offset_xyi;
/* Word 5(32-bit) in DDR memory */
/*__u32 offset_xd:11;
__u32 res7:5;
__u32 offset_yd:11;
__u32 res8:5; */
__le32 offset_xyd;
/* Word 6(32-bit) in DDR memory */
__u8 ckmax_r;
__u8 ckmax_g;
__u8 ckmax_b;
__u8 res9;
/* Word 7(32-bit) in DDR memory */
__u8 ckmin_r;
__u8 ckmin_g;
__u8 ckmin_b;
__u8 res10;
/* __u32 res10:8; */
/* Word 8(32-bit) in DDR memory */
__le32 next_ad;
/* Word 9(32-bit) in DDR memory, just for 64-bit alignment */
__u32 paddr;
} __attribute__ ((packed));
/* DIU register map */
struct diu {
__be32 desc[3];
__be32 gamma;
__be32 pallete;
__be32 cursor;
__be32 curs_pos;
__be32 diu_mode;
__be32 bgnd;
__be32 bgnd_wb;
__be32 disp_size;
__be32 wb_size;
__be32 wb_mem_addr;
__be32 hsyn_para;
__be32 vsyn_para;
__be32 syn_pol;
__be32 thresholds;
__be32 int_status;
__be32 int_mask;
__be32 colorbar[8];
__be32 filling;
__be32 plut;
} __attribute__ ((packed));
struct diu_hw {
struct diu *diu_reg;
spinlock_t reg_lock;
__u32 mode; /* DIU operation mode */
};
struct diu_addr {
__u8 __iomem *vaddr; /* Virtual address */
dma_addr_t paddr; /* Physical address */
__u32 offset;
};
struct diu_pool {
struct diu_addr ad;
struct diu_addr gamma;
struct diu_addr pallete;
struct diu_addr cursor;
};
#define FSL_DIU_BASE_OFFSET 0x2C000 /* Offset of DIU */
#define INT_LCDC 64 /* DIU interrupt number */
#define FSL_AOI_NUM 6 /* 5 AOIs and one dummy AOI */
/* 1 AOI for plane 0, 2 AOIs each for planes 1 and 2 */
/* Minimum X and Y resolutions */
#define MIN_XRES 64
#define MIN_YRES 64
/* HW cursor parameters */
#define MAX_CURS 32
/* Modes of operation of DIU */
#define MFB_MODE0 0 /* DIU off */
#define MFB_MODE1 1 /* All three planes output to display */
#define MFB_MODE2 2 /* Plane 1 to display, planes 2+3 written back*/
#define MFB_MODE3 3 /* All three planes written back to memory */
#define MFB_MODE4 4 /* Color bar generation */
/* INT_STATUS/INT_MASK field descriptions */
#define INT_VSYNC 0x01 /* Vsync interrupt */
#define INT_VSYNC_WB 0x02 /* Vsync interrupt for write back operation */
#define INT_UNDRUN 0x04 /* Underrun exception interrupt */
#define INT_PARERR 0x08 /* Display parameters error interrupt */
#define INT_LS_BF_VS 0x10 /* Lines-before-vsync interrupt */
/* Panels' operation modes */
#define MFB_TYPE_OUTPUT 0 /* Panel output to display */
#define MFB_TYPE_OFF 1 /* Panel off */
#define MFB_TYPE_WB 2 /* Panel written back to memory */
#define MFB_TYPE_TEST 3 /* Panel generate color bar */
#endif /* __KERNEL__ */
#endif /* __FSL_DIU_FB_H__ */

View file

@ -11,8 +11,6 @@
* (C) Copyright 2005 Robert Love
*/
#include <linux/dnotify.h>
#include <linux/inotify.h>
#include <linux/fsnotify_backend.h>
#include <linux/audit.h>
#include <linux/slab.h>
@ -21,35 +19,53 @@
* fsnotify_d_instantiate - instantiate a dentry for inode
* Called with dcache_lock held.
*/
static inline void fsnotify_d_instantiate(struct dentry *entry,
struct inode *inode)
static inline void fsnotify_d_instantiate(struct dentry *dentry,
struct inode *inode)
{
__fsnotify_d_instantiate(entry, inode);
inotify_d_instantiate(entry, inode);
__fsnotify_d_instantiate(dentry, inode);
}
/* Notify this dentry's parent about a child's events. */
static inline void fsnotify_parent(struct dentry *dentry, __u32 mask)
static inline void fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
__fsnotify_parent(dentry, mask);
if (!dentry)
dentry = path->dentry;
inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
__fsnotify_parent(path, dentry, mask);
}
/* simple call site for access decisions */
static inline int fsnotify_perm(struct file *file, int mask)
{
struct path *path = &file->f_path;
struct inode *inode = path->dentry->d_inode;
__u32 fsnotify_mask = 0;
if (file->f_mode & FMODE_NONOTIFY)
return 0;
if (!(mask & (MAY_READ | MAY_OPEN)))
return 0;
if (mask & MAY_OPEN)
fsnotify_mask = FS_OPEN_PERM;
else if (mask & MAY_READ)
fsnotify_mask = FS_ACCESS_PERM;
else
BUG();
return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
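Schematically, a VFS call site consults the hook like this (a sketch, not the exact code):

int err = fsnotify_perm(file, MAY_OPEN);
if (err)
	return err;	/* e.g. a fanotify listener denied the open */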
/*
* fsnotify_d_move - entry has been moved
* Called with dcache_lock and entry->d_lock held.
* fsnotify_d_move - dentry has been moved
* Called with dcache_lock and dentry->d_lock held.
*/
static inline void fsnotify_d_move(struct dentry *entry)
static inline void fsnotify_d_move(struct dentry *dentry)
{
/*
* On move we need to update entry->d_flags to indicate if the new parent
* cares about events from this entry.
* On move we need to update dentry->d_flags to indicate if the new parent
* cares about events from this dentry.
*/
__fsnotify_update_dcache_flags(entry);
inotify_d_move(entry);
__fsnotify_update_dcache_flags(dentry);
}
/*
@ -57,8 +73,6 @@ static inline void fsnotify_d_move(struct dentry *entry)
*/
static inline void fsnotify_link_count(struct inode *inode)
{
inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
@ -66,45 +80,31 @@ static inline void fsnotify_link_count(struct inode *inode)
* fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
*/
static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
const char *old_name,
const unsigned char *old_name,
int isdir, struct inode *target, struct dentry *moved)
{
struct inode *source = moved->d_inode;
u32 in_cookie = inotify_get_cookie();
u32 fs_cookie = fsnotify_get_cookie();
__u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
__u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
const char *new_name = moved->d_name.name;
const unsigned char *new_name = moved->d_name.name;
if (old_dir == new_dir)
old_dir_mask |= FS_DN_RENAME;
if (isdir) {
isdir = IN_ISDIR;
old_dir_mask |= FS_IN_ISDIR;
new_dir_mask |= FS_IN_ISDIR;
}
inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir, in_cookie, old_name,
source);
inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, in_cookie, new_name,
source);
fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
if (target) {
inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL);
inotify_inode_is_dead(target);
/* this is really a link_count change not a removal */
if (target)
fsnotify_link_count(target);
}
if (source) {
inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL);
if (source)
fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
audit_inode_child(moved, new_dir);
}
@ -116,6 +116,14 @@ static inline void fsnotify_inode_delete(struct inode *inode)
__fsnotify_inode_delete(inode);
}
/*
* fsnotify_vfsmount_delete - a vfsmount is being destroyed, clean up is needed
*/
static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
{
__fsnotify_vfsmount_delete(mnt);
}
/*
* fsnotify_nameremove - a filename was removed from a directory
*/
@ -126,7 +134,7 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
if (isdir)
mask |= FS_IN_ISDIR;
fsnotify_parent(dentry, mask);
fsnotify_parent(NULL, dentry, mask);
}
/*
@ -134,9 +142,6 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
*/
static inline void fsnotify_inoderemove(struct inode *inode)
{
inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL);
inotify_inode_is_dead(inode);
fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
__fsnotify_inode_delete(inode);
}
@ -146,8 +151,6 @@ static inline void fsnotify_inoderemove(struct inode *inode)
*/
static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
{
inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name,
dentry->d_inode);
audit_inode_child(dentry, inode);
fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
@ -160,8 +163,6 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
*/
static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
{
inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name,
inode);
fsnotify_link_count(inode);
audit_inode_child(new_dentry, dir);
@ -176,7 +177,6 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
__u32 mask = (FS_CREATE | FS_IN_ISDIR);
struct inode *d_inode = dentry->d_inode;
inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode);
audit_inode_child(dentry, inode);
fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
@ -185,52 +185,55 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
/*
* fsnotify_access - file was read
*/
static inline void fsnotify_access(struct dentry *dentry)
static inline void fsnotify_access(struct file *file)
{
struct inode *inode = dentry->d_inode;
struct path *path = &file->f_path;
struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_ACCESS;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
if (!(file->f_mode & FMODE_NONOTIFY)) {
fsnotify_parent(path, NULL, mask);
fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
}
/*
* fsnotify_modify - file was modified
*/
static inline void fsnotify_modify(struct dentry *dentry)
static inline void fsnotify_modify(struct file *file)
{
struct inode *inode = dentry->d_inode;
struct path *path = &file->f_path;
struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_MODIFY;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
if (!(file->f_mode & FMODE_NONOTIFY)) {
fsnotify_parent(path, NULL, mask);
fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
}
/*
* fsnotify_open - file was opened
*/
static inline void fsnotify_open(struct dentry *dentry)
static inline void fsnotify_open(struct file *file)
{
struct inode *inode = dentry->d_inode;
struct path *path = &file->f_path;
struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_OPEN;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
if (!(file->f_mode & FMODE_NONOTIFY)) {
fsnotify_parent(path, NULL, mask);
fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
}
/*
@ -238,18 +241,18 @@ static inline void fsnotify_open(struct dentry *dentry)
*/
static inline void fsnotify_close(struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
struct path *path = &file->f_path;
struct inode *inode = file->f_path.dentry->d_inode;
fmode_t mode = file->f_mode;
__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
if (!(file->f_mode & FMODE_NONOTIFY)) {
fsnotify_parent(path, NULL, mask);
fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
}
/*
@ -263,9 +266,7 @@ static inline void fsnotify_xattr(struct dentry *dentry)
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify_parent(NULL, dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
@ -299,19 +300,18 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
if (mask) {
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
fsnotify_parent(dentry, mask);
fsnotify_parent(NULL, dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
}
#if defined(CONFIG_INOTIFY) || defined(CONFIG_FSNOTIFY) /* notify helpers */
#if defined(CONFIG_FSNOTIFY) /* notify helpers */
/*
* fsnotify_oldname_init - save off the old filename before we change it
*/
static inline const char *fsnotify_oldname_init(const char *name)
static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
{
return kstrdup(name, GFP_KERNEL);
}
@ -319,22 +319,22 @@ static inline const char *fsnotify_oldname_init(const char *name)
/*
* fsnotify_oldname_free - free the name we got from fsnotify_oldname_init
*/
static inline void fsnotify_oldname_free(const char *old_name)
static inline void fsnotify_oldname_free(const unsigned char *old_name)
{
kfree(old_name);
}
#else /* CONFIG_INOTIFY || CONFIG_FSNOTIFY */
#else /* CONFIG_FSNOTIFY */
static inline const char *fsnotify_oldname_init(const char *name)
static inline const char *fsnotify_oldname_init(const unsigned char *name)
{
return NULL;
}
static inline void fsnotify_oldname_free(const char *old_name)
static inline void fsnotify_oldname_free(const unsigned char *old_name)
{
}
#endif /* ! CONFIG_INOTIFY */
#endif /* CONFIG_FSNOTIFY */
#endif /* _LINUX_FS_NOTIFY_H */

View file

@ -41,6 +41,10 @@
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
#define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */
#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */
#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */
#define FS_IN_ISDIR 0x40000000 /* event occurred against dir */
#define FS_IN_ONESHOT 0x80000000 /* only send event once */
@ -58,13 +62,20 @@
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
FS_DELETE)
/* listeners that hard code group numbers near the top */
#define DNOTIFY_GROUP_NUM UINT_MAX
#define INOTIFY_GROUP_NUM (DNOTIFY_GROUP_NUM-1)
#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \
FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \
FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
FS_OPEN_PERM | FS_ACCESS_PERM | FS_EXCL_UNLINK | \
FS_IN_ISDIR | FS_IN_ONESHOT | FS_DN_RENAME | \
FS_DN_MULTISHOT | FS_EVENT_ON_CHILD)
struct fsnotify_group;
struct fsnotify_event;
struct fsnotify_mark_entry;
struct fsnotify_mark;
struct fsnotify_event_private_data;
/*
@ -80,10 +91,16 @@ struct fsnotify_event_private_data;
* valid group and inode to use to clean up.
*/
struct fsnotify_ops {
bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, __u32 mask);
int (*handle_event)(struct fsnotify_group *group, struct fsnotify_event *event);
bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
__u32 mask, void *data, int data_type);
int (*handle_event)(struct fsnotify_group *group,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
struct fsnotify_event *event);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
void (*free_event_priv)(struct fsnotify_event_private_data *priv);
};
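A minimal listener might wire these callbacks up as follows (a sketch; all my_* names are assumptions):

static bool my_should_send(struct fsnotify_group *group, struct inode *inode,
			   struct fsnotify_mark *inode_mark,
			   struct fsnotify_mark *vfsmount_mark,
			   __u32 mask, void *data, int data_type)
{
	return true;		/* take everything the marks let through */
}

static int my_handle_event(struct fsnotify_group *group,
			   struct fsnotify_mark *inode_mark,
			   struct fsnotify_mark *vfsmount_mark,
			   struct fsnotify_event *event)
{
	return 0;		/* consume the event */
}

static const struct fsnotify_ops my_fsnotify_ops = {
	.should_send_event = my_should_send,
	.handle_event	   = my_handle_event,
};

A group is then obtained with fsnotify_alloc_group(&my_fsnotify_ops), as declared further down.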
@ -94,22 +111,6 @@ struct fsnotify_ops {
* everything will be cleaned up.
*/
struct fsnotify_group {
/*
* global list of all groups receiving events from fsnotify.
* anchored by fsnotify_groups and protected by either fsnotify_grp_mutex
* or fsnotify_grp_srcu depending on write vs read.
*/
struct list_head group_list;
/*
* Defines all of the event types in which this group is interested.
* This mask is a bitwise OR of the FS_* events from above. Each time
* this mask changes for a group (if it changes) the correct functions
* must be called to update the global structures which indicate global
* interest in event types.
*/
__u32 mask;
/*
* How the refcnt is used is up to each group. When the refcnt hits 0
* fsnotify will clean up all of the resources associated with this group.
@ -119,7 +120,6 @@ struct fsnotify_group {
* closed.
*/
atomic_t refcnt; /* things with interest in this group */
unsigned int group_num; /* simply prevents accidental group collision */
const struct fsnotify_ops *ops; /* how this group handles things */
@ -130,15 +130,12 @@ struct fsnotify_group {
unsigned int q_len; /* events on the queue */
unsigned int max_events; /* maximum events allowed on the list */
/* stores all fastpath entries assoc with this group so they can be cleaned on unregister */
spinlock_t mark_lock; /* protect mark_entries list */
atomic_t num_marks; /* 1 for each mark entry and 1 for not being
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
spinlock_t mark_lock; /* protect marks_list */
atomic_t num_marks; /* 1 for each mark and 1 for not being
* past the point of no return when freeing
* a group */
struct list_head mark_entries; /* all inode mark entries for this group */
/* prevents double list_del of group_list. protected by global fsnotify_grp_mutex */
bool on_group_list;
struct list_head marks_list; /* all inode marks for this group */
/* groups can define private fields here or use the void *private */
union {
@ -152,6 +149,17 @@ struct fsnotify_group {
struct user_struct *user;
} inotify_data;
#endif
#ifdef CONFIG_FANOTIFY
struct fanotify_group_private_data {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
/* allows a group to block waiting for a userspace response */
struct mutex access_mutex;
struct list_head access_list;
wait_queue_head_t access_waitq;
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
int f_flags;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
};
@ -210,20 +218,42 @@ struct fsnotify_event {
#define FSNOTIFY_EVENT_NONE 0
#define FSNOTIFY_EVENT_PATH 1
#define FSNOTIFY_EVENT_INODE 2
#define FSNOTIFY_EVENT_FILE 3
int data_type; /* which of the above union we have */
atomic_t refcnt; /* how many groups still are using/need to send this event */
__u32 mask; /* the type of access, bitwise OR for FS_* event types */
u32 sync_cookie; /* used to correlate events, namely inotify mv events */
char *file_name;
const unsigned char *file_name;
size_t name_len;
struct pid *tgid;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
__u32 response; /* userspace answer to question */
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
struct list_head private_data_list; /* groups can store private data here */
};
/*
* a mark is simply an entry attached to an in core inode which allows an
* Inode specific fields in an fsnotify_mark
*/
struct fsnotify_inode_mark {
struct inode *inode; /* inode this mark is associated with */
struct hlist_node i_list; /* list of marks by inode->i_fsnotify_marks */
struct list_head free_i_list; /* tmp list used when freeing this mark */
};
/*
* Mount point specific fields in an fsnotify_mark
*/
struct fsnotify_vfsmount_mark {
struct vfsmount *mnt; /* vfsmount this mark is associated with */
struct hlist_node m_list; /* list of marks by mnt->mnt_fsnotify_marks */
struct list_head free_m_list; /* tmp list used when freeing this mark */
};
/*
* a mark is simply an object attached to an in-core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
*
@ -232,19 +262,28 @@ struct fsnotify_event {
* (such as dnotify) will flush these when the open fd is closed and not at
* inode eviction or modification.
*/
struct fsnotify_mark_entry {
__u32 mask; /* mask this mark entry is for */
struct fsnotify_mark {
__u32 mask; /* mask this mark is for */
/* we hold ref for each i_list and g_list. also one ref for each 'thing'
* in kernel that found and may be using this mark. */
atomic_t refcnt; /* active things looking at this mark */
struct inode *inode; /* inode this entry is associated with */
struct fsnotify_group *group; /* group this mark entry is for */
struct hlist_node i_list; /* list of mark_entries by inode->i_fsnotify_mark_entries */
struct list_head g_list; /* list of mark_entries by group->i_fsnotify_mark_entries */
spinlock_t lock; /* protect group, inode, and killme */
struct list_head free_i_list; /* tmp list used when freeing this mark */
struct fsnotify_group *group; /* group this mark is for */
struct list_head g_list; /* list of marks by group->i_fsnotify_marks */
spinlock_t lock; /* protect group and inode */
union {
struct fsnotify_inode_mark i;
struct fsnotify_vfsmount_mark m;
};
__u32 ignored_mask; /* events types to ignore */
struct list_head free_g_list; /* tmp list used when freeing this mark */
void (*free_mark)(struct fsnotify_mark_entry *entry); /* called on final put+free */
#define FSNOTIFY_MARK_FLAG_INODE 0x01
#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04
#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
unsigned int flags; /* vfsmount or inode mark? */
struct list_head destroy_list;
void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
};
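Attaching an inode mark under the reworked API could look like this sketch (my_free_mark, group and inode are assumptions):

static void my_free_mark(struct fsnotify_mark *mark)
{
	kfree(mark);
}

struct fsnotify_mark *mark = kzalloc(sizeof(*mark), GFP_KERNEL);

fsnotify_init_mark(mark, my_free_mark);
mark->mask = FS_MODIFY | FS_CLOSE_WRITE;
/* inode mark: inode set, vfsmount NULL, duplicates disallowed */
if (fsnotify_add_mark(mark, group, inode, NULL, 0))
	fsnotify_put_mark(mark);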
#ifdef CONFIG_FSNOTIFY
@ -252,10 +291,11 @@ struct fsnotify_mark_entry {
/* called from the vfs helpers */
/* main fsnotify call to send events */
extern void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const char *name, u32 cookie);
extern void __fsnotify_parent(struct dentry *dentry, __u32 mask);
extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const unsigned char *name, u32 cookie);
extern void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern u32 fsnotify_get_cookie(void);
static inline int fsnotify_inode_watches_children(struct inode *inode)
@ -304,15 +344,9 @@ static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode
/* called from fsnotify listeners, such as fanotify or dnotify */
/* must call when a group changes its ->mask */
extern void fsnotify_recalc_global_mask(void);
/* get a reference to an existing or create a new group */
extern struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num,
__u32 mask,
const struct fsnotify_ops *ops);
/* run all marks associated with this group and update group->mask */
extern void fsnotify_recalc_group_mask(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_obtain_group */
extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
/* drop reference on a group from fsnotify_alloc_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
/* take a reference to an event */
@ -323,8 +357,11 @@ extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struc
struct fsnotify_event *event);
/* attach the event to the group notification queue */
extern int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
struct fsnotify_event_private_data *priv);
extern struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group,
struct fsnotify_event *event,
struct fsnotify_event_private_data *priv,
struct fsnotify_event *(*merge)(struct list_head *,
struct fsnotify_event *));
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
@ -334,38 +371,66 @@ extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group
/* functions used to manipulate the marks attached to inodes */
/* run all marks associated with a vfsmount and update mnt->mnt_fsnotify_mask */
extern void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt);
/* run all marks associated with an inode and update inode->i_fsnotify_mask */
extern void fsnotify_recalc_inode_mask(struct inode *inode);
extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry));
extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(struct fsnotify_mark *mark));
/* find (and take a reference) to a mark associated with group and inode */
extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode);
extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
/* find (and take a reference) to a mark associated with group and vfsmount */
extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
/* copy the values from old into new */
extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old);
/* set the ignored_mask of a mark */
extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
/* set the mask of a mark (might pin the object into memory) */
extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask);
/* attach the mark to both the group and the inode */
extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode);
extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
struct inode *inode, struct vfsmount *mnt, int allow_dups);
/* given a mark, flag it to be freed when all references are dropped */
extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry);
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark);
/* run all the marks in a group, and clear all of the vfsmount marks */
extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
/* run all the marks in a group, and clear all of the inode marks */
extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group);
/* run all the marks in a group, and clear all of the marks where mark->flags & flags is true */
extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags);
/* run all the marks in a group, and flag them to be freed */
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
extern void fsnotify_get_mark(struct fsnotify_mark_entry *entry);
extern void fsnotify_put_mark(struct fsnotify_mark_entry *entry);
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_unmount_inodes(struct list_head *list);
/* put here because inotify does some weird stuff when destroying watches */
extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
void *data, int data_is, const char *name,
void *data, int data_is,
const unsigned char *name,
u32 cookie, gfp_t gfp);
/* fanotify likes to change events after they are on lists... */
extern struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event);
extern int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
struct fsnotify_event *new_event);
#else
static inline void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const char *name, u32 cookie)
{}
static inline int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const unsigned char *name, u32 cookie)
{
return 0;
}
static inline void __fsnotify_parent(struct dentry *dentry, __u32 mask)
static inline void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{}
static inline void __fsnotify_inode_delete(struct inode *inode)
{}
static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
{}
static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
{}

View file

@ -1,3 +1,8 @@
/*
* Ftrace header. For implementation details beyond the random comments
* scattered below, see: Documentation/trace/ftrace-design.txt
*/
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

View file

@ -11,8 +11,6 @@ struct trace_array;
struct tracer;
struct dentry;
DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
struct trace_print_flags {
unsigned long mask;
const char *name;
@ -58,6 +56,9 @@ struct trace_iterator {
struct ring_buffer_iter *buffer_iter[NR_CPUS];
unsigned long iter_flags;
/* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq;
/* The below is zeroed out in pipe_read */
struct trace_seq seq;
struct trace_entry *ent;
@ -146,14 +147,19 @@ struct ftrace_event_class {
int (*raw_init)(struct ftrace_event_call *);
};
extern int ftrace_event_reg(struct ftrace_event_call *event,
enum trace_reg type);
enum {
TRACE_EVENT_FL_ENABLED_BIT,
TRACE_EVENT_FL_FILTERED_BIT,
TRACE_EVENT_FL_RECORDED_CMD_BIT,
};
enum {
TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
};
struct ftrace_event_call {
@ -171,6 +177,7 @@ struct ftrace_event_call {
* 32 bit flags:
* bit 1: enabled
* bit 2: filter_active
* bit 3: enabled cmd record
*
* Changes to flags must hold the event_mutex.
*
@ -257,8 +264,7 @@ static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
u64 count, struct pt_regs *regs, void *head)
{
perf_tp_event(addr, count, raw_data, size, regs, head);
perf_swevent_put_recursion_context(rctx);
perf_tp_event(addr, count, raw_data, size, regs, head, rctx);
}
#endif

View file

@ -37,6 +37,10 @@
*
* 7.14
* - add splice support to fuse device
*
* 7.15
* - add store notify
* - add retrieve notify
*/
#ifndef _LINUX_FUSE_H
@ -68,7 +72,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
#define FUSE_KERNEL_MINOR_VERSION 14
#define FUSE_KERNEL_MINOR_VERSION 15
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@ -251,6 +255,7 @@ enum fuse_opcode {
FUSE_DESTROY = 38,
FUSE_IOCTL = 39,
FUSE_POLL = 40,
FUSE_NOTIFY_REPLY = 41,
/* CUSE specific operations */
CUSE_INIT = 4096,
@ -260,6 +265,8 @@ enum fuse_notify_code {
FUSE_NOTIFY_POLL = 1,
FUSE_NOTIFY_INVAL_INODE = 2,
FUSE_NOTIFY_INVAL_ENTRY = 3,
FUSE_NOTIFY_STORE = 4,
FUSE_NOTIFY_RETRIEVE = 5,
FUSE_NOTIFY_CODE_MAX,
};
@ -568,4 +575,29 @@ struct fuse_notify_inval_entry_out {
__u32 padding;
};
struct fuse_notify_store_out {
__u64 nodeid;
__u64 offset;
__u32 size;
__u32 padding;
};
struct fuse_notify_retrieve_out {
__u64 notify_unique;
__u64 nodeid;
__u64 offset;
__u32 size;
__u32 padding;
};
/* Matches the size of fuse_write_in */
struct fuse_notify_retrieve_in {
__u64 dummy1;
__u64 offset;
__u32 size;
__u32 dummy2;
__u64 dummy3;
__u64 dummy4;
};
#endif /* _LINUX_FUSE_H */
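For illustration, a userspace server could emit the new store notification as below. This is a hedged sketch of the wire format (a fuse_out_header, defined earlier in this file, followed by the payload); notify_store() is an invented helper, and libfuse wraps this in its own API:

#include <linux/fuse.h>
#include <stdint.h>
#include <sys/uio.h>
#include <unistd.h>

/* sketch: push `size` bytes at `offset` into the kernel cache of `nodeid` */
static int notify_store(int fuse_fd, uint64_t nodeid, uint64_t offset,
			const void *data, uint32_t size)
{
	struct fuse_notify_store_out out = {
		.nodeid = nodeid,
		.offset = offset,
		.size   = size,
	};
	struct fuse_out_header hdr = {
		.len   = sizeof(hdr) + sizeof(out) + size,
		.error = FUSE_NOTIFY_STORE,	/* notify code; .unique stays 0 */
	};
	struct iovec iov[] = {
		{ &hdr, sizeof(hdr) },
		{ &out, sizeof(out) },
		{ (void *)data, size },
	};

	return writev(fuse_fd, iov, 3) == (ssize_t)hdr.len ? 0 : -1;
}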

View file

@ -17,6 +17,8 @@ struct gpio_keys_platform_data {
struct gpio_keys_button *buttons;
int nbuttons;
unsigned int rep:1; /* enable input subsystem auto repeat */
int (*enable)(struct device *dev);
void (*disable)(struct device *dev);
};
#endif
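A board file could wire the new enable/disable hooks up as follows; every name below is invented for the example:

#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/kernel.h>

static struct gpio_keys_button example_buttons[] = {
	{ .code = KEY_POWER, .gpio = 10, .active_low = 1, .desc = "power" },
};

static int example_keys_enable(struct device *dev)
{
	/* board specific: e.g. power up the key-matrix supply */
	return 0;
}

static void example_keys_disable(struct device *dev)
{
	/* undo whatever enable() did */
}

static struct gpio_keys_platform_data example_keys_pdata = {
	.buttons  = example_buttons,
	.nbuttons = ARRAY_SIZE(example_buttons),
	.rep      = 1,
	.enable   = example_keys_enable,
	.disable  = example_keys_disable,
};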

View file

@ -311,6 +311,7 @@ struct hid_item {
#define HID_QUIRK_HIDDEV_FORCE 0x00000010
#define HID_QUIRK_BADPAD 0x00000020
#define HID_QUIRK_MULTI_INPUT 0x00000040
#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
#define HID_QUIRK_NO_INIT_REPORTS 0x20000000

View file

@ -2,6 +2,7 @@
#define _LINUX_HIGHMEM_H
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
@ -72,7 +73,11 @@ static inline void *kmap_atomic(struct page *page, enum km_type idx)
}
#define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx)
#define kunmap_atomic(addr, idx) do { pagefault_enable(); } while (0)
static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
{
pagefault_enable();
}
#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
@ -81,6 +86,13 @@ static inline void *kmap_atomic(struct page *page, enum km_type idx)
#endif /* CONFIG_HIGHMEM */
/* Prevent people trying to call kunmap_atomic() as if it were kunmap() */
/* kunmap_atomic() should get the return value of kmap_atomic, not the page. */
#define kunmap_atomic(addr, idx) do { \
BUILD_BUG_ON(__same_type((addr), struct page *)); \
kunmap_atomic_notypecheck((addr), (idx)); \
} while (0)
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
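The rewritten kunmap_atomic() above now trips a BUILD_BUG_ON() when handed a struct page *. A minimal sketch of the intended pairing under the km_type API of this era (function name invented):

#include <linux/highmem.h>

static void example_read_first_byte(struct page *page, u8 *out)
{
	u8 *vaddr = kmap_atomic(page, KM_USER0);

	*out = vaddr[0];
	kunmap_atomic(vaddr, KM_USER0);	/* passing `page` here would not build */
}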

View file

@ -2,6 +2,7 @@
#define _LINUX_HUGETLB_H
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
struct ctl_table;
struct user_struct;
@ -14,11 +15,6 @@ struct user_struct;
int PageHuge(struct page *page);
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
return vma->vm_flags & VM_HUGETLB;
}
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
@ -47,6 +43,7 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
int acctflags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
void __isolate_hwpoisoned_huge_page(struct page *page);
extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
@ -77,11 +74,6 @@ static inline int PageHuge(struct page *page)
return 0;
}
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
return 0;
}
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
@ -108,6 +100,8 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
#define huge_pte_offset(mm, address) 0
#define __isolate_hwpoisoned_huge_page(page) 0
#define hugetlb_change_protection(vma, address, end, newprot)

View file

@ -0,0 +1,22 @@
#ifndef _LINUX_HUGETLB_INLINE_H
#define _LINUX_HUGETLB_INLINE_H
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mm.h>
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
return vma->vm_flags & VM_HUGETLB;
}
#else
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
return 0;
}
#endif
#endif

include/linux/i2c-mux.h (new file, 46 lines)
View file

@ -0,0 +1,46 @@
/*
*
* i2c-mux.h - functions for the i2c-bus mux support
*
* Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
* Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
* Michael Lawnick <michael.lawnick.ext@nsn.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_I2C_MUX_H
#define _LINUX_I2C_MUX_H
#ifdef __KERNEL__
/*
* Called to create an i2c bus on a multiplexed bus segment.
* The mux_dev and chan_id parameters are passed to the select
* and deselect callback functions to perform hardware-specific
* mux control.
*/
struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
void *mux_dev, u32 force_nr, u32 chan_id,
int (*select) (struct i2c_adapter *,
void *mux_dev, u32 chan_id),
int (*deselect) (struct i2c_adapter *,
void *mux_dev, u32 chan_id));
int i2c_del_mux_adapter(struct i2c_adapter *adap);
#endif /* __KERNEL__ */
#endif /* _LINUX_I2C_MUX_H */
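A mux driver might expose two downstream segments as in this hedged sketch; the callbacks and attach helper are invented names:

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>

static int example_select(struct i2c_adapter *parent, void *mux_dev,
			  u32 chan_id)
{
	/* drive the board's channel-select line(s) for chan_id */
	return 0;
}

static int example_deselect(struct i2c_adapter *parent, void *mux_dev,
			    u32 chan_id)
{
	return 0;
}

static struct i2c_adapter *example_virt[2];

static int example_attach(struct i2c_adapter *parent, void *mux)
{
	int i;

	for (i = 0; i < 2; i++) {
		/* force_nr == 0: let the core number the new buses */
		example_virt[i] = i2c_add_mux_adapter(parent, mux, 0, i,
						      example_select,
						      example_deselect);
		if (!example_virt[i])
			return -ENODEV;
	}
	return 0;
}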

View file

@ -37,6 +37,7 @@
#include <linux/of.h> /* for struct device_node */
extern struct bus_type i2c_bus_type;
extern struct device_type i2c_adapter_type;
/* --- General options ------------------------------------------------ */
@ -108,6 +109,7 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
* @shutdown: Callback for device shutdown
* @suspend: Callback for device suspend
* @resume: Callback for device resume
* @alert: Alert callback, for example for the SMBus alert protocol
* @command: Callback for bus-wide signaling (optional)
* @driver: Device driver model driver
* @id_table: List of I2C devices supported by this driver
@ -233,6 +235,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
* @addr: stored in i2c_client.addr
* @platform_data: stored in i2c_client.dev.platform_data
* @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
* @irq: stored in i2c_client.irq
*
* I2C doesn't actually support hardware probing, although controllers and
@ -282,12 +285,18 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
/* If you don't know the exact address of an I2C device, use this variant
* instead, which can probe for device presence in a list of possible
* addresses.
* addresses. The "probe" callback function is optional. If it is provided,
* it must return 1 on successful probe, 0 otherwise. If it is not provided,
* a default probing method is used.
*/
extern struct i2c_client *
i2c_new_probed_device(struct i2c_adapter *adap,
struct i2c_board_info *info,
unsigned short const *addr_list);
unsigned short const *addr_list,
int (*probe)(struct i2c_adapter *, unsigned short addr));
/* Common custom probe functions */
extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr);
/* For devices that use several addresses, use i2c_new_dummy() to make
* client handles for the extra addresses.
@ -360,6 +369,7 @@ struct i2c_adapter {
char name[48];
struct completion dev_released;
struct mutex userspace_clients_lock;
struct list_head userspace_clients;
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@ -374,23 +384,16 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
dev_set_drvdata(&dev->dev, data);
}
/**
* i2c_lock_adapter - Prevent access to an I2C bus segment
* @adapter: Target I2C bus segment
*/
static inline void i2c_lock_adapter(struct i2c_adapter *adapter)
static inline int i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
{
rt_mutex_lock(&adapter->bus_lock);
return adapter->dev.parent != NULL
&& adapter->dev.parent->bus == &i2c_bus_type
&& adapter->dev.parent->type == &i2c_adapter_type;
}
/**
* i2c_unlock_adapter - Reauthorize access to an I2C bus segment
* @adapter: Target I2C bus segment
*/
static inline void i2c_unlock_adapter(struct i2c_adapter *adapter)
{
rt_mutex_unlock(&adapter->bus_lock);
}
/* Adapter locking functions, exported for shared pin cases */
void i2c_lock_adapter(struct i2c_adapter *);
void i2c_unlock_adapter(struct i2c_adapter *);
/*flags for the client struct: */
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
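The extended probing interface might be used as below; the device name and address list are invented:

#include <linux/i2c.h>

static const unsigned short example_addrs[] = { 0x48, 0x49, I2C_CLIENT_END };

static struct i2c_client *example_attach(struct i2c_adapter *adap)
{
	struct i2c_board_info info = { I2C_BOARD_INFO("example-sensor", 0) };

	/* the exported quick-read helper doubles as the probe callback */
	return i2c_new_probed_device(adap, &info, example_addrs,
				     i2c_probe_func_quick_read);
}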

View file

@ -78,6 +78,40 @@
#define ADP5588_KEYMAPSIZE 80
#define GPI_PIN_ROW0 97
#define GPI_PIN_ROW1 98
#define GPI_PIN_ROW2 99
#define GPI_PIN_ROW3 100
#define GPI_PIN_ROW4 101
#define GPI_PIN_ROW5 102
#define GPI_PIN_ROW6 103
#define GPI_PIN_ROW7 104
#define GPI_PIN_COL0 105
#define GPI_PIN_COL1 106
#define GPI_PIN_COL2 107
#define GPI_PIN_COL3 108
#define GPI_PIN_COL4 109
#define GPI_PIN_COL5 110
#define GPI_PIN_COL6 111
#define GPI_PIN_COL7 112
#define GPI_PIN_COL8 113
#define GPI_PIN_COL9 114
#define GPI_PIN_ROW_BASE GPI_PIN_ROW0
#define GPI_PIN_ROW_END GPI_PIN_ROW7
#define GPI_PIN_COL_BASE GPI_PIN_COL0
#define GPI_PIN_COL_END GPI_PIN_COL9
#define GPI_PIN_BASE GPI_PIN_ROW_BASE
#define GPI_PIN_END GPI_PIN_COL_END
#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1)
struct adp5588_gpi_map {
unsigned short pin;
unsigned short sw_evt;
};
struct adp5588_kpad_platform_data {
int rows; /* Number of rows */
int cols; /* Number of columns */
@ -87,6 +121,9 @@ struct adp5588_kpad_platform_data {
unsigned en_keylock:1; /* Enable Key Lock feature */
unsigned short unlock_key1; /* Unlock Key 1 */
unsigned short unlock_key2; /* Unlock Key 2 */
const struct adp5588_gpi_map *gpimap;
unsigned short gpimapsize;
const struct adp5588_gpio_platform_data *gpio_data;
};
struct adp5588_gpio_platform_data {
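As a sketch, two spare row pins could be reported as switch events through the new gpimap fields; the values are invented and the keymap fields are elided:

#include <linux/input.h>
#include <linux/kernel.h>

static const struct adp5588_gpi_map example_gpimap[] = {
	{ GPI_PIN_ROW6, SW_LID },
	{ GPI_PIN_ROW7, SW_TABLET_MODE },
};

static const struct adp5588_kpad_platform_data example_kpad_pdata = {
	.rows       = 6,	/* rows 6/7 freed up for GPI use */
	.cols       = 10,
	.gpimap     = example_gpimap,
	.gpimapsize = ARRAY_SIZE(example_gpimap),
};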

include/linux/i2c/mcs.h (new file, 34 lines)
View file

@ -0,0 +1,34 @@
/*
* Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
* Author: HeungJun Kim <riverful.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef __LINUX_MCS_H
#define __LINUX_MCS_H
#define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff))
#define MCS_KEY_VAL(v) (((v) >> 16) & 0xff)
#define MCS_KEY_CODE(v) ((v) & 0xffff)
struct mcs_platform_data {
void (*cfg_pin)(void);
/* touchscreen */
unsigned int x_size;
unsigned int y_size;
/* touchkey */
const u32 *keymap;
unsigned int keymap_size;
unsigned int key_maxval;
bool no_autorepeat;
};
#endif /* __LINUX_MCS_H */
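For illustration, a board might map two touchkey values like this (key choices and sizes invented):

#include <linux/input.h>
#include <linux/kernel.h>

static const u32 example_keymap[] = {
	MCS_KEY_MAP(0, KEY_MENU),
	MCS_KEY_MAP(1, KEY_BACK),
};

static struct mcs_platform_data example_mcs_pdata = {
	.keymap      = example_keymap,
	.keymap_size = ARRAY_SIZE(example_keymap),
	.key_maxval  = 1,
	.x_size      = 480,
	.y_size      = 800,
};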

View file

@ -1,24 +0,0 @@
/*
* mcs5000_ts.h
*
* Copyright (C) 2009 Samsung Electronics Co.Ltd
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#ifndef __LINUX_MCS5000_TS_H
#define __LINUX_MCS5000_TS_H
/* platform data for the MELFAS MCS-5000 touchscreen driver */
struct mcs5000_ts_platform_data {
void (*cfg_pin)(void);
int x_size;
int y_size;
};
#endif /* __LINUX_MCS5000_TS_H */

View file

@ -0,0 +1,47 @@
/*
*
* pca954x.h - I2C multiplexer/switch support
*
* Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
* Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
* Michael Lawnick <michael.lawnick.ext@nsn.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_I2C_PCA954X_H
#define _LINUX_I2C_PCA954X_H
/* Platform data for the PCA954x I2C multiplexers */
/* Per channel initialisation data:
* @adap_id: bus number for the adapter. 0 = don't care
* @deselect_on_exit: set this entry to 1, if your H/W needs deselection
* of this channel after transaction.
*
*/
struct pca954x_platform_mode {
int adap_id;
unsigned int deselect_on_exit:1;
};
/* Per mux/switch data, used with i2c_register_board_info */
struct pca954x_platform_data {
struct pca954x_platform_mode *modes;
int num_modes;
};
#endif /* _LINUX_I2C_PCA954X_H */
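A hedged example of describing a two-channel mux to the core; the bus numbers, chip name, and address are invented:

#include <linux/i2c.h>
#include <linux/kernel.h>

static struct pca954x_platform_mode example_modes[] = {
	{ .adap_id = 3, .deselect_on_exit = 1 },
	{ .adap_id = 4, .deselect_on_exit = 1 },
};

static struct pca954x_platform_data example_pca954x_pdata = {
	.modes     = example_modes,
	.num_modes = ARRAY_SIZE(example_modes),
};

static struct i2c_board_info example_mux_info = {
	I2C_BOARD_INFO("pca9542", 0x70),
	.platform_data = &example_pca954x_pdata,
};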

View file

@ -0,0 +1,38 @@
/*
* AT42QT602240/ATMXT224 Touchscreen driver
*
* Copyright (C) 2010 Samsung Electronics Co.Ltd
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __LINUX_QT602240_TS_H
#define __LINUX_QT602240_TS_H
/* Orient */
#define QT602240_NORMAL 0x0
#define QT602240_DIAGONAL 0x1
#define QT602240_HORIZONTAL_FLIP 0x2
#define QT602240_ROTATED_90_COUNTER 0x3
#define QT602240_VERTICAL_FLIP 0x4
#define QT602240_ROTATED_90 0x5
#define QT602240_ROTATED_180 0x6
#define QT602240_DIAGONAL_COUNTER 0x7
/* The platform data for the AT42QT602240/ATMXT224 touchscreen driver */
struct qt602240_platform_data {
unsigned int x_line;
unsigned int y_line;
unsigned int x_size;
unsigned int y_size;
unsigned int blen;
unsigned int threshold;
unsigned int voltage;
unsigned char orient;
};
#endif /* __LINUX_QT602240_TS_H */
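Example platform data, with deliberately invented tuning values; real numbers are panel specific:

static const struct qt602240_platform_data example_qt_pdata = {
	.x_line    = 17,
	.y_line    = 11,
	.x_size    = 240,
	.y_size    = 320,
	.blen      = 0x21,
	.threshold = 0x28,
	.voltage   = 2800000,		/* microvolts, assumed */
	.orient    = QT602240_DIAGONAL,
};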

View file

@ -0,0 +1,78 @@
/*
* Driver for the Semtech SX150x I2C GPIO Expanders
*
* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#ifndef __LINUX_I2C_SX150X_H
#define __LINUX_I2C_SX150X_H
/**
* struct sx150x_platform_data - config data for SX150x driver
* @gpio_base: The index number of the first GPIO assigned to this
* GPIO expander. The expander will create a block of
* consecutively numbered gpios beginning at the given base,
* with the size of the block depending on the model of the
* expander chip.
* @oscio_is_gpo: If set to true, the driver will configure OSCIO as a GPO
* instead of as an oscillator, increasing the size of the
* GP(I)O pool created by this expander by one. The
* output-only GPO pin will be added at the end of the block.
* @io_pullup_ena: A bit-mask which enables or disables the pull-up resistor
* for each IO line in the expander. Setting the bit at
* position n will enable the pull-up for the IO at
* the corresponding offset. For chips with fewer than
* 16 IO pins, high-end bits are ignored.
* @io_pulldn_ena: A bit-mask which enables or disables the pull-down
* resistor for each IO line in the expander. Setting the
* bit at position n will enable the pull-down for the IO at
* the corresponding offset. For chips with fewer than
* 16 IO pins, high-end bits are ignored.
* @io_open_drain_ena: A bit-mask which enables or disables open-drain
* operation for each IO line in the expander. Setting the
* bit at position n enables open-drain operation for
* the IO at the corresponding offset. Clearing the bit
* enables regular push-pull operation for that IO.
* For chips with fewer than 16 IO pins, high-end bits
* are ignored.
* @io_polarity: A bit-mask which enables polarity inversion for each IO line
* in the expander. Setting the bit at position n inverts
* the polarity of that IO line, while clearing it results
* in normal polarity. For chips with fewer than 16 IO pins,
* high-end bits are ignored.
* @irq_summary: The 'summary IRQ' line to which the GPIO expander's INT line
* is connected, via which it reports interrupt events
* across all GPIO lines. This must be a real,
* pre-existing IRQ line.
* Setting this value < 0 disables the irq_chip functionality
* of the driver.
* @irq_base: The first 'virtual IRQ' line at which our block of GPIO-based
* IRQ lines will appear. Similarly to gpio_base, the expander
* will create a block of irqs beginning at this number.
* This value is ignored if irq_summary is < 0.
*/
struct sx150x_platform_data {
unsigned gpio_base;
bool oscio_is_gpo;
u16 io_pullup_ena;
u16 io_pulldn_ena;
u16 io_open_drain_ena;
u16 io_polarity;
int irq_summary;
unsigned irq_base;
};
#endif /* __LINUX_I2C_SX150X_H */
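A board description might look like this sketch; every value is invented:

static struct sx150x_platform_data example_sx150x_pdata = {
	.gpio_base         = 200,	/* block starts at gpio 200 */
	.oscio_is_gpo      = false,
	.io_pullup_ena     = 0x000f,	/* pull-ups on IO0..IO3 */
	.io_pulldn_ena     = 0x0000,
	.io_open_drain_ena = 0x0030,	/* IO4/IO5 open-drain */
	.io_polarity       = 0,
	.irq_summary       = -1,	/* irq_chip support disabled */
	.irq_base          = 0,
};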

View file

@ -458,7 +458,7 @@ enum {
IDE_DFLAG_DOORLOCKING = (1 << 15),
/* disallow DMA */
IDE_DFLAG_NODMA = (1 << 16),
/* powermanagment told us not to do anything, so sleep nicely */
/* powermanagement told us not to do anything, so sleep nicely */
IDE_DFLAG_BLOCKED = (1 << 17),
/* sleeping & sleep field valid */
IDE_DFLAG_SLEEPING = (1 << 18),

View file

@ -73,6 +73,8 @@
#define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */
#define IFF_IN_NETPOLL 0x1000 /* whether we are processing netpoll */
#define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */
#define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */
#define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002

View file

@ -83,6 +83,7 @@
#define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */
#define BOND_DEFAULT_TX_QUEUES 16 /* Default number of tx queues per device */
/* hashing types */
#define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */
#define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ (TCP || UDP)) */

View file

@ -102,8 +102,6 @@ struct __fdb_entry {
#include <linux/netdevice.h>
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
extern struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
struct sk_buff *skb);
extern int (*br_should_route_hook)(struct sk_buff *skb);
#endif

View file

@ -119,7 +119,7 @@ struct ethhdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
__be16 h_proto; /* packet type ID field */
} __attribute__((packed));
} __packed;
#ifdef __KERNEL__
#include <linux/skbuff.h>

View file

@ -67,7 +67,7 @@ struct fddi_8022_1_hdr {
__u8 dsap; /* destination service access point */
__u8 ssap; /* source service access point */
__u8 ctrl; /* control byte #1 */
} __attribute__ ((packed));
} __packed;
/* Define 802.2 Type 2 header */
struct fddi_8022_2_hdr {
@ -75,7 +75,7 @@ struct fddi_8022_2_hdr {
__u8 ssap; /* source service access point */
__u8 ctrl_1; /* control byte #1 */
__u8 ctrl_2; /* control byte #2 */
} __attribute__ ((packed));
} __packed;
/* Define 802.2 SNAP header */
#define FDDI_K_OUI_LEN 3
@ -85,7 +85,7 @@ struct fddi_snap_hdr {
__u8 ctrl; /* always 0x03 */
__u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */
__be16 ethertype; /* packet type ID field */
} __attribute__ ((packed));
} __packed;
/* Define FDDI LLC frame header */
struct fddihdr {
@ -98,7 +98,7 @@ struct fddihdr {
struct fddi_8022_2_hdr llc_8022_2;
struct fddi_snap_hdr llc_snap;
} hdr;
} __attribute__ ((packed));
} __packed;
#ifdef __KERNEL__
#include <linux/netdevice.h>

View file

@ -135,7 +135,7 @@ struct frhdr
__be16 PID;
#define IP_NLPID pad
} __attribute__((packed));
} __packed;
/* see RFC 1490 for the definition of the following */
#define FRAD_I_UI 0x03

View file

@ -104,7 +104,7 @@ struct hippi_fp_hdr {
__be32 fixed;
#endif
__be32 d2_size;
} __attribute__ ((packed));
} __packed;
struct hippi_le_hdr {
#if defined (__BIG_ENDIAN_BITFIELD)
@ -129,7 +129,7 @@ struct hippi_le_hdr {
__u8 daddr[HIPPI_ALEN];
__u16 locally_administered;
__u8 saddr[HIPPI_ALEN];
} __attribute__ ((packed));
} __packed;
#define HIPPI_OUI_LEN 3
/*
@ -142,12 +142,12 @@ struct hippi_snap_hdr {
__u8 ctrl; /* always 0x03 */
__u8 oui[HIPPI_OUI_LEN]; /* organizational universal id (zero)*/
__be16 ethertype; /* packet type ID field */
} __attribute__ ((packed));
} __packed;
struct hippi_hdr {
struct hippi_fp_hdr fp;
struct hippi_le_hdr le;
struct hippi_snap_hdr snap;
} __attribute__ ((packed));
} __packed;
#endif /* _LINUX_IF_HIPPI_H */

View file

@ -4,7 +4,7 @@
#include <linux/types.h>
#include <linux/netlink.h>
/* The struct should be in sync with struct net_device_stats */
/* This struct should be in sync with struct rtnl_link_stats64 */
struct rtnl_link_stats {
__u32 rx_packets; /* total packets received */
__u32 tx_packets; /* total packets transmitted */
@ -37,6 +37,7 @@ struct rtnl_link_stats {
__u32 tx_compressed;
};
/* The main device statistics structure */
struct rtnl_link_stats64 {
__u64 rx_packets; /* total packets received */
__u64 tx_packets; /* total packets transmitted */
@ -233,7 +234,7 @@ enum macvlan_mode {
MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */
};
/* SR-IOV virtual function managment section */
/* SR-IOV virtual function management section */
enum {
IFLA_VF_INFO_UNSPEC,

View file

@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/u64_stats_sync.h>
#if defined(CONFIG_MACVTAP) || defined(CONFIG_MACVTAP_MODULE)
struct socket *macvtap_get_socket(struct file *);
@ -27,14 +28,16 @@ struct macvtap_queue;
* struct macvlan_rx_stats - MACVLAN percpu rx stats
* @rx_packets: number of received packets
* @rx_bytes: number of received bytes
* @multicast: number of received multicast packets
* @rx_multicast: number of received multicast packets
* @syncp: synchronization point for 64bit counters
* @rx_errors: number of errors
*/
struct macvlan_rx_stats {
unsigned long rx_packets;
unsigned long rx_bytes;
unsigned long multicast;
unsigned long rx_errors;
u64 rx_packets;
u64 rx_bytes;
u64 rx_multicast;
struct u64_stats_sync syncp;
unsigned long rx_errors;
};
struct macvlan_dev {
@ -56,12 +59,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
{
struct macvlan_rx_stats *rx_stats;
rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
rx_stats = this_cpu_ptr(vlan->rx_stats);
if (likely(success)) {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += len;
if (multicast)
rx_stats->multicast++;
rx_stats->rx_multicast++;
u64_stats_update_end(&rx_stats->syncp);
} else {
rx_stats->rx_errors++;
}
@ -86,8 +91,4 @@ extern int macvlan_link_register(struct rtnl_link_ops *ops);
extern netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev);
extern struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *,
struct sk_buff *);
#endif /* _LINUX_IF_MACVLAN_H */
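The writer side above brackets the 64-bit counters with u64_stats_update_begin()/end(); a reader needs the matching fetch/retry loop. A hedged sketch (helper name invented):

#include <linux/u64_stats_sync.h>

static void example_fold_rx(struct macvlan_rx_stats *s,
			    u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->rx_packets;
		*bytes   = s->rx_bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}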

View file

@ -48,6 +48,7 @@ struct sockaddr_ll {
#define PACKET_LOSS 14
#define PACKET_VNET_HDR 15
#define PACKET_TX_TIMESTAMP 16
#define PACKET_TIMESTAMP 17
struct tpacket_stats {
unsigned int tp_packets;

View file

@ -59,7 +59,7 @@ struct sockaddr_pppox {
union{
struct pppoe_addr pppoe;
}sa_addr;
}__attribute__ ((packed));
} __packed;
/* The use of the above union isn't viable because the size of this
* struct must stay fixed over time -- applications use sizeof(struct
@ -70,7 +70,7 @@ struct sockaddr_pppol2tp {
sa_family_t sa_family; /* address family, AF_PPPOX */
unsigned int sa_protocol; /* protocol identifier */
struct pppol2tp_addr pppol2tp;
}__attribute__ ((packed));
} __packed;
/* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
* bits. So we need a different sockaddr structure.
@ -79,7 +79,7 @@ struct sockaddr_pppol2tpv3 {
sa_family_t sa_family; /* address family, AF_PPPOX */
unsigned int sa_protocol; /* protocol identifier */
struct pppol2tpv3_addr pppol2tp;
} __attribute__ ((packed));
} __packed;
/*********************************************************************
*
@ -129,7 +129,7 @@ struct pppoe_hdr {
__be16 sid;
__be16 length;
struct pppoe_tag tag[0];
} __attribute__ ((packed));
} __packed;
/* Length of entire PPPoE + PPP header */
#define PPPOE_SES_HLEN 8

View file

@ -85,6 +85,7 @@ struct in_addr {
#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR
#define IP_MINTTL 21
#define IP_NODEFRAG 22
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */

View file

@ -51,6 +51,7 @@ struct inotify_event {
/* special flags */
#define IN_ONLYDIR 0x01000000 /* only watch the path if it is a directory */
#define IN_DONT_FOLLOW 0x02000000 /* don't follow a sym link */
#define IN_EXCL_UNLINK 0x04000000 /* exclude events on unlinked objects */
#define IN_MASK_ADD 0x20000000 /* add to the mask of an already existing watch */
#define IN_ISDIR 0x40000000 /* event occurred against dir */
#define IN_ONESHOT 0x80000000 /* only send event once */
@ -70,177 +71,17 @@ struct inotify_event {
#define IN_NONBLOCK O_NONBLOCK
#ifdef __KERNEL__
#include <linux/sysctl.h>
extern struct ctl_table inotify_table[]; /* for sysctl */
#include <linux/dcache.h>
#include <linux/fs.h>
#define ALL_INOTIFY_BITS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \
IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \
IN_MOVED_TO | IN_CREATE | IN_DELETE | \
IN_DELETE_SELF | IN_MOVE_SELF | IN_UNMOUNT | \
IN_Q_OVERFLOW | IN_IGNORED | IN_ONLYDIR | \
IN_DONT_FOLLOW | IN_EXCL_UNLINK | IN_MASK_ADD | \
IN_ISDIR | IN_ONESHOT)
/*
* struct inotify_watch - represents a watch request on a specific inode
*
* h_list is protected by ih->mutex of the associated inotify_handle.
* i_list, mask are protected by inode->inotify_mutex of the associated inode.
* ih, inode, and wd are never written to once the watch is created.
*
* Callers must use the established inotify interfaces to access inotify_watch
* contents. The content of this structure is private to the inotify
* implementation.
*/
struct inotify_watch {
struct list_head h_list; /* entry in inotify_handle's list */
struct list_head i_list; /* entry in inode's list */
atomic_t count; /* reference count */
struct inotify_handle *ih; /* associated inotify handle */
struct inode *inode; /* associated inode */
__s32 wd; /* watch descriptor */
__u32 mask; /* event mask for this watch */
};
struct inotify_operations {
void (*handle_event)(struct inotify_watch *, u32, u32, u32,
const char *, struct inode *);
void (*destroy_watch)(struct inotify_watch *);
};
#ifdef CONFIG_INOTIFY
/* Kernel API for producing events */
extern void inotify_d_instantiate(struct dentry *, struct inode *);
extern void inotify_d_move(struct dentry *);
extern void inotify_inode_queue_event(struct inode *, __u32, __u32,
const char *, struct inode *);
extern void inotify_dentry_parent_queue_event(struct dentry *, __u32, __u32,
const char *);
extern void inotify_unmount_inodes(struct list_head *);
extern void inotify_inode_is_dead(struct inode *);
extern u32 inotify_get_cookie(void);
/* Kernel Consumer API */
extern struct inotify_handle *inotify_init(const struct inotify_operations *);
extern void inotify_init_watch(struct inotify_watch *);
extern void inotify_destroy(struct inotify_handle *);
extern __s32 inotify_find_watch(struct inotify_handle *, struct inode *,
struct inotify_watch **);
extern __s32 inotify_find_update_watch(struct inotify_handle *, struct inode *,
u32);
extern __s32 inotify_add_watch(struct inotify_handle *, struct inotify_watch *,
struct inode *, __u32);
extern __s32 inotify_clone_watch(struct inotify_watch *, struct inotify_watch *);
extern void inotify_evict_watch(struct inotify_watch *);
extern int inotify_rm_watch(struct inotify_handle *, struct inotify_watch *);
extern int inotify_rm_wd(struct inotify_handle *, __u32);
extern void inotify_remove_watch_locked(struct inotify_handle *,
struct inotify_watch *);
extern void get_inotify_watch(struct inotify_watch *);
extern void put_inotify_watch(struct inotify_watch *);
extern int pin_inotify_watch(struct inotify_watch *);
extern void unpin_inotify_watch(struct inotify_watch *);
#else
static inline void inotify_d_instantiate(struct dentry *dentry,
struct inode *inode)
{
}
static inline void inotify_d_move(struct dentry *dentry)
{
}
static inline void inotify_inode_queue_event(struct inode *inode,
__u32 mask, __u32 cookie,
const char *filename,
struct inode *n_inode)
{
}
static inline void inotify_dentry_parent_queue_event(struct dentry *dentry,
__u32 mask, __u32 cookie,
const char *filename)
{
}
static inline void inotify_unmount_inodes(struct list_head *list)
{
}
static inline void inotify_inode_is_dead(struct inode *inode)
{
}
static inline u32 inotify_get_cookie(void)
{
return 0;
}
static inline struct inotify_handle *inotify_init(const struct inotify_operations *ops)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void inotify_init_watch(struct inotify_watch *watch)
{
}
static inline void inotify_destroy(struct inotify_handle *ih)
{
}
static inline __s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
struct inotify_watch **watchp)
{
return -EOPNOTSUPP;
}
static inline __s32 inotify_find_update_watch(struct inotify_handle *ih,
struct inode *inode, u32 mask)
{
return -EOPNOTSUPP;
}
static inline __s32 inotify_add_watch(struct inotify_handle *ih,
struct inotify_watch *watch,
struct inode *inode, __u32 mask)
{
return -EOPNOTSUPP;
}
static inline int inotify_rm_watch(struct inotify_handle *ih,
struct inotify_watch *watch)
{
return -EOPNOTSUPP;
}
static inline int inotify_rm_wd(struct inotify_handle *ih, __u32 wd)
{
return -EOPNOTSUPP;
}
static inline void inotify_remove_watch_locked(struct inotify_handle *ih,
struct inotify_watch *watch)
{
}
static inline void get_inotify_watch(struct inotify_watch *watch)
{
}
static inline void put_inotify_watch(struct inotify_watch *watch)
{
}
extern inline int pin_inotify_watch(struct inotify_watch *watch)
{
return 0;
}
extern inline void unpin_inotify_watch(struct inotify_watch *watch)
{
}
#endif /* CONFIG_INOTIFY */
#endif /* __KERNEL __ */
#endif
#endif /* _LINUX_INOTIFY_H */
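From userspace, the new IN_EXCL_UNLINK flag is passed straight to inotify_add_watch(); a minimal sketch, assuming a libc that already carries the flag:

#include <stdio.h>
#include <sys/inotify.h>

int main(void)
{
	int fd = inotify_init();
	/* suppress events on children that have already been unlinked */
	int wd = inotify_add_watch(fd, "/tmp",
				   IN_MODIFY | IN_EXCL_UNLINK);

	if (fd < 0 || wd < 0)
		perror("inotify");
	return 0;
}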

View file

@ -691,9 +691,12 @@ struct input_absinfo {
#define ABS_TILT_X 0x1a
#define ABS_TILT_Y 0x1b
#define ABS_TOOL_WIDTH 0x1c
#define ABS_VOLUME 0x20
#define ABS_MISC 0x28
#define ABS_MT_SLOT 0x2f /* MT slot being modified */
#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
#define ABS_MT_WIDTH_MAJOR 0x32 /* Major axis of approaching ellipse */
@ -706,6 +709,12 @@ struct input_absinfo {
#define ABS_MT_TRACKING_ID 0x39 /* Unique ID of initiated contact */
#define ABS_MT_PRESSURE 0x3a /* Pressure on contact area */
#ifdef __KERNEL__
/* Implementation details, userspace should not care about these */
#define ABS_MT_FIRST ABS_MT_TOUCH_MAJOR
#define ABS_MT_LAST ABS_MT_PRESSURE
#endif
#define ABS_MAX 0x3f
#define ABS_CNT (ABS_MAX+1)
@ -767,6 +776,7 @@ struct input_absinfo {
#define REP_DELAY 0x00
#define REP_PERIOD 0x01
#define REP_MAX 0x01
#define REP_CNT (REP_MAX+1)
/*
* Sounds
@ -1047,6 +1057,14 @@ struct ff_effect {
#include <linux/timer.h>
#include <linux/mod_devicetable.h>
/**
* struct input_mt_slot - represents the state of an input MT slot
* @abs: holds current values of ABS_MT axes for this slot
*/
struct input_mt_slot {
int abs[ABS_MT_LAST - ABS_MT_FIRST + 1];
};
/**
* struct input_dev - represents an input device
* @name: name of the device
@ -1063,6 +1081,10 @@ struct ff_effect {
* @sndbit: bitmap of sound effects supported by the device
* @ffbit: bitmap of force feedback effects supported by the device
* @swbit: bitmap of switches present on the device
* @hint_events_per_packet: average number of events generated by the
* device in a packet (between EV_SYN/SYN_REPORT events). Used by
* event handlers to estimate size of the buffer needed to hold
* events.
* @keycodemax: size of keycode table
* @keycodesize: size of elements in keycode table
* @keycode: map of scancodes to keycodes for this device
@ -1078,18 +1100,18 @@ struct ff_effect {
* @repeat_key: stores key code of the last key pressed; used to implement
* software autorepeat
* @timer: timer for software autorepeat
* @sync: set to 1 when there were no new events since last EV_SYNC
* @abs: current values for reports from absolute axes
* @rep: current values for autorepeat parameters (delay, rate)
* @mt: pointer to array of struct input_mt_slot holding current values
* of tracked contacts
* @mtsize: number of MT slots the device uses
* @slot: MT slot currently being transmitted
* @absinfo: array of &struct absinfo elements holding information
* about absolute axes (current value, min, max, flat, fuzz,
* resolution)
* @key: reflects current state of device's keys/buttons
* @led: reflects current state of device's LEDs
* @snd: reflects current state of sound effects
* @sw: reflects current state of device's switches
* @absmax: maximum values for events coming from absolute axes
* @absmin: minimum values for events coming from absolute axes
* @absfuzz: describes noisiness for axes
* @absflat: size of the center flat position (used by joydev)
* @absres: resolution used for events coming form absolute axes
* @open: this method is called when the very first user calls
* input_open_device(). The driver must prepare the device
* to start generating events (start polling thread,
@ -1119,6 +1141,7 @@ struct ff_effect {
* last user closes the device
* @going_away: marks devices that are in a middle of unregistering and
* causes input_open_device*() fail with -ENODEV.
* @sync: set to %true when there were no new events since last EV_SYN
* @dev: driver model's view of this device
* @h_list: list of input handles associated with the device. When
* accessing the list dev->mutex must be held
@ -1140,6 +1163,8 @@ struct input_dev {
unsigned long ffbit[BITS_TO_LONGS(FF_CNT)];
unsigned long swbit[BITS_TO_LONGS(SW_CNT)];
unsigned int hint_events_per_packet;
unsigned int keycodemax;
unsigned int keycodesize;
void *keycode;
@ -1153,22 +1178,19 @@ struct input_dev {
unsigned int repeat_key;
struct timer_list timer;
int sync;
int rep[REP_CNT];
int abs[ABS_CNT];
int rep[REP_MAX + 1];
struct input_mt_slot *mt;
int mtsize;
int slot;
struct input_absinfo *absinfo;
unsigned long key[BITS_TO_LONGS(KEY_CNT)];
unsigned long led[BITS_TO_LONGS(LED_CNT)];
unsigned long snd[BITS_TO_LONGS(SND_CNT)];
unsigned long sw[BITS_TO_LONGS(SW_CNT)];
int absmax[ABS_CNT];
int absmin[ABS_CNT];
int absfuzz[ABS_CNT];
int absflat[ABS_CNT];
int absres[ABS_CNT];
int (*open)(struct input_dev *dev);
void (*close)(struct input_dev *dev);
int (*flush)(struct input_dev *dev, struct file *file);
@ -1182,6 +1204,8 @@ struct input_dev {
unsigned int users;
bool going_away;
bool sync;
struct device dev;
struct list_head h_list;
@ -1406,18 +1430,54 @@ static inline void input_mt_sync(struct input_dev *dev)
input_event(dev, EV_SYN, SYN_MT_REPORT, 0);
}
static inline void input_mt_slot(struct input_dev *dev, int slot)
{
input_event(dev, EV_ABS, ABS_MT_SLOT, slot);
}
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code);
static inline void input_set_abs_params(struct input_dev *dev, int axis, int min, int max, int fuzz, int flat)
/**
* input_set_events_per_packet - tell handlers about the driver event rate
* @dev: the input device used by the driver
* @n_events: the average number of events between calls to input_sync()
*
* If the event rate sent from a device is unusually large, use this
* function to set the expected event rate. This will allow handlers
* to set up an appropriate buffer size for the event stream, in order
* to minimize information loss.
*/
static inline void input_set_events_per_packet(struct input_dev *dev, int n_events)
{
dev->absmin[axis] = min;
dev->absmax[axis] = max;
dev->absfuzz[axis] = fuzz;
dev->absflat[axis] = flat;
dev->absbit[BIT_WORD(axis)] |= BIT_MASK(axis);
dev->hint_events_per_packet = n_events;
}
void input_alloc_absinfo(struct input_dev *dev);
void input_set_abs_params(struct input_dev *dev, unsigned int axis,
int min, int max, int fuzz, int flat);
#define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \
static inline int input_abs_get_##_suffix(struct input_dev *dev, \
unsigned int axis) \
{ \
return dev->absinfo ? dev->absinfo[axis]._item : 0; \
} \
\
static inline void input_abs_set_##_suffix(struct input_dev *dev, \
unsigned int axis, int val) \
{ \
input_alloc_absinfo(dev); \
if (dev->absinfo) \
dev->absinfo[axis]._item = val; \
}
INPUT_GENERATE_ABS_ACCESSORS(val, value)
INPUT_GENERATE_ABS_ACCESSORS(min, minimum)
INPUT_GENERATE_ABS_ACCESSORS(max, maximum)
INPUT_GENERATE_ABS_ACCESSORS(fuzz, fuzz)
INPUT_GENERATE_ABS_ACCESSORS(flat, flat)
INPUT_GENERATE_ABS_ACCESSORS(res, resolution)
int input_get_keycode(struct input_dev *dev,
unsigned int scancode, unsigned int *keycode);
int input_set_keycode(struct input_dev *dev,
@ -1485,5 +1545,8 @@ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file);
int input_ff_create_memless(struct input_dev *dev, void *data,
int (*play_effect)(struct input_dev *, void *, struct ff_effect *));
int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots);
void input_mt_destroy_slots(struct input_dev *dev);
#endif
#endif
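Pulling the slot additions together, a driver for a 10-contact panel might follow this sketch; the helper names and axis ranges are invented:

#include <linux/input.h>

static int example_mt_setup(struct input_dev *dev)
{
	input_set_abs_params(dev, ABS_MT_POSITION_X, 0, 1023, 0, 0);
	input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, 767, 0, 0);
	input_set_abs_params(dev, ABS_MT_TRACKING_ID, 0, 65535, 0, 0);
	return input_mt_create_slots(dev, 10);	/* also sets up ABS_MT_SLOT */
}

static void example_mt_report(struct input_dev *dev, int slot, int id,
			      int x, int y)
{
	input_mt_slot(dev, slot);
	input_report_abs(dev, ABS_MT_TRACKING_ID, id);
	input_report_abs(dev, ABS_MT_POSITION_X, x);
	input_report_abs(dev, ABS_MT_POSITION_Y, y);
	input_sync(dev);
}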

View file

@ -0,0 +1,349 @@
/*
* include/linux/input/adxl34x.h
*
* Digital Accelerometer characteristics are highly application specific
* and may vary between boards and models. The platform_data for the
* device's "struct device" holds this information.
*
* Copyright 2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#ifndef __LINUX_INPUT_ADXL34X_H__
#define __LINUX_INPUT_ADXL34X_H__
struct adxl34x_platform_data {
/*
* X,Y,Z Axis Offset:
* offer user offset adjustments in two's complement
* form with a scale factor of 15.6 mg/LSB (i.e. 0x7F = +2 g)
*/
s8 x_axis_offset;
s8 y_axis_offset;
s8 z_axis_offset;
/*
* TAP_X/Y/Z Enable: Setting TAP_X, Y, or Z Enable enables X,
* Y, or Z participation in Tap detection. A '0' excludes the
* selected axis from participation in Tap detection.
* Setting the SUPPRESS bit suppresses Double Tap detection if
* acceleration greater than tap_threshold is present between
* taps.
*/
#define ADXL_SUPPRESS (1 << 3)
#define ADXL_TAP_X_EN (1 << 2)
#define ADXL_TAP_Y_EN (1 << 1)
#define ADXL_TAP_Z_EN (1 << 0)
u8 tap_axis_control;
/*
* tap_threshold:
* holds the threshold value for tap detection/interrupts.
* The data format is unsigned. The scale factor is 62.5 mg/LSB
* (i.e. 0xFF = +16 g). A zero value may result in undesirable
* behavior if Tap/Double Tap is enabled.
*/
u8 tap_threshold;
/*
* tap_duration:
* is an unsigned time value representing the maximum
* time that an event must be above the tap_threshold threshold
* to qualify as a tap event. The scale factor is 625 us/LSB. A zero
* value will prevent Tap/Double Tap functions from working.
*/
u8 tap_duration;
/*
* tap_latency:
* is an unsigned time value representing the wait time
* from the detection of a tap event to the opening of the time
* window tap_window for a possible second tap event. The scale
* factor is 1.25 ms/LSB. A zero value will disable the Double Tap
* function.
*/
u8 tap_latency;
/*
* tap_window:
* is an unsigned time value representing the amount
* of time after the expiration of tap_latency during which a second
* tap can begin. The scale factor is 1.25 ms/LSB. A zero value will
* disable the Double Tap function.
*/
u8 tap_window;
/*
* act_axis_control:
* X/Y/Z Enable: A '1' enables X, Y, or Z participation in activity
* or inactivity detection. A '0' excludes the selected axis from
* participation. If all of the axes are excluded, the function is
* disabled.
* AC/DC: A '0' = DC coupled operation and a '1' = AC coupled
* operation. In DC coupled operation, the current acceleration is
* compared with activity_threshold and inactivity_threshold directly
* to determine whether activity or inactivity is detected. In AC
* coupled operation for activity detection, the acceleration value
* at the start of activity detection is taken as a reference value.
* New samples of acceleration are then compared to this
* reference value and if the magnitude of the difference exceeds
* activity_threshold the device will trigger an activity interrupt. In
* AC coupled operation for inactivity detection, a reference value
* is used again for comparison and is updated whenever the
* device exceeds the inactivity threshold. Once the reference
* value is selected, the device compares the magnitude of the
* difference between the reference value and the current
* acceleration with inactivity_threshold. If the difference is below
* inactivity_threshold for a total of inactivity_time, the device is
* considered inactive and the inactivity interrupt is triggered.
*/
#define ADXL_ACT_ACDC (1 << 7)
#define ADXL_ACT_X_EN (1 << 6)
#define ADXL_ACT_Y_EN (1 << 5)
#define ADXL_ACT_Z_EN (1 << 4)
#define ADXL_INACT_ACDC (1 << 3)
#define ADXL_INACT_X_EN (1 << 2)
#define ADXL_INACT_Y_EN (1 << 1)
#define ADXL_INACT_Z_EN (1 << 0)
u8 act_axis_control;
/*
* activity_threshold:
* holds the threshold value for activity detection.
* The data format is unsigned. The scale factor is
* 62.5 mg/LSB. A zero value may result in undesirable behavior if
* Activity interrupt is enabled.
*/
u8 activity_threshold;
/*
* inactivity_threshold:
* holds the threshold value for inactivity
* detection. The data format is unsigned. The scale
* factor is 62.5 mg/LSB. A zero value may result in undesirable
* behavior if Inactivity interrupt is enabled.
*/
u8 inactivity_threshold;
/*
* inactivity_time:
* is an unsigned time value representing the
* amount of time that acceleration must be below the value in
* inactivity_threshold for inactivity to be declared. The scale factor
* is 1 second/LSB. Unlike the other interrupt functions, which
* operate on unfiltered data, the inactivity function operates on the
* filtered output data. At least one output sample must be
* generated for the inactivity interrupt to be triggered. This will
* result in the function appearing unresponsive if the
* inactivity_time register is set with a value less than the time
* constant of the Output Data Rate. A zero value will result in an
* interrupt when the output data is below inactivity_threshold.
*/
u8 inactivity_time;
/*
* free_fall_threshold:
* holds the threshold value for Free-Fall detection.
* The data format is unsigned. The root-sum-square(RSS) value
* of all axes is calculated and compared to the value in
* free_fall_threshold to determine if a free fall event may be
* occurring. The scale factor is 62.5 mg/LSB. A zero value may
* result in undesirable behavior if Free-Fall interrupt is
* enabled. Values between 300 and 600 mg (0x05 to 0x09) are
* recommended.
*/
u8 free_fall_threshold;
/*
* free_fall_time:
* is an unsigned time value representing the minimum
* time that the RSS value of all axes must be less than
* free_fall_threshold to generate a Free-Fall interrupt. The
* scale factor is 5 ms/LSB. A zero value may result in
* undesirable behavior if Free-Fall interrupt is enabled.
* Values between 100 to 350 ms (0x14 to 0x46) are recommended.
*/
u8 free_fall_time;
/*
* data_rate:
* Selects device bandwidth and output data rate.
* RATE = 3200 Hz / (2^(15 - x)). Default value is 0x0A, or 100 Hz
* Output Data Rate. An Output Data Rate should be selected that
* is appropriate for the communication protocol and frequency
* selected. Selecting too high of an Output Data Rate with a low
* communication speed will result in samples being discarded.
*/
u8 data_rate;
/*
* data_range:
* FULL_RES: When this bit is set, the device is
* in Full-Resolution Mode, where the output resolution increases
* with RANGE to maintain a 4 mg/LSB scale factor. When this
* bit is cleared, the device is in 10-bit Mode, and RANGE determines
* the maximum g-Range and scale factor.
*/
#define ADXL_FULL_RES (1 << 3)
#define ADXL_RANGE_PM_2g 0
#define ADXL_RANGE_PM_4g 1
#define ADXL_RANGE_PM_8g 2
#define ADXL_RANGE_PM_16g 3
u8 data_range;
/*
* low_power_mode:
* A '0' = Normal operation and a '1' = Reduced
* power operation with somewhat higher noise.
*/
u8 low_power_mode;
/*
* power_mode:
* LINK: A '1' with both the activity and inactivity functions
* enabled will delay the start of the activity function until
* inactivity is detected. Once activity is detected, inactivity
* detection will begin and prevent the detection of activity. This
* bit serially links the activity and inactivity functions. When '0'
* the inactivity and activity functions are concurrent. Additional
* information can be found in the Application section under Link
* Mode.
* AUTO_SLEEP: A '1' sets the ADXL34x to switch to Sleep Mode
* when inactivity (acceleration has been below inactivity_threshold
* for at least inactivity_time) is detected and the LINK bit is set.
* A '0' disables automatic switching to Sleep Mode. See SLEEP
* for further description.
*/
#define ADXL_LINK (1 << 5)
#define ADXL_AUTO_SLEEP (1 << 4)
u8 power_mode;
/*
* fifo_mode:
* BYPASS The FIFO is bypassed
* FIFO FIFO collects up to 32 values then stops collecting data
* STREAM FIFO holds the last 32 data values. Once full, the FIFO's
* oldest data is lost as it is replaced with newer data
*
* DEFAULT should be ADXL_FIFO_STREAM
*/
#define ADXL_FIFO_BYPASS 0
#define ADXL_FIFO_FIFO 1
#define ADXL_FIFO_STREAM 2
u8 fifo_mode;
/*
* watermark:
* The Watermark feature can be used to reduce the interrupt load
* of the system. The FIFO fills up to the value stored in watermark
* [1..32] and then generates an interrupt.
* A '0' disables the watermark feature.
*/
u8 watermark;
u32 ev_type; /* EV_ABS or EV_REL */
u32 ev_code_x; /* ABS_X,Y,Z or REL_X,Y,Z */
u32 ev_code_y; /* ABS_X,Y,Z or REL_X,Y,Z */
u32 ev_code_z; /* ABS_X,Y,Z or REL_X,Y,Z */
/*
* A valid BTN or KEY Code; use tap_axis_control to disable
* event reporting
*/
u32 ev_code_tap[3]; /* EV_KEY {X-Axis, Y-Axis, Z-Axis} */
/*
* A valid BTN or KEY Code for Free-Fall or Activity enables
* input event reporting. A '0' disables the Free-Fall or
* Activity reporting.
*/
u32 ev_code_ff; /* EV_KEY */
u32 ev_code_act_inactivity; /* EV_KEY */
/*
* Use ADXL34x INT2 instead of INT1
*/
u8 use_int2;
/*
* ADXL346 only ORIENTATION SENSING feature
* The orientation function of the ADXL346 reports both 2-D and
* 3-D orientation concurrently.
*/
#define ADXL_EN_ORIENTATION_2D 1
#define ADXL_EN_ORIENTATION_3D 2
#define ADXL_EN_ORIENTATION_2D_3D 3
u8 orientation_enable;
/*
* The width of the deadzone region between two or more
* orientation positions is determined by setting the Deadzone
* value. The deadzone region size can be specified with a
* resolution of 3.6deg. The deadzone angle represents the total
* angle where the orientation is considered invalid.
*/
#define ADXL_DEADZONE_ANGLE_0p0 0 /* !!!0.0 [deg] */
#define ADXL_DEADZONE_ANGLE_3p6 1 /* 3.6 [deg] */
#define ADXL_DEADZONE_ANGLE_7p2 2 /* 7.2 [deg] */
#define ADXL_DEADZONE_ANGLE_10p8 3 /* 10.8 [deg] */
#define ADXL_DEADZONE_ANGLE_14p4 4 /* 14.4 [deg] */
#define ADXL_DEADZONE_ANGLE_18p0 5 /* 18.0 [deg] */
#define ADXL_DEADZONE_ANGLE_21p6 6 /* 21.6 [deg] */
#define ADXL_DEADZONE_ANGLE_25p2 7 /* 25.2 [deg] */
u8 deadzone_angle;
/*
* To eliminate most human motion such as walking or shaking,
* a Divisor value should be selected to effectively limit the
* orientation bandwidth. Set the depth of the filter used to
* low-pass filter the measured acceleration for stable
* orientation sensing
*/
#define ADXL_LP_FILTER_DIVISOR_2 0
#define ADXL_LP_FILTER_DIVISOR_4 1
#define ADXL_LP_FILTER_DIVISOR_8 2
#define ADXL_LP_FILTER_DIVISOR_16 3
#define ADXL_LP_FILTER_DIVISOR_32 4
#define ADXL_LP_FILTER_DIVISOR_64 5
#define ADXL_LP_FILTER_DIVISOR_128 6
#define ADXL_LP_FILTER_DIVISOR_256 7
u8 divisor_length;
u32 ev_codes_orient_2d[4]; /* EV_KEY {+X, -X, +Y, -Y} */
u32 ev_codes_orient_3d[6]; /* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */
};
#endif

View file

@ -0,0 +1,10 @@
#ifndef _LINUX_CY8CTMG110_PDATA_H
#define _LINUX_CY8CTMG110_PDATA_H
struct cy8ctmg110_pdata
{
int reset_pin; /* Reset pin is wired to this GPIO (optional) */
int irq_pin; /* IRQ pin is wired to this GPIO */
};
#endif

View file

@ -41,6 +41,9 @@ struct matrix_keymap_data {
* @col_scan_delay_us: delay, measured in microseconds, that is
* needed before we can read the keypad after activating column gpio
* @debounce_ms: debounce interval in milliseconds
* @clustered_irq: may be specified if interrupts of all row/column GPIOs
* are bundled to one single irq
* @clustered_irq_flags: flags that are needed for the clustered irq
* @active_low: gpio polarity
* @wakeup: controls whether the device should be set up as wakeup
* source
@ -63,6 +66,9 @@ struct matrix_keypad_platform_data {
/* key debounce interval in milli-second */
unsigned int debounce_ms;
unsigned int clustered_irq;
unsigned int clustered_irq_flags;
bool active_low;
bool wakeup;
bool no_autorepeat;

View file

@ -0,0 +1,86 @@
/*
* intel_mid_dma.h - Intel MID DMA Drivers
*
* Copyright (C) 2008-10 Intel Corp
* Author: Vinod Koul <vinod.koul@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*
*/
#ifndef __INTEL_MID_DMA_H__
#define __INTEL_MID_DMA_H__
#include <linux/dmaengine.h>
/* DMA transaction width; src and dst widths must be the same.
 * The DMA length must be width-aligned: for 32-bit width the
 * length must be 32-bit (4 byte) aligned. */
enum intel_mid_dma_width {
LNW_DMA_WIDTH_8BIT = 0x0,
LNW_DMA_WIDTH_16BIT = 0x1,
LNW_DMA_WIDTH_32BIT = 0x2,
};
/*DMA mode configurations*/
enum intel_mid_dma_mode {
LNW_DMA_PER_TO_MEM = 0, /* peripheral to memory configuration */
LNW_DMA_MEM_TO_PER, /* memory to peripheral configuration */
LNW_DMA_MEM_TO_MEM, /* mem to mem config (testing only) */
};
/*DMA handshaking*/
enum intel_mid_dma_hs_mode {
LNW_DMA_HW_HS = 0, /*HW Handshaking only*/
LNW_DMA_SW_HS = 1, /*SW Handshaking not recommended*/
};
/*Burst size configuration*/
enum intel_mid_dma_msize {
LNW_DMA_MSIZE_1 = 0x0,
LNW_DMA_MSIZE_4 = 0x1,
LNW_DMA_MSIZE_8 = 0x2,
LNW_DMA_MSIZE_16 = 0x3,
LNW_DMA_MSIZE_32 = 0x4,
LNW_DMA_MSIZE_64 = 0x5,
};
/**
* struct intel_mid_dma_slave - DMA slave structure
*
* @dirn: DMA trf direction
* @src_width: tx register width
* @dst_width: rx register width
* @hs_mode: HW/SW handshaking mode
* @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
* @src_msize: Source DMA burst size
* @dst_msize: Dst DMA burst size
* @device_instance: DMA peripheral device instance, we can have multiple
* peripheral device connected to single DMAC
*/
struct intel_mid_dma_slave {
enum dma_data_direction dirn;
enum intel_mid_dma_width src_width; /*width of DMA src txn*/
enum intel_mid_dma_width dst_width; /*width of DMA dst txn*/
enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
enum intel_mid_dma_msize src_msize; /* size of src burst */
enum intel_mid_dma_msize dst_msize; /* size of dst burst */
unsigned int device_instance; /* 0, 1 for peripheral instance */
};
#endif /*__INTEL_MID_DMA_H__*/
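An example channel description using the fields above; all values are invented:

static struct intel_mid_dma_slave example_dma_slave = {
	.dirn            = DMA_TO_DEVICE,	/* memory to peripheral */
	.src_width       = LNW_DMA_WIDTH_32BIT,
	.dst_width       = LNW_DMA_WIDTH_32BIT,
	.hs_mode         = LNW_DMA_HW_HS,
	.cfg_mode        = LNW_DMA_MEM_TO_PER,
	.src_msize       = LNW_DMA_MSIZE_16,
	.dst_msize       = LNW_DMA_MSIZE_16,
	.device_instance = 0,
};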

View file

@ -0,0 +1,15 @@
#ifndef LINUX_INTEL_PMIC_H
#define LINUX_INTEL_PMIC_H
struct intel_pmic_gpio_platform_data {
/* the first IRQ of the chip */
unsigned irq_base;
/* number assigned to the first GPIO */
unsigned gpio_base;
/* sram address for gpiointr register, the langwell chip will map
* the PMIC spi GPIO expander's GPIOINTR register in sram.
*/
unsigned gpiointr;
};
#endif

View file

@ -53,16 +53,21 @@
* IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
* Used by threaded interrupts which need to keep the
* irq line disabled until the threaded handler has been run.
* IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
*
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
#define IRQF_TIMER 0x00000200
#define __IRQF_TIMER 0x00000200
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
#define IRQF_NO_SUSPEND 0x00004000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND)
/*
* Bits used by threaded handlers:
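With IRQF_TIMER now implying IRQF_NO_SUSPEND, a driver whose interrupt must stay armed across suspend can request its line as in this sketch (handler and names invented):

#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_setup_irq(unsigned int irq, void *dev_id)
{
	/* keep this shared wakeup source enabled during suspend */
	return request_irq(irq, example_isr,
			   IRQF_SHARED | IRQF_NO_SUSPEND,
			   "example", dev_id);
}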

Some files were not shown because too many files have changed in this diff.