Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"Bigger kernel side changes:
- Add backwards writing capability to the perf ring-buffer code,
which is preparation for future advanced features like robust
'overwrite support' and snapshot mode. (Wang Nan)
- Add pause and resume ioctls for the perf ringbuffer (Wang Nan)
- x86 Intel cstate code cleanups and reorganization (Thomas Gleixner)
- x86 Intel uncore and CPU PMU driver updates (Kan Liang, Peter
Zijlstra)
- x86 AUX (Intel PT) related enhancements and updates (Alexander
Shishkin)
- x86 MSR PMU driver enhancements and updates (Huang Rui)
- ... and lots of other changes spread out over 40+ commits.
Biggest tooling side changes:
- 'perf trace' features and enhancements. (Arnaldo Carvalho de Melo)
- BPF tooling updates (Wang Nan)
- 'perf sched' updates (Jiri Olsa)
- 'perf probe' updates (Masami Hiramatsu)
- ... plus 200+ other enhancements, fixes and cleanups to tools/
The merge commits, the shortlog and the changelogs contain a lot more
details"
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (249 commits)
perf/core: Disable the event on a truncated AUX record
perf/x86/intel/pt: Generate PMI in the STOP region as well
perf buildid-cache: Use lsdir() for looking up buildid caches
perf symbols: Use lsdir() for the search in kcore cache directory
perf tools: Use SBUILD_ID_SIZE where applicable
perf tools: Fix lsdir to set errno correctly
perf trace: Move seccomp args beautifiers to tools/perf/trace/beauty/
perf trace: Move flock op beautifier to tools/perf/trace/beauty/
perf build: Add build-test for debug-frame on arm/arm64
perf build: Add build-test for libunwind cross-platforms support
perf script: Fix export of callchains with recursion in db-export
perf script: Fix callchain addresses in db-export
perf script: Fix symbol insertion behavior in db-export
perf symbols: Add dso__insert_symbol function
perf scripting python: Use Py_FatalError instead of die()
perf tools: Remove xrealloc and ALLOC_GROW
perf help: Do not use ALLOC_GROW in add_cmd_list
perf pmu: Make pmu_formats_string to check return value of strbuf
perf header: Make topology checkers to check return value of strbuf
perf tools: Make alias handler to check return value of strbuf
...
This commit is contained in:
commit
36db171cc7
217 changed files with 8344 additions and 2747 deletions
|
|
@ -58,7 +58,7 @@ struct perf_guest_info_callbacks {
|
|||
|
||||
struct perf_callchain_entry {
|
||||
__u64 nr;
|
||||
__u64 ip[PERF_MAX_STACK_DEPTH];
|
||||
__u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
|
||||
};
|
||||
|
||||
struct perf_raw_record {
|
||||
|
|
@ -151,6 +151,15 @@ struct hw_perf_event {
|
|||
*/
|
||||
struct task_struct *target;
|
||||
|
||||
/*
|
||||
* PMU would store hardware filter configuration
|
||||
* here.
|
||||
*/
|
||||
void *addr_filters;
|
||||
|
||||
/* Last sync'ed generation of filters */
|
||||
unsigned long addr_filters_gen;
|
||||
|
||||
/*
|
||||
* hw_perf_event::state flags; used to track the PERF_EF_* state.
|
||||
*/
|
||||
|
|
@ -216,6 +225,7 @@ struct perf_event;
|
|||
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08
|
||||
#define PERF_PMU_CAP_EXCLUSIVE 0x10
|
||||
#define PERF_PMU_CAP_ITRACE 0x20
|
||||
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40
|
||||
|
||||
/**
|
||||
* struct pmu - generic performance monitoring unit
|
||||
|
|
@ -240,6 +250,9 @@ struct pmu {
|
|||
int task_ctx_nr;
|
||||
int hrtimer_interval_ms;
|
||||
|
||||
/* number of address filters this PMU can do */
|
||||
unsigned int nr_addr_filters;
|
||||
|
||||
/*
|
||||
* Fully disable/enable this PMU, can be used to protect from the PMI
|
||||
* as well as for lazy/batch writing of the MSRs.
|
||||
|
|
@ -392,12 +405,71 @@ struct pmu {
|
|||
*/
|
||||
void (*free_aux) (void *aux); /* optional */
|
||||
|
||||
/*
|
||||
* Validate address range filters: make sure the HW supports the
|
||||
* requested configuration and number of filters; return 0 if the
|
||||
* supplied filters are valid, -errno otherwise.
|
||||
*
|
||||
* Runs in the context of the ioctl()ing process and is not serialized
|
||||
* with the rest of the PMU callbacks.
|
||||
*/
|
||||
int (*addr_filters_validate) (struct list_head *filters);
|
||||
/* optional */
|
||||
|
||||
/*
|
||||
* Synchronize address range filter configuration:
|
||||
* translate hw-agnostic filters into hardware configuration in
|
||||
* event::hw::addr_filters.
|
||||
*
|
||||
* Runs as a part of filter sync sequence that is done in ->start()
|
||||
* callback by calling perf_event_addr_filters_sync().
|
||||
*
|
||||
* May (and should) traverse event::addr_filters::list, for which its
|
||||
* caller provides necessary serialization.
|
||||
*/
|
||||
void (*addr_filters_sync) (struct perf_event *event);
|
||||
/* optional */
|
||||
|
||||
/*
|
||||
* Filter events for PMU-specific reasons.
|
||||
*/
|
||||
int (*filter_match) (struct perf_event *event); /* optional */
|
||||
};
|
||||
|
||||
/**
|
||||
* struct perf_addr_filter - address range filter definition
|
||||
* @entry: event's filter list linkage
|
||||
* @inode: object file's inode for file-based filters
|
||||
* @offset: filter range offset
|
||||
* @size: filter range size
|
||||
* @range: 1: range, 0: address
|
||||
* @filter: 1: filter/start, 0: stop
|
||||
*
|
||||
* This is a hardware-agnostic filter configuration as specified by the user.
|
||||
*/
|
||||
struct perf_addr_filter {
|
||||
struct list_head entry;
|
||||
struct inode *inode;
|
||||
unsigned long offset;
|
||||
unsigned long size;
|
||||
unsigned int range : 1,
|
||||
filter : 1;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct perf_addr_filters_head - container for address range filters
|
||||
* @list: list of filters for this event
|
||||
* @lock: spinlock that serializes accesses to the @list and event's
|
||||
* (and its children's) filter generations.
|
||||
*
|
||||
* A child event will use parent's @list (and therefore @lock), so they are
|
||||
* bundled together; see perf_event_addr_filters().
|
||||
*/
|
||||
struct perf_addr_filters_head {
|
||||
struct list_head list;
|
||||
raw_spinlock_t lock;
|
||||
};
|
||||
|
||||
/**
|
||||
* enum perf_event_active_state - the states of a event
|
||||
*/
|
||||
|
|
@ -566,6 +638,12 @@ struct perf_event {
|
|||
|
||||
atomic_t event_limit;
|
||||
|
||||
/* address range filters */
|
||||
struct perf_addr_filters_head addr_filters;
|
||||
/* vma address array for file-based filders */
|
||||
unsigned long *addr_filters_offs;
|
||||
unsigned long addr_filters_gen;
|
||||
|
||||
void (*destroy)(struct perf_event *);
|
||||
struct rcu_head rcu_head;
|
||||
|
||||
|
|
@ -834,9 +912,25 @@ extern int perf_event_overflow(struct perf_event *event,
|
|||
struct perf_sample_data *data,
|
||||
struct pt_regs *regs);
|
||||
|
||||
extern void perf_event_output_forward(struct perf_event *event,
|
||||
struct perf_sample_data *data,
|
||||
struct pt_regs *regs);
|
||||
extern void perf_event_output_backward(struct perf_event *event,
|
||||
struct perf_sample_data *data,
|
||||
struct pt_regs *regs);
|
||||
extern void perf_event_output(struct perf_event *event,
|
||||
struct perf_sample_data *data,
|
||||
struct pt_regs *regs);
|
||||
struct perf_sample_data *data,
|
||||
struct pt_regs *regs);
|
||||
|
||||
static inline bool
|
||||
is_default_overflow_handler(struct perf_event *event)
|
||||
{
|
||||
if (likely(event->overflow_handler == perf_event_output_forward))
|
||||
return true;
|
||||
if (unlikely(event->overflow_handler == perf_event_output_backward))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
extern void
|
||||
perf_event_header__init_id(struct perf_event_header *header,
|
||||
|
|
@ -977,9 +1071,11 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
|
|||
extern int get_callchain_buffers(void);
|
||||
extern void put_callchain_buffers(void);
|
||||
|
||||
extern int sysctl_perf_event_max_stack;
|
||||
|
||||
static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
|
||||
{
|
||||
if (entry->nr < PERF_MAX_STACK_DEPTH) {
|
||||
if (entry->nr < sysctl_perf_event_max_stack) {
|
||||
entry->ip[entry->nr++] = ip;
|
||||
return 0;
|
||||
} else {
|
||||
|
|
@ -1001,6 +1097,8 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
|
|||
void __user *buffer, size_t *lenp,
|
||||
loff_t *ppos);
|
||||
|
||||
int perf_event_max_stack_handler(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos);
|
||||
|
||||
static inline bool perf_paranoid_tracepoint_raw(void)
|
||||
{
|
||||
|
|
@ -1045,8 +1143,41 @@ static inline bool has_aux(struct perf_event *event)
|
|||
return event->pmu->setup_aux;
|
||||
}
|
||||
|
||||
static inline bool is_write_backward(struct perf_event *event)
|
||||
{
|
||||
return !!event->attr.write_backward;
|
||||
}
|
||||
|
||||
static inline bool has_addr_filter(struct perf_event *event)
|
||||
{
|
||||
return event->pmu->nr_addr_filters;
|
||||
}
|
||||
|
||||
/*
|
||||
* An inherited event uses parent's filters
|
||||
*/
|
||||
static inline struct perf_addr_filters_head *
|
||||
perf_event_addr_filters(struct perf_event *event)
|
||||
{
|
||||
struct perf_addr_filters_head *ifh = &event->addr_filters;
|
||||
|
||||
if (event->parent)
|
||||
ifh = &event->parent->addr_filters;
|
||||
|
||||
return ifh;
|
||||
}
|
||||
|
||||
extern void perf_event_addr_filters_sync(struct perf_event *event);
|
||||
|
||||
extern int perf_output_begin(struct perf_output_handle *handle,
|
||||
struct perf_event *event, unsigned int size);
|
||||
extern int perf_output_begin_forward(struct perf_output_handle *handle,
|
||||
struct perf_event *event,
|
||||
unsigned int size);
|
||||
extern int perf_output_begin_backward(struct perf_output_handle *handle,
|
||||
struct perf_event *event,
|
||||
unsigned int size);
|
||||
|
||||
extern void perf_output_end(struct perf_output_handle *handle);
|
||||
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
|
||||
const void *buf, unsigned int len);
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue