#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

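/*
 * Decode a PERF_TYPE_HW_CACHE config value (cache type, op and result
 * packed into the low three bytes) into a hardware event number using
 * the PMU's cache map.
 */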
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

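/*
 * Program the counter for the next sample period: the counter is preset
 * to -left so that it overflows (and interrupts) after "left" events.
 * Returns 1 if a new period had to be started.
 */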
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

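/*
 * Read the hardware counter and fold the delta since the last read into
 * the event count, using a cmpxchg loop so that concurrent updates of
 * prev_count are not lost.
 */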
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

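/*
 * Check whether an event could ever go on this PMU as part of a group:
 * software events, events in an error state and events that are off and
 * not due to be enabled on exec always validate; anything else must be
 * able to claim a counter from the back-end.
 */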
static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	if (is_software_event(event))
		return 1;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}

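/*
 * Common interrupt dispatcher: recover the PMU from the IRQ cookie
 * (indirecting through the percpu pointer if need be), give any platform
 * handler the chance to wrap the call, and report how long the handler
 * took so that perf can throttle its sampling rate.
 */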
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	if (irq_is_percpu(irq))
		dev = *(void **)dev;
	armpmu = dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, dev);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
	pm_runtime_put_sync(&armpmu->plat_device->dev);
}

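/*
 * Grab a runtime PM reference and request the PMU interrupts when the
 * first event is created; undone by armpmu_release_hardware() when the
 * last event is destroyed.
 */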
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	pm_runtime_get_sync(&pmu_device->dev);
	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	|= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period	= armpmu->max_period >> 1;
		hwc->last_period	= hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	armpmu->stop(armpmu);
}

#ifdef CONFIG_PM_RUNTIME
static int armpmu_runtime_resume(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_resume)
		return plat->runtime_resume(dev);

	return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_suspend)
		return plat->runtime_suspend(dev);

	return 0;
}
#endif

const struct dev_pm_ops armpmu_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};

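/*
 * Wire up the common perf_event callbacks. The hardware-specific hooks
 * (enable, disable, read_counter and friends) are supplied by the
 * individual CPU PMU back-ends.
 */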
static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}

int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
		armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}

/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	perf_callchain_store(entry, regs->ARM_pc);
	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;

	perf_callchain_store(entry, fr->pc);
	return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest os callchain now */
		return;
	}

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}