#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

static int
armpmu_map_cache_event(const unsigned (*cache_map)
                                      [PERF_COUNT_HW_CACHE_MAX]
                                      [PERF_COUNT_HW_CACHE_OP_MAX]
                                      [PERF_COUNT_HW_CACHE_RESULT_MAX],
                       u64 config)
{
        unsigned int cache_type, cache_op, cache_result, ret;

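        /*
         * perf packs cache events into attr.config as
         * (type | (op << 8) | (result << 16)); pull out each byte and
         * range-check it before indexing the per-PMU cache map.
         */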
        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

        if (ret == CACHE_OP_UNSUPPORTED)
                return -ENOENT;

        return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
        int mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
        return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
                 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
                 const unsigned (*cache_map)
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX],
                 u32 raw_event_mask)
{
        u64 config = event->attr.config;

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                return armpmu_map_hw_event(event_map, config);
        case PERF_TYPE_HW_CACHE:
                return armpmu_map_cache_event(cache_map, config);
        case PERF_TYPE_RAW:
                return armpmu_map_raw_event(raw_event_mask, config);
        }

        return -ENOENT;
}

int armpmu_event_set_period(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
        if (unlikely(period != hwc->last_period))
                left = period - (hwc->last_period - left);

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (left > (s64)armpmu->max_period)
                left = armpmu->max_period;

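        /*
         * The counter is programmed with -left so that it overflows (and
         * raises an interrupt) after exactly 'left' increments; e.g. with
         * left == 1000, a 32-bit counter starts counting from 0xfffffc18.
         */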
        local64_set(&hwc->prev_count, (u64)-left);

        armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

        perf_event_update_userpage(event);

        return ret;
}

u64 armpmu_event_update(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;

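        /*
         * Update prev_count with a cmpxchg loop: if the interrupt handler
         * races with us and moves prev_count between our read and the
         * cmpxchg, simply retry. Masking the delta with max_period keeps
         * the arithmetic correct when a narrow hardware counter wraps.
         */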
again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = armpmu->read_counter(event);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* Don't read disabled counters! */
        if (hwc->idx < 0)
                return;

        armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to update the counter, so ignore
         * PERF_EF_UPDATE, see comments in armpmu_start().
         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(event);
                armpmu_event_update(event);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}

static void armpmu_start(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to reprogram the period, so ignore
         * PERF_EF_RELOAD, see the comment below.
         */
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
         * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
         */
        armpmu_event_set_period(event);
        armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = armpmu->get_hw_events();
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        WARN_ON(idx < 0);

        armpmu_stop(event, PERF_EF_UPDATE);
        hw_events->events[idx] = NULL;
        clear_bit(idx, hw_events->used_mask);

        perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = armpmu->get_hw_events();
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;

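        /* Keep the whole PMU quiet while the event bookkeeping is updated. */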
        perf_pmu_disable(event->pmu);

        /* If we don't have space for the counter then finish early. */
        idx = armpmu->get_event_idx(hw_events, event);
        if (idx < 0) {
                err = idx;
                goto out;
        }

        /*
         * If there is an event in the counter we are going to use then make
         * sure it is disabled.
         */
        event->hw.idx = idx;
        armpmu->disable(event);
        hw_events->events[idx] = event;

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                armpmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

out:
        perf_pmu_enable(event->pmu);
        return err;
}

static int
validate_event(struct pmu_hw_events *hw_events,
               struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu *leader_pmu = event->group_leader->pmu;

        if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
                return 1;

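        /* Simulate scheduling the event to check the counter constraints. */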
        return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct pmu_hw_events fake_pmu;
        DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

        /*
         * Initialise the fake PMU. We only need to populate the
         * used_mask for the purposes of validation.
         */
        memset(fake_used_mask, 0, sizeof(fake_used_mask));
        fake_pmu.used_mask = fake_used_mask;

        if (!validate_event(&fake_pmu, leader))
                return -EINVAL;

        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_pmu, sibling))
                        return -EINVAL;
        }

        if (!validate_event(&fake_pmu, event))
                return -EINVAL;

        return 0;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
        struct arm_pmu *armpmu = (struct arm_pmu *) dev;
        struct platform_device *plat_device = armpmu->plat_device;
        struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);

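        /*
         * Platform code may supply a wrapper (arm_pmu_platdata->handle_irq)
         * around the PMU's own interrupt handler; fall through to the PMU
         * handler directly when no wrapper is registered.
         */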
        if (plat && plat->handle_irq)
                return plat->handle_irq(irq, dev, armpmu->handle_irq);
        else
                return armpmu->handle_irq(irq, dev);
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
        armpmu->free_irq(armpmu);
        pm_runtime_put_sync(&armpmu->plat_device->dev);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
        int err;
        struct platform_device *pmu_device = armpmu->plat_device;

        if (!pmu_device)
                return -ENODEV;

        pm_runtime_get_sync(&pmu_device->dev);
        err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
        if (err) {
                armpmu_release_hardware(armpmu);
                return err;
        }

        return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        atomic_t *active_events = &armpmu->active_events;
        struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

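        /*
         * The last event to go away releases the hardware. Take the
         * reserve mutex only if the active count actually drops to zero.
         */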
        if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
                armpmu_release_hardware(armpmu);
                mutex_unlock(pmu_reserve_mutex);
        }
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
        return attr->exclude_idle || attr->exclude_user ||
               attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int mapping, err;

        mapping = armpmu->map_event(event);

        if (mapping < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapping;
        }

        /*
         * We don't assign an index until we actually place the event onto
         * hardware. Use -1 to signify that we haven't decided where to put it
         * yet. For SMP systems, each core has its own PMU so we can't do any
         * clever allocation or constraints checking at this point.
         */
        hwc->idx = -1;
        hwc->config_base = 0;
        hwc->config = 0;
        hwc->event_base = 0;

        /*
         * Check whether we need to exclude the counter from certain modes.
         */
        if ((!armpmu->set_event_filter ||
             armpmu->set_event_filter(hwc, &event->attr)) &&
             event_requires_mode_exclusion(&event->attr)) {
                pr_debug("ARM performance counters do not support "
                         "mode exclusion\n");
                return -EOPNOTSUPP;
        }

        /*
         * Store the event encoding into the config_base field.
         */
        hwc->config_base |= (unsigned long)mapping;

        if (!hwc->sample_period) {
                /*
                 * For non-sampling runs, limit the sample_period to half
                 * of the counter width. That way, the new counter value
                 * is far less likely to overtake the previous one unless
                 * you have some serious IRQ latency issues.
                 */
                hwc->sample_period = armpmu->max_period >> 1;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        err = 0;
        if (event->group_leader != event) {
                err = validate_group(event);
                if (err)
                        return -EINVAL;
        }

        return err;
}

static int armpmu_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        int err = 0;
        atomic_t *active_events = &armpmu->active_events;

        /* does not support taken branch sampling */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (armpmu->map_event(event) == -ENOENT)
                return -ENOENT;

        event->destroy = hw_perf_event_destroy;

        if (!atomic_inc_not_zero(active_events)) {
                mutex_lock(&armpmu->reserve_mutex);
                if (atomic_read(active_events) == 0)
                        err = armpmu_reserve_hardware(armpmu);

                if (!err)
                        atomic_inc(active_events);
                mutex_unlock(&armpmu->reserve_mutex);
        }

        if (err)
                return err;

        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);

        return err;
}

static void armpmu_enable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);
        struct pmu_hw_events *hw_events = armpmu->get_hw_events();
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

        if (enabled)
                armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);
        armpmu->stop(armpmu);
}

#ifdef CONFIG_PM_RUNTIME
static int armpmu_runtime_resume(struct device *dev)
{
        struct arm_pmu_platdata *plat = dev_get_platdata(dev);

        if (plat && plat->runtime_resume)
                return plat->runtime_resume(dev);

        return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
        struct arm_pmu_platdata *plat = dev_get_platdata(dev);

        if (plat && plat->runtime_suspend)
                return plat->runtime_suspend(dev);

        return 0;
}
#endif

const struct dev_pm_ops armpmu_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};

static void __init armpmu_init(struct arm_pmu *armpmu)
{
        atomic_set(&armpmu->active_events, 0);
        mutex_init(&armpmu->reserve_mutex);

        armpmu->pmu = (struct pmu) {
                .pmu_enable = armpmu_enable,
                .pmu_disable = armpmu_disable,
                .event_init = armpmu_event_init,
                .add = armpmu_add,
                .del = armpmu_del,
                .start = armpmu_start,
                .stop = armpmu_stop,
                .read = armpmu_read,
        };
}

int armpmu_register(struct arm_pmu *armpmu, char *name, int type)
{
        armpmu_init(armpmu);
        pr_info("enabled with %s PMU driver, %d counters available\n",
                armpmu->name, armpmu->num_events);
        return perf_pmu_register(&armpmu->pmu, name, type);
}

/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
        struct frame_tail __user *fp;
        unsigned long sp;
        unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry *entry)
{
        struct frame_tail buftail;

        /* Also check accessibility of one struct frame_tail beyond */
        if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
                return NULL;
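        /*
         * We may be unwinding from the PMU overflow interrupt, so the copy
         * must not fault in user pages: the inatomic variant fails instead
         * of sleeping if the frame isn't resident.
         */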
        if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
                return NULL;

        perf_callchain_store(entry, buftail.lr);

        /*
         * Frame pointers should strictly progress back up the stack
         * (towards higher addresses).
         */
        if (tail + 1 >= buftail.fp)
                return NULL;

        return buftail.fp - 1;
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        struct frame_tail __user *tail;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* We don't support guest OS callchains yet */
                return;
        }

        tail = (struct frame_tail __user *)regs->ARM_fp - 1;

        while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
               tail && !((unsigned long)tail & 0x3))
                tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
                void *data)
{
        struct perf_callchain_entry *entry = data;
        perf_callchain_store(entry, fr->pc);
        return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        struct stackframe fr;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* We don't support guest OS callchains yet */
                return;
        }

        fr.fp = regs->ARM_fp;
        fr.sp = regs->ARM_sp;
        fr.lr = regs->ARM_lr;
        fr.pc = regs->ARM_pc;
        walk_stackframe(&fr, callchain_trace, entry);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
                return perf_guest_cbs->get_guest_ip();

        return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
        int misc = 0;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                if (perf_guest_cbs->is_user_mode())
                        misc |= PERF_RECORD_MISC_GUEST_USER;
                else
                        misc |= PERF_RECORD_MISC_GUEST_KERNEL;
        } else {
                if (user_mode(regs))
                        misc |= PERF_RECORD_MISC_USER;
                else
                        misc |= PERF_RECORD_MISC_KERNEL;
        }

        return misc;
}