#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>
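/*
 * Map a generic perf cache event onto this PMU's event encoding. The
 * perf ABI packs a cache event into attr.config as three bytes: bits
 * 0-7 hold the cache type, bits 8-15 the operation and bits 16-23 the
 * access result. Each byte indexes the per-PMU cache_map table.
 */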
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}
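/*
 * Program the counter so that it overflows after 'left' more events:
 * the hardware counter is written with -left (truncated to the 32-bit
 * counter width), so counting up from there hits zero, and hence the
 * overflow interrupt, after exactly the remaining sample period.
 */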
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
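/*
 * Fold the delta since the last read into the event's total count.
 * The cmpxchg loop guards against a racing update (e.g. from the
 * overflow interrupt), and masking the delta with max_period keeps
 * the arithmetic correct when the narrow hardware counter wraps.
 */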
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}
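/*
 * perf core ->add() callback: claim a hardware counter for the event
 * and, if PERF_EF_START is set, start counting immediately. The PMU
 * is disabled around counter allocation to keep the hardware state
 * consistent.
 */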
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
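/*
 * Group validation: replay counter allocation against a fake PMU with
 * an empty used_mask to check whether the whole event group could
 * ever be scheduled onto the hardware at once. Software events and
 * events that will never be enabled always pass.
 */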
static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	if (is_software_event(event))
		return 1;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}
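/*
 * Common interrupt entry point. For per-CPU interrupts the dev cookie
 * is a percpu pointer to the struct arm_pmu, hence the extra
 * dereference. The handler is timed with sched_clock() so the perf
 * core can throttle the sample rate if interrupt handling takes too
 * long.
 */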
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	if (irq_is_percpu(irq))
		dev = *(void **)dev;
	armpmu = dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, dev);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
	pm_runtime_put_sync(&armpmu->plat_device->dev);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	pm_runtime_get_sync(&pmu_device->dev);
	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}
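/*
 * Release the hardware (IRQs and the runtime PM reference) when the
 * last active event on this PMU goes away. active_events counts
 * users; reserve_mutex serialises against armpmu_event_init().
 */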
static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}
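/*
 * Translate the generic perf event attributes into this PMU's event
 * configuration, apply any mode-exclusion filters, and pick a default
 * sample period for counting (non-sampling) events.
 */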
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period = armpmu->max_period >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}
static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop(armpmu);
}
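/*
 * Runtime PM hooks simply defer to the optional platform data
 * callbacks, so that platforms can, for example, gate clocks or power
 * to the PMU while no events are in use.
 */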
#ifdef CONFIG_PM_RUNTIME
static int armpmu_runtime_resume(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_resume)
		return plat->runtime_resume(dev);

	return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_suspend)
		return plat->runtime_suspend(dev);

	return 0;
}
#endif

const struct dev_pm_ops armpmu_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};

static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}
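/*
 * Register an ARM PMU with the perf core. Callers are expected to
 * have filled in the arm_pmu callbacks and plat_device beforehand.
 */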
int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
		armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}