#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>

static int
armpmu_map_cache_event(const unsigned (*cache_map)
                                      [PERF_COUNT_HW_CACHE_MAX]
                                      [PERF_COUNT_HW_CACHE_OP_MAX]
                                      [PERF_COUNT_HW_CACHE_RESULT_MAX],
                       u64 config)
{
        unsigned int cache_type, cache_op, cache_result, ret;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

        if (ret == CACHE_OP_UNSUPPORTED)
                return -ENOENT;

        return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
        int mapping;

        if (config >= PERF_COUNT_HW_MAX)
                return -EINVAL;

        mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
        return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
                 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
                 const unsigned (*cache_map)
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX],
                 u32 raw_event_mask)
{
        u64 config = event->attr.config;
        int type = event->attr.type;

        if (type == event->pmu->type)
                return armpmu_map_raw_event(raw_event_mask, config);

        switch (type) {
        case PERF_TYPE_HARDWARE:
                return armpmu_map_hw_event(event_map, config);
        case PERF_TYPE_HW_CACHE:
                return armpmu_map_cache_event(cache_map, config);
        case PERF_TYPE_RAW:
                return armpmu_map_raw_event(raw_event_mask, config);
        }

        return -ENOENT;
}

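/*
 * Program the counter for the next sample period. The counters count up
 * and interrupt on overflow, so the value written is effectively the
 * negated period: with a 32-bit counter and 0x1000 events left, the
 * counter is set to 0xfffff000 and overflows after another 0x1000 events.
 * Returns nonzero when a new period was started.
 */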
int armpmu_event_set_period(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        /*
         * Limit the maximum period to prevent the counter value
         * from overtaking the one we are about to program. In
         * effect we are reducing max_period to account for
         * interrupt latency (and we are being very conservative).
         */
        if (left > (armpmu->max_period >> 1))
                left = armpmu->max_period >> 1;

        local64_set(&hwc->prev_count, (u64)-left);

        armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

        perf_event_update_userpage(event);

        return ret;
}

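/*
 * Fold the delta since the last read into the event count. The
 * local64_cmpxchg() loop keeps this safe against a racing update (for
 * example from the overflow interrupt), and masking the delta with
 * max_period handles the counter wrapping around.
 */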
u64 armpmu_event_update(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = armpmu->read_counter(event);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
        armpmu_event_update(event);
}

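/*
 * pmu::stop and pmu::start callbacks. The perf core's PERF_HES_STOPPED
 * and PERF_HES_UPTODATE flags in hwc->state track whether the counter is
 * currently stopped and whether its count has been saved.
 */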
static void
armpmu_stop(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to update the counter, so ignore
         * PERF_EF_UPDATE, see comments in armpmu_start().
         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(event);
                armpmu_event_update(event);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}

static void armpmu_start(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to reprogram the period, so ignore
         * PERF_EF_RELOAD, see the comment below.
         */
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
         * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
         */
        armpmu_event_set_period(event);
        armpmu->enable(event);
}

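/*
 * pmu::del and pmu::add callbacks: release or claim a hardware counter
 * for the event. Counter allocation is delegated to the back-end's
 * get_event_idx() and tracked in the per-CPU used_mask.
 */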
static void
armpmu_del(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        armpmu_stop(event, PERF_EF_UPDATE);
        hw_events->events[idx] = NULL;
        clear_bit(idx, hw_events->used_mask);
        if (armpmu->clear_event_idx)
                armpmu->clear_event_idx(hw_events, event);

        perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;

        /* An event following a process won't be stopped earlier */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return -ENOENT;

        perf_pmu_disable(event->pmu);

        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(hw_events, event);
        if (idx < 0) {
                err = idx;
                goto out;
        }

        /*
         * If there is an event in the counter we are going to use then make
         * sure it is disabled.
         */
        event->hw.idx = idx;
        armpmu->disable(event);
        hw_events->events[idx] = event;

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                armpmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

out:
        perf_pmu_enable(event->pmu);
        return err;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
               struct perf_event *event)
{
        struct arm_pmu *armpmu;

        if (is_software_event(event))
                return 1;

        /*
         * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
         * core perf code won't check that the pmu->ctx == leader->ctx
         * until after pmu->event_init(event).
         */
        if (event->pmu != pmu)
                return 0;

        if (event->state < PERF_EVENT_STATE_OFF)
                return 1;

        if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
                return 1;

        armpmu = to_arm_pmu(event->pmu);
        return armpmu->get_event_idx(hw_events, event) >= 0;
}

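/*
 * Check that an event group can be scheduled as a unit by dry-running
 * counter allocation against a scratch pmu_hw_events. Called from
 * __hw_perf_event_init() for events created as part of a group.
 */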
static int
validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct pmu_hw_events fake_pmu;

        /*
         * Initialise the fake PMU. We only need to populate the
         * used_mask for the purposes of validation.
         */
        memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;

        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(event->pmu, &fake_pmu, sibling))
                        return -EINVAL;
        }

        if (!validate_event(event->pmu, &fake_pmu, event))
                return -EINVAL;

        return 0;
}

static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
        struct arm_pmu *armpmu;
        struct platform_device *plat_device;
        struct arm_pmu_platdata *plat;
        int ret;
        u64 start_clock, finish_clock;

        /*
         * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
         * the handlers expect a struct arm_pmu*. The percpu_irq framework will
         * do any necessary shifting, we just need to perform the first
         * dereference.
         */
        armpmu = *(void **)dev;
        plat_device = armpmu->plat_device;
        plat = dev_get_platdata(&plat_device->dev);

        start_clock = sched_clock();
        if (plat && plat->handle_irq)
                ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
        else
                ret = armpmu->handle_irq(irq, armpmu);
        finish_clock = sched_clock();

        perf_sample_event_took(finish_clock - start_clock);
        return ret;
}

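/*
 * The PMU interrupt is requested when the first event is created and
 * released again when the last one is destroyed; the active_events
 * refcount in armpmu_event_init()/hw_perf_event_destroy() arbitrates this.
 */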
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
        armpmu->free_irq(armpmu);
        pm_runtime_put_sync(&armpmu->plat_device->dev);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
        int err;
        struct platform_device *pmu_device = armpmu->plat_device;

        if (!pmu_device)
                return -ENODEV;

        pm_runtime_get_sync(&pmu_device->dev);
        err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
        if (err) {
                armpmu_release_hardware(armpmu);
                return err;
        }

        return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        atomic_t *active_events = &armpmu->active_events;
        struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

        if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
                armpmu_release_hardware(armpmu);
                mutex_unlock(pmu_reserve_mutex);
        }
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
        return attr->exclude_idle || attr->exclude_user ||
               attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int mapping;

        mapping = armpmu->map_event(event);

        if (mapping < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapping;
        }

        /*
         * We don't assign an index until we actually place the event onto
         * hardware. Use -1 to signify that we haven't decided where to put it
         * yet. For SMP systems, each core has its own PMU so we can't do any
         * clever allocation or constraints checking at this point.
         */
        hwc->idx = -1;
        hwc->config_base = 0;
        hwc->config = 0;
        hwc->event_base = 0;

        /*
         * Check whether we need to exclude the counter from certain modes.
         */
        if ((!armpmu->set_event_filter ||
             armpmu->set_event_filter(hwc, &event->attr)) &&
             event_requires_mode_exclusion(&event->attr)) {
                pr_debug("ARM performance counters do not support mode exclusion\n");
                return -EOPNOTSUPP;
        }

        /*
         * Store the event encoding into the config_base field.
         */
        hwc->config_base |= (unsigned long)mapping;

        if (!is_sampling_event(event)) {
                /*
                 * For non-sampling runs, limit the sample_period to half
                 * of the counter width. That way, the new counter value
                 * is far less likely to overtake the previous one unless
                 * you have some serious IRQ latency issues.
                 */
                hwc->sample_period = armpmu->max_period >> 1;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        if (event->group_leader != event) {
                if (validate_group(event) != 0)
                        return -EINVAL;
        }

        return 0;
}

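/*
 * pmu::event_init callback. Returning -ENOENT for events this PMU cannot
 * handle lets the perf core offer them to other PMUs instead of failing
 * the syscall outright.
 */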
static int armpmu_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        int err = 0;
        atomic_t *active_events = &armpmu->active_events;

        /*
         * Reject CPU-affine events for CPUs that are of a different class to
         * that which this PMU handles. Process-following events (where
         * event->cpu == -1) can be migrated between CPUs, and thus we have to
         * reject them later (in armpmu_add) if they're scheduled on a
         * different class of CPU.
         */
        if (event->cpu != -1 &&
            !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
                return -ENOENT;

        /* does not support taken branch sampling */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (armpmu->map_event(event) == -ENOENT)
                return -ENOENT;

        event->destroy = hw_perf_event_destroy;

        if (!atomic_inc_not_zero(active_events)) {
                mutex_lock(&armpmu->reserve_mutex);
                if (atomic_read(active_events) == 0)
                        err = armpmu_reserve_hardware(armpmu);

                if (!err)
                        atomic_inc(active_events);
                mutex_unlock(&armpmu->reserve_mutex);
        }

        if (err)
                return err;

        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);

        return err;
}

static void armpmu_enable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

        /* For task-bound events we may be called on other CPUs */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return;

        if (enabled)
                armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);

        /* For task-bound events we may be called on other CPUs */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return;

        armpmu->stop(armpmu);
}

#ifdef CONFIG_PM
static int armpmu_runtime_resume(struct device *dev)
{
        struct arm_pmu_platdata *plat = dev_get_platdata(dev);

        if (plat && plat->runtime_resume)
                return plat->runtime_resume(dev);

        return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
        struct arm_pmu_platdata *plat = dev_get_platdata(dev);

        if (plat && plat->runtime_suspend)
                return plat->runtime_suspend(dev);

        return 0;
}
#endif

const struct dev_pm_ops armpmu_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};

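/* Wire up the struct pmu callbacks invoked by the perf core. */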
static void armpmu_init(struct arm_pmu *armpmu)
{
        atomic_set(&armpmu->active_events, 0);
        mutex_init(&armpmu->reserve_mutex);

        armpmu->pmu = (struct pmu) {
                .pmu_enable     = armpmu_enable,
                .pmu_disable    = armpmu_disable,
                .event_init     = armpmu_event_init,
                .add            = armpmu_add,
                .del            = armpmu_del,
                .start          = armpmu_start,
                .stop           = armpmu_stop,
                .read           = armpmu_read,
        };
}

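/*
 * Registration entry point for the CPU-specific back ends. Roughly, a back
 * end fills in an arm_pmu with its name, handle_irq, enable/disable,
 * read_counter/write_counter, get_event_idx, map_event, num_events and
 * max_period hooks, then hands it to armpmu_register() to expose it to the
 * perf core. (Illustrative summary; see the individual ARMv6/v7/XScale
 * perf_event back ends for the exact wiring.)
 */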
int armpmu_register(struct arm_pmu *armpmu, int type)
{
        armpmu_init(armpmu);
        pm_runtime_enable(&armpmu->plat_device->dev);
        pr_info("enabled with %s PMU driver, %d counters available\n",
                armpmu->name, armpmu->num_events);
        return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}