// SPDX-License-Identifier: GPL-2.0-only
/*
 * GXP core telemetry support
 *
 * Copyright (C) 2021-2022 Google LLC
 */

#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include "gxp-config.h"
#include "gxp-core-telemetry.h"
#include "gxp-dma.h"
#include "gxp-firmware.h"
#include "gxp-firmware-data.h"
#include "gxp-host-device-structs.h"
#include "gxp-notification.h"
#include "gxp-vd.h"

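/*
 * Per-core telemetry buffer size in bytes. Settable at module load time
 * (core_telemetry_buffer_size=<bytes>) or at runtime via sysfs (mode 0660);
 * rounded up to GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE and clamped to the
 * supported range in gxp_core_telemetry_init().
 */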
static uint gxp_core_telemetry_buffer_size = CORE_TELEMETRY_DEFAULT_BUFFER_SIZE;
module_param_named(core_telemetry_buffer_size, gxp_core_telemetry_buffer_size, uint, 0660);

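/* Returns true if firmware on @core reports telemetry of @type as enabled. */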
static inline bool is_telemetry_enabled(struct gxp_dev *gxp, uint core, u8 type)
{
	u32 device_status =
		gxp_fw_data_get_core_telemetry_device_status(gxp, core, type);

	return device_status & GXP_CORE_TELEMETRY_DEVICE_STATUS_ENABLED;
}

void gxp_core_telemetry_status_notify(struct gxp_dev *gxp, uint core)
{
	struct gxp_core_telemetry_manager *mgr = gxp->core_telemetry_mgr;

	/* Wake any threads waiting on a core telemetry disable ACK */
	wake_up(&mgr->waitq);

	/* Signal the appropriate eventfd for any active core telemetry types */
	mutex_lock(&mgr->lock);

	if (is_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_LOGGING) &&
	    mgr->logging_efd)
		eventfd_signal(mgr->logging_efd, 1);

	if (is_telemetry_enabled(gxp, core, GXP_TELEMETRY_TYPE_TRACING) &&
	    mgr->tracing_efd)
		eventfd_signal(mgr->tracing_efd, 1);

	mutex_unlock(&mgr->lock);
}

static void telemetry_status_notification_work(struct work_struct *work)
{
	struct gxp_core_telemetry_work *telem_work =
		container_of(work, struct gxp_core_telemetry_work, work);
	struct gxp_dev *gxp = telem_work->gxp;
	uint core = telem_work->core;

	gxp_core_telemetry_status_notify(gxp, core);
}

static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
						      size_t size);
static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data);

/**
 * enable_telemetry_buffers() - Enable the telemetry buffers from the host.
 *
 * @gxp: The GXP device the buffers were allocated for.
 * @data: The data describing a set of core telemetry buffers to be enabled.
 * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`.
 *
 * Return:
 * * 0 - Success
 * * otherwise - Error returned by `gxp_fw_data_set_core_telemetry_descriptors()`
 */
static int enable_telemetry_buffers(struct gxp_dev *gxp,
				    struct buffer_data *data, u8 type)
{
	int i, ret;

	/* Initialize each per-core telemetry buffer header with the magic code. */
	for (i = 0; i < GXP_NUM_CORES; i++) {
		/*
		 * The first 64 bytes of each per-core telemetry buffer are
		 * reserved for the buffer metadata header. The header fields
		 * need no explicit reset here, since the entire buffer is
		 * zeroed out at allocation. The first 4 bytes of the header
		 * hold the valid_magic field.
		 */
		*((uint *)data->buffers[i].vaddr) =
			GXP_TELEMETRY_BUFFER_VALID_MAGIC_CODE;
	}

	data->host_status |= GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;
	ret = gxp_fw_data_set_core_telemetry_descriptors(
		gxp, type, data->host_status, data->buffers, data->size);

	if (ret) {
		dev_err(gxp->dev,
			"setting telemetry buffers in scratchpad region failed (ret=%d)\n",
			ret);
		return ret;
	}

	data->is_enabled = true;
	return 0;
}

int gxp_core_telemetry_init(struct gxp_dev *gxp)
{
	struct gxp_core_telemetry_manager *mgr;
	struct buffer_data *log_buff_data, *trace_buff_data;
	int i, ret;

	mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return -ENOMEM;

	mutex_init(&mgr->lock);
	for (i = 0; i < GXP_NUM_CORES; i++) {
		INIT_WORK(&mgr->notification_works[i].work,
			  telemetry_status_notification_work);
		mgr->notification_works[i].gxp = gxp;
		mgr->notification_works[i].core = i;
	}
	init_waitqueue_head(&mgr->waitq);

	gxp->core_telemetry_mgr = mgr;
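	/*
	 * Round the requested buffer size up to the telemetry buffer unit,
	 * then fall back to the default if it is outside the supported range.
	 */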
	gxp_core_telemetry_buffer_size = ALIGN(gxp_core_telemetry_buffer_size,
					       GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE);
	if ((gxp_core_telemetry_buffer_size < CORE_TELEMETRY_DEFAULT_BUFFER_SIZE) ||
	    (gxp_core_telemetry_buffer_size > CORE_TELEMETRY_MAX_BUFFER_SIZE)) {
		dev_warn(gxp->dev,
			 "Invalid core telemetry buffer size, falling back to default %u bytes\n",
			 CORE_TELEMETRY_DEFAULT_BUFFER_SIZE);
		gxp_core_telemetry_buffer_size = CORE_TELEMETRY_DEFAULT_BUFFER_SIZE;
	}

	/* TODO(b/260959553): Remove mutex_lock/unlock during legacy telemetry removal */
	mutex_lock(&mgr->lock);
	log_buff_data = allocate_telemetry_buffers(gxp, gxp_core_telemetry_buffer_size);
	if (IS_ERR_OR_NULL(log_buff_data)) {
		dev_warn(gxp->dev,
			 "Failed to allocate per core log buffer of %u bytes\n",
			 gxp_core_telemetry_buffer_size);
		ret = -ENOMEM;
		goto err_free_buffers;
	}

	trace_buff_data = allocate_telemetry_buffers(gxp, gxp_core_telemetry_buffer_size);
	if (IS_ERR_OR_NULL(trace_buff_data)) {
		dev_warn(gxp->dev,
			 "Failed to allocate per core trace buffer of %u bytes\n",
			 gxp_core_telemetry_buffer_size);
		free_telemetry_buffers(gxp, log_buff_data);
		ret = -ENOMEM;
		goto err_free_buffers;
	}

	ret = enable_telemetry_buffers(gxp, log_buff_data,
				       GXP_TELEMETRY_TYPE_LOGGING);
	if (ret) {
		dev_warn(gxp->dev, "enabling logging buffers failed (ret=%d)\n",
			 ret);
		goto err_free;
	}
	ret = enable_telemetry_buffers(gxp, trace_buff_data,
				       GXP_TELEMETRY_TYPE_TRACING);
	if (ret) {
		dev_warn(gxp->dev, "enabling tracing buffers failed (ret=%d)\n",
			 ret);
		goto err_free;
	}

	gxp->core_telemetry_mgr->logging_buff_data = log_buff_data;
	gxp->core_telemetry_mgr->tracing_buff_data = trace_buff_data;
	mutex_unlock(&mgr->lock);
	return 0;

err_free:
	free_telemetry_buffers(gxp, log_buff_data);
	free_telemetry_buffers(gxp, trace_buff_data);
err_free_buffers:
	mutex_unlock(&mgr->lock);
	mutex_destroy(&mgr->lock);
	devm_kfree(gxp->dev, mgr);
	gxp->core_telemetry_mgr = NULL;
	return ret;
}

/* Wrapper struct to be used by the core telemetry vma_ops. */
struct telemetry_vma_data {
	struct gxp_dev *gxp;
	struct buffer_data *buff_data;
	u8 type;
	refcount_t ref_count;
};

static void telemetry_vma_open(struct vm_area_struct *vma)
{
	struct gxp_dev *gxp;
	struct telemetry_vma_data *vma_data =
		(struct telemetry_vma_data *)vma->vm_private_data;
	/*
	 * vma_ops are required only for legacy telemetry flow
	 * to keep track of buffer allocation during mmap and
	 * buffer free during munmap.
	 */
	if (IS_ERR_OR_NULL(vma_data))
		return;

	gxp = vma_data->gxp;
	mutex_lock(&gxp->core_telemetry_mgr->lock);

	refcount_inc(&vma_data->ref_count);

	mutex_unlock(&gxp->core_telemetry_mgr->lock);
}

static void telemetry_vma_close(struct vm_area_struct *vma)
{
	struct gxp_dev *gxp;
	struct buffer_data *buff_data;
	u8 type;
	struct telemetry_vma_data *vma_data =
		(struct telemetry_vma_data *)vma->vm_private_data;
	/*
	 * vma_ops are required only for legacy telemetry flow
	 * to keep track of buffer allocation during mmap and
	 * buffer free during munmap.
	 */
	if (IS_ERR_OR_NULL(vma_data))
		return;

	gxp = vma_data->gxp;
	buff_data = vma_data->buff_data;
	type = vma_data->type;

	mutex_lock(&gxp->core_telemetry_mgr->lock);

	if (!refcount_dec_and_test(&vma_data->ref_count))
		goto out;

	/*
	 * Free the core telemetry buffers if they are no longer in use.
	 *
	 * If a client enabled core telemetry, then closed their VMA without
	 * disabling it, firmware will still be expecting those buffers to be
	 * mapped. If this is the case, core telemetry will be disabled, and
	 * the buffers freed, when the client is closed.
	 *
	 * We cannot disable core telemetry here, since attempting to lock the
	 * `vd_semaphore` while holding the mmap lock can lead to deadlocks.
	 */
	if (refcount_dec_and_test(&buff_data->ref_count)) {
		switch (type) {
		case GXP_TELEMETRY_TYPE_LOGGING:
			gxp->core_telemetry_mgr->logging_buff_data_legacy = NULL;
			break;
		case GXP_TELEMETRY_TYPE_TRACING:
			gxp->core_telemetry_mgr->tracing_buff_data_legacy = NULL;
			break;
		default:
			dev_warn(gxp->dev, "%s called with invalid type %u\n",
				 __func__, type);
		}
		free_telemetry_buffers(gxp, buff_data);
	}

	kfree(vma_data);

out:
	mutex_unlock(&gxp->core_telemetry_mgr->lock);
}

/* TODO(b/260959553): Remove vma ops during legacy telemetry removal */
static const struct vm_operations_struct telemetry_vma_ops = {
	.open = telemetry_vma_open,
	.close = telemetry_vma_close,
};

/**
 * check_telemetry_type_availability() - Checks if @type is valid and whether
 *                                       buffers of that type already exist.
 * @gxp: The GXP device to check availability for
 * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
 *
 * Caller must hold the core telemetry_manager's lock.
 *
 * Return:
 * * 0 - @type is valid and can have new buffers created
 * * -EBUSY - Buffers already exist for @type
 * * -EINVAL - @type is not a valid core telemetry type
 */
static int check_telemetry_type_availability(struct gxp_dev *gxp, u8 type)
{
	lockdep_assert_held(&gxp->core_telemetry_mgr->lock);

	switch (type) {
	case GXP_TELEMETRY_TYPE_LOGGING:
		if (gxp->core_telemetry_mgr->logging_buff_data_legacy)
			return -EBUSY;
		break;
	case GXP_TELEMETRY_TYPE_TRACING:
		if (gxp->core_telemetry_mgr->tracing_buff_data_legacy)
			return -EBUSY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * allocate_telemetry_buffers() - Allocate and populate a `struct buffer_data`,
 *                                including allocating and mapping one coherent
 *                                buffer of @size bytes per core.
 * @gxp: The GXP device to allocate the buffers for
 * @size: The size of buffer to allocate for each core
 *
 * Caller must hold the core telemetry_manager's lock.
 *
 * Return: A pointer to the `struct buffer_data` if successful, error otherwise
 */
static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
						      size_t size)
{
	struct buffer_data *data;
	int i;
	int ret = 0;

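	/*
	 * remap_telemetry_buffers() hands these buffers out to user space a
	 * whole page at a time, so never allocate less than one page per core.
	 */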
	size = size < PAGE_SIZE ? PAGE_SIZE : size;

	/* TODO(b/260959553): Remove lockdep_assert_held during legacy telemetry removal */
	lockdep_assert_held(&gxp->core_telemetry_mgr->lock);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	/* Allocate cache-coherent buffers for logging/tracing to write to */
	for (i = 0; i < GXP_NUM_CORES; i++) {
		/* Allocate a coherent buffer in the default domain */
		ret = gxp_dma_alloc_coherent_buf(gxp, NULL, size, GFP_KERNEL, 0,
						 &data->buffers[i]);
		if (ret) {
			dev_err(gxp->dev,
				"Failed to allocate coherent buffer\n");
			goto err_alloc;
		}
	}
	data->size = size;
	refcount_set(&data->ref_count, 1);
	data->is_enabled = false;

	return data;

err_alloc:
	while (i--)
		gxp_dma_free_coherent_buf(gxp, NULL, &data->buffers[i]);
	kfree(data);

	return ERR_PTR(ret);
}

/**
 * free_telemetry_buffers() - Unmap and free a `struct buffer_data`
 * @gxp: The GXP device the buffers were allocated for
 * @data: The descriptor of the buffers to unmap and free
 *
 * Caller must hold the core telemetry_manager's lock.
 */
static void free_telemetry_buffers(struct gxp_dev *gxp, struct buffer_data *data)
{
	int i;

	/* TODO(b/260959553): Remove lockdep_assert_held during legacy telemetry removal */
	lockdep_assert_held(&gxp->core_telemetry_mgr->lock);

	for (i = 0; i < GXP_NUM_CORES; i++)
		gxp_dma_free_coherent_buf(gxp, NULL, &data->buffers[i]);

	kfree(data);
}

/**
 * remap_telemetry_buffers() - Remaps a set of core telemetry buffers into a
 *                             user-space vm_area.
 * @gxp: The GXP device the buffers were allocated for
 * @vma: A vm area to remap the buffers into
 * @buff_data: The data describing a set of core telemetry buffers to remap
 *
 * Caller must hold the core telemetry_manager's lock.
 *
 * Return:
 * * 0 - Success
 * * otherwise - Error returned by `remap_pfn_range()`
 */
static int remap_telemetry_buffers(struct gxp_dev *gxp,
				   struct vm_area_struct *vma,
				   struct buffer_data *buff_data)
{
	unsigned long orig_pgoff = vma->vm_pgoff;
	int i;
	unsigned long offset;
	phys_addr_t phys;
	int ret = 0;

	lockdep_assert_held(&gxp->core_telemetry_mgr->lock);

	/* mmap the buffers */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_pgoff = 0;

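	/*
	 * The vma is carved into GXP_NUM_CORES equal regions: core i's buffer
	 * is remapped at vm_start + i * buff_data->size.
	 */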
	for (i = 0; i < GXP_NUM_CORES; i++) {
		/*
		 * Remap each core's buffer a page at a time, in case it is not
		 * physically contiguous.
		 */
		for (offset = 0; offset < buff_data->size; offset += PAGE_SIZE) {
			/*
			 * `virt_to_phys()` does not work on memory allocated
			 * by `dma_alloc_coherent()`, so we have to use
			 * `iommu_iova_to_phys()` instead. Since all buffers
			 * are mapped to the default domain as well as any per-
			 * core domains, we can use it here to get the physical
			 * address of any valid IOVA, regardless of its core.
			 */
			phys = iommu_iova_to_phys(
				iommu_get_domain_for_dev(gxp->dev),
				buff_data->buffers[i].dma_addr + offset);
			ret = remap_pfn_range(
				vma,
				vma->vm_start + buff_data->size * i + offset,
				phys >> PAGE_SHIFT, PAGE_SIZE,
				vma->vm_page_prot);
			if (ret)
				goto out;
		}
	}

out:
	vma->vm_pgoff = orig_pgoff;
	/* TODO(b/260959553): Remove vma ops during legacy telemetry removal */
	vma->vm_ops = &telemetry_vma_ops;

	return ret;
}

int gxp_core_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
				    struct vm_area_struct *vma)
{
	int ret = 0;
	struct buffer_data *buff_data;
	size_t total_size = vma->vm_end - vma->vm_start;
	size_t size = total_size / GXP_NUM_CORES;

	if (!gxp->core_telemetry_mgr)
		return -ENODEV;

	if (type == GXP_TELEMETRY_TYPE_LOGGING)
		buff_data = gxp->core_telemetry_mgr->logging_buff_data;
	else if (type == GXP_TELEMETRY_TYPE_TRACING)
		buff_data = gxp->core_telemetry_mgr->tracing_buff_data;
	else
		return -EINVAL;
	/*
	 * The total size must divide evenly into one
	 * GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE-aligned buffer per core.
	 */
	if (!total_size ||
	    total_size % (GXP_CORE_TELEMETRY_BUFFER_UNIT_SIZE * GXP_NUM_CORES)) {
		dev_warn(gxp->dev,
			 "Invalid vma size (%lu bytes) requested for telemetry\n",
			 total_size);
		return -EINVAL;
	}
	/*
	 * The requested per-core buffer size must equal the preallocated,
	 * aligned per-core buffer size.
	 */
	if (size != buff_data->size) {
		dev_warn(gxp->dev,
			 "Invalid per-core telemetry buffer size requested (%lu bytes)\n",
			 size);
		return -EINVAL;
	}
	mutex_lock(&gxp->core_telemetry_mgr->lock);
	ret = remap_telemetry_buffers(gxp, vma, buff_data);
	if (ret)
		goto err;
	vma->vm_private_data = NULL;
	mutex_unlock(&gxp->core_telemetry_mgr->lock);
	return 0;
err:
	mutex_unlock(&gxp->core_telemetry_mgr->lock);
	return ret;
}

int gxp_core_telemetry_mmap_buffers_legacy(struct gxp_dev *gxp, u8 type,
					   struct vm_area_struct *vma)
{
	int ret = 0;
	struct telemetry_vma_data *vma_data;
	size_t total_size = vma->vm_end - vma->vm_start;
	size_t size = total_size / GXP_NUM_CORES;
	struct buffer_data *buff_data;
	int i;

	if (!gxp->core_telemetry_mgr)
		return -ENODEV;

	/* Total size must divide evenly into 1 page-aligned buffer per core */
	if (!total_size || total_size % (PAGE_SIZE * GXP_NUM_CORES))
		return -EINVAL;

	mutex_lock(&gxp->core_telemetry_mgr->lock);

	ret = check_telemetry_type_availability(gxp, type);
	if (ret)
		goto err;

	vma_data = kmalloc(sizeof(*vma_data), GFP_KERNEL);
	if (!vma_data) {
		ret = -ENOMEM;
		goto err;
	}

	buff_data = allocate_telemetry_buffers(gxp, size);
	if (IS_ERR(buff_data)) {
		ret = PTR_ERR(buff_data);
		goto err_free_vma_data;
	}

	ret = remap_telemetry_buffers(gxp, vma, buff_data);
	if (ret)
		goto err_free_buffers;

	vma_data->gxp = gxp;
	vma_data->buff_data = buff_data;
	vma_data->type = type;
	refcount_set(&vma_data->ref_count, 1);
	vma->vm_private_data = vma_data;

	/* Save book-keeping on the buffers in the core telemetry manager */
	if (type == GXP_TELEMETRY_TYPE_LOGGING)
		gxp->core_telemetry_mgr->logging_buff_data_legacy = buff_data;
	else /* type == GXP_TELEMETRY_TYPE_TRACING */
		gxp->core_telemetry_mgr->tracing_buff_data_legacy = buff_data;

	mutex_unlock(&gxp->core_telemetry_mgr->lock);

	return 0;

err_free_buffers:
	for (i = 0; i < GXP_NUM_CORES; i++)
		gxp_dma_free_coherent_buf(gxp, NULL, &buff_data->buffers[i]);
	kfree(buff_data);

err_free_vma_data:
	kfree(vma_data);

err:
	mutex_unlock(&gxp->core_telemetry_mgr->lock);
	return ret;
}

int gxp_core_telemetry_enable(struct gxp_dev *gxp, u8 type)
{
	struct buffer_data *data;
	int ret = 0;
	uint core;
	struct gxp_virtual_device *vd;

	/*
	 * `vd_semaphore` cannot be acquired while holding the core telemetry
	 * lock, so acquire it here before locking the core telemetry lock.
	 */
	down_read(&gxp->vd_semaphore);
	mutex_lock(&gxp->core_telemetry_mgr->lock);

	switch (type) {
	case GXP_TELEMETRY_TYPE_LOGGING:
		data = gxp->core_telemetry_mgr->logging_buff_data_legacy;
		break;
	case GXP_TELEMETRY_TYPE_TRACING:
		data = gxp->core_telemetry_mgr->tracing_buff_data_legacy;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (!data) {
		ret = -ENXIO;
		goto out;
	}

	/* Map the buffers for any cores already running */
	for (core = 0; core < GXP_NUM_CORES; core++) {
		vd = gxp->core_to_vd[core];
		if (vd != NULL) {
			ret = gxp_dma_map_allocated_coherent_buffer(
				gxp, &data->buffers[core], vd->domain, 0);
			if (ret)
				goto err;
		}
	}

	/* Populate the buffer fields in firmware-data */
	data->host_status |= GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;
	gxp_fw_data_set_core_telemetry_descriptors(gxp, type, data->host_status,
						   data->buffers, data->size);

	/* Notify any running cores that firmware-data was updated */
	for (core = 0; core < GXP_NUM_CORES; core++) {
		if (gxp_is_fw_running(gxp, core))
			gxp_notification_send(gxp, core,
					      CORE_NOTIF_TELEMETRY_STATUS);
	}

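	/*
	 * Hold a reference for the enabled state; it is dropped again in
	 * telemetry_disable_locked().
	 */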
	refcount_inc(&data->ref_count);
	data->is_enabled = true;

	goto out;
err:
	while (core--) {
		vd = gxp->core_to_vd[core];
		if (vd)
			gxp_dma_unmap_allocated_coherent_buffer(
				gxp, vd->domain, &data->buffers[core]);
	}

out:
	mutex_unlock(&gxp->core_telemetry_mgr->lock);
	up_read(&gxp->vd_semaphore);

	return ret;
}

/**
 * notify_core_and_wait_for_disable() - Notify a core that telemetry state has
 *                                      been changed by the host and wait for
 *                                      the core to stop using telemetry.
 * @gxp: The GXP device core telemetry is changing for
 * @core: The core in @gxp to notify of the telemetry state change
 * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
 *
 * Caller must hold `core_telemetry_mgr->lock`.
 * Caller must hold `gxp->vd_semaphore` for reading only.
 * It is not allowed to hold `gxp->vd_semaphore` for writing, since this
 * function needs to release `gxp->vd_semaphore` at different points to sleep.
 *
 * Return:
 * * 0 - Firmware on @core is no longer using telemetry of @type
 * * -ENXIO - Firmware on @core is unresponsive
 */
static int notify_core_and_wait_for_disable(struct gxp_dev *gxp, uint core,
					    u8 type)
{
	uint retries_left = 50;

	lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
	lockdep_assert_held_read(&gxp->vd_semaphore);

	gxp_notification_send(gxp, core, CORE_NOTIF_TELEMETRY_STATUS);

	/* Wait for ACK from firmware */
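	/* Each poll waits up to 10 ms; 50 retries bound the total wait to ~500 ms. */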
	while (is_telemetry_enabled(gxp, core, type) &&
	       gxp_is_fw_running(gxp, core) && retries_left) {
		/* Release vd_semaphore while waiting */
		up_read(&gxp->vd_semaphore);

		/*
		 * The VD lock must be held to check if firmware is running, so
		 * the wait condition is only whether the firmware data has
		 * been updated to show the core disabling telemetry.
		 *
		 * If a core does stop running firmware while this function is
		 * asleep, it will be seen at the next timeout.
		 */
		wait_event_timeout(gxp->core_telemetry_mgr->waitq,
				   !is_telemetry_enabled(gxp, core, type),
				   msecs_to_jiffies(10));
		retries_left--;

		/*
		 * No function may attempt to acquire the `vd_semaphore` while
		 * holding the core telemetry lock, so it must be released,
		 * then re-acquired once the `vd_semaphore` is held.
		 */
		mutex_unlock(&gxp->core_telemetry_mgr->lock);
		down_read(&gxp->vd_semaphore);
		mutex_lock(&gxp->core_telemetry_mgr->lock);
	}

	/*
	 * If firmware has stopped running altogether, that is sufficient to be
	 * considered disabled. If firmware is started on this core again, it
	 * is responsible for clearing its status.
	 */
	if (unlikely(is_telemetry_enabled(gxp, core, type) &&
		     gxp_is_fw_running(gxp, core)))
		return -ENXIO;

	return 0;
}

/**
 * telemetry_disable_locked() - Disable core telemetry of @type for internal
 *                              callers that already hold the core telemetry
 *                              lock.
 * @gxp: The GXP device to disable either logging or tracing for
 * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
 *
 * Caller must hold `core_telemetry_mgr->lock`.
 * Caller must hold `gxp->vd_semaphore` for reading only.
 * It is not allowed to hold `gxp->vd_semaphore` for writing, since this
 * function needs to release `gxp->vd_semaphore` at different points to sleep.
 *
 * Return:
 * * 0 - Success
 * * -EINVAL - The @type provided is not valid
 * * -ENXIO - Buffers for @type have not been created/mapped yet
 */
static int telemetry_disable_locked(struct gxp_dev *gxp, u8 type)
{
	struct buffer_data *data;
	int ret = 0;
	uint core;
	struct gxp_virtual_device *vd;

	lockdep_assert_held(&gxp->core_telemetry_mgr->lock);
	lockdep_assert_held_read(&gxp->vd_semaphore);

	/* Cleanup core telemetry manager's book-keeping */
	switch (type) {
	case GXP_TELEMETRY_TYPE_LOGGING:
		data = gxp->core_telemetry_mgr->logging_buff_data_legacy;
		break;
	case GXP_TELEMETRY_TYPE_TRACING:
		data = gxp->core_telemetry_mgr->tracing_buff_data_legacy;
		break;
	default:
		return -EINVAL;
	}

	if (!data)
		return -ENXIO;

	if (!(data->host_status & GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED))
		return 0;

	data->is_enabled = false;

	/* Clear the log buffer fields in firmware-data */
	data->host_status &= ~GXP_CORE_TELEMETRY_HOST_STATUS_ENABLED;
	gxp_fw_data_set_core_telemetry_descriptors(gxp, type, data->host_status, NULL, 0);

	/* Notify any running cores that firmware-data was updated */
	for (core = 0; core < GXP_NUM_CORES; core++) {
		if (gxp_is_fw_running(gxp, core)) {
			ret = notify_core_and_wait_for_disable(gxp, core, type);
			if (ret)
				dev_warn(gxp->dev,
					 "%s: core%u failed to disable telemetry (type=%u, ret=%d)\n",
					 __func__, core, type, ret);
		}
		vd = gxp->core_to_vd[core];
		if (vd)
			gxp_dma_unmap_allocated_coherent_buffer(
				gxp, vd->domain, &data->buffers[core]);
	}

	if (refcount_dec_and_test(&data->ref_count)) {
		switch (type) {
		case GXP_TELEMETRY_TYPE_LOGGING:
			gxp->core_telemetry_mgr->logging_buff_data_legacy = NULL;
			break;
		case GXP_TELEMETRY_TYPE_TRACING:
			gxp->core_telemetry_mgr->tracing_buff_data_legacy = NULL;
			break;
		default:
			/* NO-OP, we returned above if `type` was invalid */
			break;
		}
		free_telemetry_buffers(gxp, data);
	}

	return 0;
}

int gxp_core_telemetry_disable(struct gxp_dev *gxp, u8 type)
{
	int ret;

	/*
	 * `vd_semaphore` cannot be acquired while holding the core telemetry
	 * lock, so acquire it here before locking the core telemetry lock.
	 */
	down_read(&gxp->vd_semaphore);
	mutex_lock(&gxp->core_telemetry_mgr->lock);

	ret = telemetry_disable_locked(gxp, type);

	mutex_unlock(&gxp->core_telemetry_mgr->lock);
	up_read(&gxp->vd_semaphore);

	return ret;
}

int gxp_core_telemetry_register_eventfd(struct gxp_dev *gxp, u8 type, int fd)
{
	struct eventfd_ctx *new_ctx;
	struct eventfd_ctx **ctx_to_set = NULL;
	int ret = 0;

	new_ctx = eventfd_ctx_fdget(fd);
	if (IS_ERR(new_ctx))
		return PTR_ERR(new_ctx);

	mutex_lock(&gxp->core_telemetry_mgr->lock);

	switch (type) {
	case GXP_TELEMETRY_TYPE_LOGGING:
		ctx_to_set = &gxp->core_telemetry_mgr->logging_efd;
		break;
	case GXP_TELEMETRY_TYPE_TRACING:
		ctx_to_set = &gxp->core_telemetry_mgr->tracing_efd;
		break;
	default:
		ret = -EINVAL;
		eventfd_ctx_put(new_ctx);
		goto out;
	}

	if (*ctx_to_set) {
		dev_warn(gxp->dev,
			 "Replacing existing core telemetry eventfd (type=%u)\n",
			 type);
		eventfd_ctx_put(*ctx_to_set);
	}

	*ctx_to_set = new_ctx;

out:
	mutex_unlock(&gxp->core_telemetry_mgr->lock);
	return ret;
}

int gxp_core_telemetry_unregister_eventfd(struct gxp_dev *gxp, u8 type)
{
	int ret = 0;

	mutex_lock(&gxp->core_telemetry_mgr->lock);

	switch (type) {
	case GXP_TELEMETRY_TYPE_LOGGING:
		if (gxp->core_telemetry_mgr->logging_efd)
			eventfd_ctx_put(gxp->core_telemetry_mgr->logging_efd);
		gxp->core_telemetry_mgr->logging_efd = NULL;
		break;
	case GXP_TELEMETRY_TYPE_TRACING:
		if (gxp->core_telemetry_mgr->tracing_efd)
			eventfd_ctx_put(gxp->core_telemetry_mgr->tracing_efd);
		gxp->core_telemetry_mgr->tracing_efd = NULL;
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&gxp->core_telemetry_mgr->lock);

	return ret;
}

struct work_struct *
gxp_core_telemetry_get_notification_handler(struct gxp_dev *gxp, uint core)
{
	struct gxp_core_telemetry_manager *mgr = gxp->core_telemetry_mgr;

	if (!mgr || core >= GXP_NUM_CORES)
		return NULL;

	return &mgr->notification_works[core].work;
}

void gxp_core_telemetry_exit(struct gxp_dev *gxp)
{
	struct buffer_data *log_buff_data, *trace_buff_data;
	struct gxp_core_telemetry_manager *mgr = gxp->core_telemetry_mgr;

	if (!mgr) {
		dev_warn(gxp->dev, "Core telemetry manager was not allocated\n");
		return;
	}

	/* TODO(b/260959553): Remove mutex_lock/unlock during legacy telemetry removal */
	mutex_lock(&gxp->core_telemetry_mgr->lock);
	log_buff_data = mgr->logging_buff_data;
	trace_buff_data = mgr->tracing_buff_data;

	if (!IS_ERR_OR_NULL(log_buff_data))
		free_telemetry_buffers(gxp, log_buff_data);

	if (!IS_ERR_OR_NULL(trace_buff_data))
		free_telemetry_buffers(gxp, trace_buff_data);

	mutex_unlock(&gxp->core_telemetry_mgr->lock);

	if (!IS_ERR_OR_NULL(gxp->core_telemetry_mgr->logging_efd)) {
		dev_warn(gxp->dev, "logging_efd was not released\n");
		eventfd_ctx_put(gxp->core_telemetry_mgr->logging_efd);
		gxp->core_telemetry_mgr->logging_efd = NULL;
	}

	if (!IS_ERR_OR_NULL(gxp->core_telemetry_mgr->tracing_efd)) {
		dev_warn(gxp->dev, "tracing_efd was not released\n");
		eventfd_ctx_put(gxp->core_telemetry_mgr->tracing_efd);
		gxp->core_telemetry_mgr->tracing_efd = NULL;
	}

	mutex_destroy(&mgr->lock);
	devm_kfree(gxp->dev, mgr);
	gxp->core_telemetry_mgr = NULL;
}
936}