// SPDX-License-Identifier: GPL-2.0
/*
 * GXP debug dump handler
 *
 * Copyright (C) 2020 Google LLC
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
#include <linux/platform_data/sscoredump.h>
#endif

#include "gxp-debug-dump.h"
#include "gxp-dma.h"
#include "gxp-doorbell.h"
#include "gxp-firmware.h"
#include "gxp-host-device-structs.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
#include "gxp-mapping.h"
#include "gxp-pm.h"
#include "gxp-vd.h"
#include "gxp-wakelock.h"

#define SSCD_MSG_LENGTH 64

#define SYNC_BARRIER_BLOCK 0x00100000
#define SYNC_BARRIER_BASE(_x_) ((_x_) << 12)

#define DEBUG_DUMP_MEMORY_SIZE 0x400000 /* size in bytes */

/* Enum indicating the debug dump request reason. */
enum gxp_debug_dump_init_type {
	DEBUG_DUMP_FW_INIT,
	DEBUG_DUMP_KERNEL_INIT
};

enum gxp_common_segments_idx {
	GXP_COMMON_REGISTERS_IDX,
	GXP_LPM_REGISTERS_IDX
};

/* Whether or not the debug dump subsystem should be enabled. */
static int gxp_debug_dump_enable;
module_param_named(debug_dump_enable, gxp_debug_dump_enable, int, 0660);

static void gxp_debug_dump_cache_invalidate(struct gxp_dev *gxp)
{
	/* Debug dump carveout is currently coherent. NO-OP. */
	return;
}

static void gxp_debug_dump_cache_flush(struct gxp_dev *gxp)
{
	/* Debug dump carveout is currently coherent. NO-OP. */
	return;
}

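/* Read the shadow register of sync barrier @index for inclusion in the dump. */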
static u32 gxp_read_sync_barrier_shadow(struct gxp_dev *gxp, uint index)
{
	uint barrier_reg_offset;

	if (index >= SYNC_BARRIER_COUNT) {
		dev_err(gxp->dev,
			"Attempt to read non-existent sync barrier: %0u\n",
			index);
		return 0;
	}

	barrier_reg_offset = SYNC_BARRIER_BLOCK + SYNC_BARRIER_BASE(index) +
			     SYNC_BARRIER_SHADOW_OFFSET;

	return gxp_read_32(gxp, barrier_reg_offset);
}

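/*
 * Capture the Aurora top-level registers (revision, interrupt polarity,
 * core power domains, global counter, watchdog, timers, doorbells, and sync
 * barriers) into the "Common Registers" dump segment and mark it valid.
 */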
static void
gxp_get_common_registers(struct gxp_dev *gxp, struct gxp_seg_header *seg_header,
			 struct gxp_common_registers *common_regs)
{
	int i;
	u32 addr;

	dev_dbg(gxp->dev, "Getting common registers\n");

	strscpy(seg_header->name, "Common Registers", sizeof(seg_header->name));
	seg_header->valid = 1;
	seg_header->size = sizeof(*common_regs);

	/* Get Aurora Top registers */
	common_regs->aurora_revision =
		gxp_read_32(gxp, GXP_REG_AURORA_REVISION);
	common_regs->common_int_pol_0 =
		gxp_read_32(gxp, GXP_REG_COMMON_INT_POL_0);
	common_regs->common_int_pol_1 =
		gxp_read_32(gxp, GXP_REG_COMMON_INT_POL_1);
	common_regs->dedicated_int_pol =
		gxp_read_32(gxp, GXP_REG_DEDICATED_INT_POL);
	common_regs->raw_ext_int = gxp_read_32(gxp, GXP_REG_RAW_EXT_INT);

	for (i = 0; i < CORE_PD_COUNT; i++) {
		common_regs->core_pd[i] =
			gxp_read_32(gxp, GXP_REG_CORE_PD + CORE_PD_BASE(i));
	}

	common_regs->global_counter_low =
		gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_LOW);
	common_regs->global_counter_high =
		gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_HIGH);
	common_regs->wdog_control = gxp_read_32(gxp, GXP_REG_WDOG_CONTROL);
	common_regs->wdog_value = gxp_read_32(gxp, GXP_REG_WDOG_VALUE);

	for (i = 0; i < TIMER_COUNT; i++) {
		addr = GXP_REG_TIMER_COMPARATOR + TIMER_BASE(i);
		common_regs->timer[i].comparator =
			gxp_read_32(gxp, addr + TIMER_COMPARATOR_OFFSET);
		common_regs->timer[i].control =
			gxp_read_32(gxp, addr + TIMER_CONTROL_OFFSET);
		common_regs->timer[i].value =
			gxp_read_32(gxp, addr + TIMER_VALUE_OFFSET);
	}

	/* Get Doorbell registers */
	for (i = 0; i < DOORBELL_COUNT; i++)
		common_regs->doorbell[i] = gxp_doorbell_status(gxp, i);

	/* Get Sync Barrier registers */
	for (i = 0; i < SYNC_BARRIER_COUNT; i++)
		common_regs->sync_barrier[i] =
			gxp_read_sync_barrier_shadow(gxp, i);

	dev_dbg(gxp->dev, "Done getting common registers\n");
}

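/*
 * Capture the state table, DMEM, configuration/status, and debug CSR
 * registers of PSM @psm into @psm_regs.
 */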
static void gxp_get_lpm_psm_registers(struct gxp_dev *gxp,
				      struct gxp_lpm_psm_registers *psm_regs,
				      int psm)
{
	struct gxp_lpm_state_table_registers *state_table_regs;
	int i, j;
	uint offset;

	/* Get State Table registers */
	for (i = 0; i < PSM_STATE_TABLE_COUNT; i++) {
		state_table_regs = &psm_regs->state_table[i];

		/* Get Trans registers */
		for (j = 0; j < PSM_TRANS_COUNT; j++) {
			offset = PSM_STATE_TABLE_BASE(i) + PSM_TRANS_BASE(j);
			state_table_regs->trans[j].next_state =
				lpm_read_32_psm(gxp, psm, offset +
						PSM_NEXT_STATE_OFFSET);
			state_table_regs->trans[j].seq_addr =
				lpm_read_32_psm(gxp, psm, offset +
						PSM_SEQ_ADDR_OFFSET);
			state_table_regs->trans[j].timer_val =
				lpm_read_32_psm(gxp, psm, offset +
						PSM_TIMER_VAL_OFFSET);
			state_table_regs->trans[j].timer_en =
				lpm_read_32_psm(gxp, psm, offset +
						PSM_TIMER_EN_OFFSET);
			state_table_regs->trans[j].trigger_num =
				lpm_read_32_psm(gxp, psm, offset +
						PSM_TRIGGER_NUM_OFFSET);
			state_table_regs->trans[j].trigger_en =
				lpm_read_32_psm(gxp, psm, offset +
						PSM_TRIGGER_EN_OFFSET);
		}

		state_table_regs->enable_state =
			lpm_read_32_psm(gxp, psm, PSM_STATE_TABLE_BASE(i) +
					PSM_ENABLE_STATE_OFFSET);
	}

	/* Get DMEM registers */
	for (i = 0; i < PSM_DATA_COUNT; i++) {
		offset = PSM_DMEM_BASE(i) + PSM_DATA_OFFSET;
		psm_regs->data[i] = lpm_read_32_psm(gxp, psm, offset);
	}

	psm_regs->cfg = lpm_read_32_psm(gxp, psm, PSM_CFG_OFFSET);
	psm_regs->status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);

	/* Get Debug CSR registers */
	psm_regs->debug_cfg = lpm_read_32_psm(gxp, psm, PSM_DEBUG_CFG_OFFSET);
	psm_regs->break_addr = lpm_read_32_psm(gxp, psm, PSM_BREAK_ADDR_OFFSET);
	psm_regs->gpin_lo_rd = lpm_read_32_psm(gxp, psm, PSM_GPIN_LO_RD_OFFSET);
	psm_regs->gpin_hi_rd = lpm_read_32_psm(gxp, psm, PSM_GPIN_HI_RD_OFFSET);
	psm_regs->gpout_lo_rd =
		lpm_read_32_psm(gxp, psm, PSM_GPOUT_LO_RD_OFFSET);
	psm_regs->gpout_hi_rd =
		lpm_read_32_psm(gxp, psm, PSM_GPOUT_HI_RD_OFFSET);
	psm_regs->debug_status =
		lpm_read_32_psm(gxp, psm, PSM_DEBUG_STATUS_OFFSET);
}

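/*
 * Capture the LPM descriptor, trigger CSR, IMEM, and per-PSM registers into
 * the "LPM Registers" dump segment and mark the segment header valid.
 */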
static void
gxp_get_lpm_registers(struct gxp_dev *gxp, struct gxp_seg_header *seg_header,
		      struct gxp_lpm_registers *lpm_regs)
{
	int i;
	uint offset;

	dev_dbg(gxp->dev, "Getting LPM registers\n");

	strscpy(seg_header->name, "LPM Registers", sizeof(seg_header->name));
	seg_header->valid = 1;
	seg_header->size = sizeof(*lpm_regs);

	/* Get LPM Descriptor registers */
	lpm_regs->lpm_version = lpm_read_32(gxp, LPM_VERSION_OFFSET);
	lpm_regs->trigger_csr_start =
		lpm_read_32(gxp, TRIGGER_CSR_START_OFFSET);
	lpm_regs->imem_start = lpm_read_32(gxp, IMEM_START_OFFSET);
	lpm_regs->lpm_config = lpm_read_32(gxp, LPM_CONFIG_OFFSET);

	for (i = 0; i < PSM_DESCRIPTOR_COUNT; i++) {
		offset = PSM_DESCRIPTOR_OFFSET + PSM_DESCRIPTOR_BASE(i);
		lpm_regs->psm_descriptor[i] = lpm_read_32(gxp, offset);
	}

	/* Get Trigger CSR registers */
	for (i = 0; i < EVENTS_EN_COUNT; i++) {
		offset = EVENTS_EN_OFFSET + EVENTS_EN_BASE(i);
		lpm_regs->events_en[i] = lpm_read_32(gxp, offset);
	}

	for (i = 0; i < EVENTS_INV_COUNT; i++) {
		offset = EVENTS_INV_OFFSET + EVENTS_INV_BASE(i);
		lpm_regs->events_inv[i] = lpm_read_32(gxp, offset);
	}

	lpm_regs->function_select = lpm_read_32(gxp, FUNCTION_SELECT_OFFSET);
	lpm_regs->trigger_status = lpm_read_32(gxp, TRIGGER_STATUS_OFFSET);
	lpm_regs->event_status = lpm_read_32(gxp, EVENT_STATUS_OFFSET);

	/* Get IMEM registers */
	for (i = 0; i < OPS_COUNT; i++) {
		offset = OPS_OFFSET + OPS_BASE(i);
		lpm_regs->ops[i] = lpm_read_32(gxp, offset);
	}

	/* Get PSM registers */
	for (i = 0; i < PSM_COUNT; i++)
		gxp_get_lpm_psm_registers(gxp, &lpm_regs->psm_regs[i], i);

	dev_dbg(gxp->dev, "Done getting LPM registers\n");
}

/*
 * Caller must make sure that gxp->debug_dump_mgr->common_dump is not NULL.
 */
static int gxp_get_common_dump(struct gxp_dev *gxp)
{
	struct gxp_common_dump *common_dump = gxp->debug_dump_mgr->common_dump;
	struct gxp_seg_header *common_seg_header = common_dump->seg_header;
	struct gxp_common_dump_data *common_dump_data =
		&common_dump->common_dump_data;
	int ret;

	/* Power on BLK_AUR to read the common registers */
	ret = gxp_wakelock_acquire(gxp);
	if (ret) {
		dev_err(gxp->dev,
			"Failed to acquire wakelock for getting common dump\n");
		return ret;
	}
	gxp_pm_update_requested_power_states(gxp, AUR_OFF, true, AUR_UUD, false,
					     AUR_MEM_UNDEFINED,
					     AUR_MEM_UNDEFINED);

	gxp_get_common_registers(gxp,
				 &common_seg_header[GXP_COMMON_REGISTERS_IDX],
				 &common_dump_data->common_regs);
	gxp_get_lpm_registers(gxp, &common_seg_header[GXP_LPM_REGISTERS_IDX],
			      &common_dump_data->lpm_regs);

	gxp_wakelock_release(gxp);
	gxp_pm_update_requested_power_states(gxp, AUR_UUD, false, AUR_OFF, true,
					     AUR_MEM_UNDEFINED,
					     AUR_MEM_UNDEFINED);

	dev_dbg(gxp->dev, "Segment Header for Common Segment\n");
	dev_dbg(gxp->dev, "Name: %s, Size: 0x%0x bytes, Valid :%0x\n",
		common_seg_header->name, common_seg_header->size,
		common_seg_header->valid);
	dev_dbg(gxp->dev, "Register aurora_revision: 0x%0x\n",
		common_dump_data->common_regs.aurora_revision);

	return ret;
}

#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
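/*
 * Hand the collected dump segments to the SSCD module via the sscd_report()
 * callback in its platform data so the SSCD daemon can generate a coredump.
 */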
static void gxp_send_to_sscd(struct gxp_dev *gxp, void *segs, int seg_cnt,
			     const char *info)
{
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
	struct sscd_platform_data *pdata =
		(struct sscd_platform_data *)mgr->sscd_pdata;

	if (!pdata->sscd_report) {
		dev_err(gxp->dev, "Failed to generate coredump\n");
		return;
	}

	if (pdata->sscd_report(gxp->debug_dump_mgr->sscd_dev, segs, seg_cnt,
			       SSCD_FLAGS_ELFARM64HDR, info)) {
		dev_err(gxp->dev, "Unable to send the report to SSCD daemon\n");
		return;
	}
}

/*
 * `user_bufs` is an input buffer containing up to GXP_NUM_BUFFER_MAPPINGS
 * virtual addresses
 */
static int gxp_add_user_buffer_to_segments(struct gxp_dev *gxp,
					   struct gxp_core_header *core_header,
					   int core_id, int seg_idx,
					   void *user_bufs[])
{
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
	struct gxp_user_buffer user_buf;
	int i;

	for (i = 0; i < GXP_NUM_BUFFER_MAPPINGS; i++) {
		user_buf = core_header->user_bufs[i];
		if (user_buf.size == 0)
			continue;
		if (seg_idx >= GXP_NUM_SEGMENTS_PER_CORE)
			return -EFAULT;

		mgr->segs[core_id][seg_idx].addr = user_bufs[i];
		mgr->segs[core_id][seg_idx].size = user_buf.size;
		seg_idx++;
	}

	return 0;
}

/*
 * Caller must have locked `gxp->vd_semaphore` for reading.
 */
static void gxp_user_buffers_vunmap(struct gxp_dev *gxp,
				    struct gxp_core_header *core_header)
{
	struct gxp_virtual_device *vd;
	struct gxp_user_buffer user_buf;
	int i;
	struct gxp_mapping *mapping;

	lockdep_assert_held(&gxp->vd_semaphore);

	/*
	 * TODO (b/234172464): When implementing per-core debug dump locks,
	 * down_read(&gxp->vd_semaphore) must be re-added before accessing
	 * gxp->core_to_vd[], and up_read(&gxp->vd_semaphore) must be re-added
	 * after.
	 */
	vd = gxp->core_to_vd[core_header->core_id];
	if (!vd) {
		dev_err(gxp->dev, "Virtual device is not available for vunmap\n");
		return;
	}

	for (i = 0; i < GXP_NUM_BUFFER_MAPPINGS; i++) {
		user_buf = core_header->user_bufs[i];
		if (user_buf.size == 0)
			continue;

		mapping = gxp_vd_mapping_search_in_range(
			vd, (dma_addr_t)user_buf.device_addr);
		if (!mapping) {
			dev_err(gxp->dev,
				"No mapping found for user buffer at device address %#llX\n",
				user_buf.device_addr);
			continue;
		}

		gxp_mapping_vunmap(mapping);
		gxp_mapping_put(mapping);
	}
}

/*
 * Caller must have locked `gxp->vd_semaphore` for reading.
 */
static int gxp_user_buffers_vmap(struct gxp_dev *gxp,
				 struct gxp_core_header *core_header,
				 void *user_buf_vaddrs[])
{
	struct gxp_virtual_device *vd;
	struct gxp_user_buffer *user_buf;
	int i, cnt = 0;
	dma_addr_t daddr;
	struct gxp_mapping *mapping;
	void *vaddr;

	lockdep_assert_held(&gxp->vd_semaphore);

	/*
	 * TODO (b/234172464): When implementing per-core debug dump locks,
	 * down_read(&gxp->vd_semaphore) must be re-added before accessing
	 * gxp->core_to_vd[], and up_read(&gxp->vd_semaphore) must be re-added
	 * after.
	 */
	vd = gxp->core_to_vd[core_header->core_id];
	if (!vd) {
		dev_err(gxp->dev, "Virtual device is not available for vmap\n");
		goto out;
	}

	for (i = 0; i < GXP_NUM_BUFFER_MAPPINGS; i++) {
		user_buf = &core_header->user_bufs[i];
		if (user_buf->size == 0)
			continue;

		/* Get mapping */
		daddr = (dma_addr_t)user_buf->device_addr;
		mapping = gxp_vd_mapping_search_in_range(vd, daddr);
		if (!mapping) {
			user_buf->size = 0;
			continue;
		}

		/* Map the mapping into kernel space */
		vaddr = gxp_mapping_vmap(mapping);

		/*
		 * Release the reference from searching for the mapping.
		 * Either vmapping was successful and obtained a new reference
		 * or vmapping failed, and the gxp_mapping is no longer needed.
		 */
		gxp_mapping_put(mapping);

		if (IS_ERR(vaddr)) {
			gxp_user_buffers_vunmap(gxp, core_header);
			return 0;
		}

		/* Get kernel address of the user buffer inside the mapping */
		user_buf_vaddrs[i] =
			vaddr + daddr -
			(mapping->device_address & ~(PAGE_SIZE - 1));

		/* Check that the entire user buffer is mapped */
		if ((user_buf_vaddrs[i] + user_buf->size) >
		    (vaddr + mapping->size)) {
			gxp_user_buffers_vunmap(gxp, core_header);
			return 0;
		}

		cnt++;
	}

out:
	return cnt;
}
#endif

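/*
 * Mark every common and core dump segment for @core_id as invalid and clear
 * the core header's user buffer sizes and dump_available flag, so stale data
 * is not picked up on the next debug dump trigger.
 */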
static void gxp_invalidate_segments(struct gxp_dev *gxp, uint32_t core_id)
{
	int i;
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
	struct gxp_core_dump *core_dump;
	struct gxp_common_dump *common_dump;
	struct gxp_core_dump_header *core_dump_header;

	core_dump = mgr->core_dump;
	common_dump = mgr->common_dump;
	if (!core_dump || !common_dump) {
		dev_dbg(gxp->dev,
			"Failed to get core_dump or common_dump for invalidating segments\n");
		return;
	}

	core_dump_header = &core_dump->core_dump_header[core_id];
	if (!core_dump_header) {
		dev_dbg(gxp->dev,
			"Failed to get core_dump_header for invalidating segments\n");
		return;
	}

	for (i = 0; i < GXP_NUM_COMMON_SEGMENTS; i++)
		common_dump->seg_header[i].valid = 0;

	for (i = 0; i < GXP_NUM_CORE_SEGMENTS; i++)
		core_dump_header->seg_header[i].valid = 0;

	for (i = 0; i < GXP_NUM_BUFFER_MAPPINGS; i++)
		core_dump_header->core_header.user_bufs[i].size = 0;

	core_dump_header->core_header.dump_available = 0;
}

/*
 * Caller must make sure that gxp->debug_dump_mgr->common_dump and
 * gxp->debug_dump_mgr->core_dump are not NULL.
 */
static int gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
{
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
	struct gxp_core_dump *core_dump = mgr->core_dump;
	struct gxp_core_dump_header *core_dump_header =
		&core_dump->core_dump_header[core_id];
	struct gxp_core_header *core_header = &core_dump_header->core_header;
	int ret = 0;
#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
	struct gxp_common_dump *common_dump = mgr->common_dump;
	int i;
	int seg_idx = 0;
	void *data_addr;
	char sscd_msg[SSCD_MSG_LENGTH];
	void *user_buf_vaddrs[GXP_NUM_BUFFER_MAPPINGS];
	int user_buf_cnt;
#endif

	/* Core */
	if (!core_header->dump_available) {
		dev_err(gxp->dev, "Core dump should have been available\n");
		ret = -EINVAL;
		goto out;
	}

#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
	/* Common */
	data_addr = &common_dump->common_dump_data.common_regs;
	for (i = 0; i < GXP_NUM_COMMON_SEGMENTS; i++) {
		if (seg_idx >= GXP_NUM_SEGMENTS_PER_CORE) {
			ret = -EFAULT;
			goto out_efault;
		}
		mgr->segs[core_id][seg_idx].addr = data_addr;
		mgr->segs[core_id][seg_idx].size =
			common_dump->seg_header[i].size;
		data_addr += mgr->segs[core_id][seg_idx].size;
		seg_idx++;
	}

	/* Core Header */
	if (seg_idx >= GXP_NUM_SEGMENTS_PER_CORE) {
		ret = -EFAULT;
		goto out_efault;
	}
	mgr->segs[core_id][seg_idx].addr = core_header;
	mgr->segs[core_id][seg_idx].size = sizeof(struct gxp_core_header);
	seg_idx++;

	data_addr = &core_dump->dump_data[core_id *
					  core_header->core_dump_size /
					  sizeof(u32)];

	for (i = 0; i < GXP_NUM_CORE_SEGMENTS - 1; i++) {
		if (seg_idx >= GXP_NUM_SEGMENTS_PER_CORE) {
			ret = -EFAULT;
			goto out_efault;
		}
		mgr->segs[core_id][seg_idx].addr = data_addr;
		mgr->segs[core_id][seg_idx].size = 0;
		if (core_dump_header->seg_header[i].valid) {
			mgr->segs[core_id][seg_idx].size =
				core_dump_header->seg_header[i].size;
		}

		data_addr += core_dump_header->seg_header[i].size;
		seg_idx++;
	}

	/* DRAM */
	if (seg_idx >= GXP_NUM_SEGMENTS_PER_CORE) {
		ret = -EFAULT;
		goto out_efault;
	}
	mgr->segs[core_id][seg_idx].addr = gxp->fwbufs[core_id].vaddr;
	mgr->segs[core_id][seg_idx].size = gxp->fwbufs[core_id].size;
	seg_idx++;

	/* User Buffers */
	user_buf_cnt = gxp_user_buffers_vmap(gxp, core_header, user_buf_vaddrs);
	if (user_buf_cnt > 0) {
		if (gxp_add_user_buffer_to_segments(gxp, core_header, core_id,
						    seg_idx, user_buf_vaddrs)) {
			gxp_user_buffers_vunmap(gxp, core_header);
			ret = -EFAULT;
			goto out_efault;
		}
	}

out_efault:
	if (ret) {
		dev_err(gxp->dev,
			"seg_idx %x is larger than the size of the array\n",
			seg_idx);
	} else {
		dev_dbg(gxp->dev, "Passing dump data to SSCD daemon\n");
		snprintf(sscd_msg, SSCD_MSG_LENGTH - 1,
			 "gxp debug dump (core %0x)", core_id);
		gxp_send_to_sscd(gxp, mgr->segs[core_id],
				 seg_idx + user_buf_cnt, sscd_msg);

		gxp_user_buffers_vunmap(gxp, core_header);
	}
#endif

out:
	gxp_invalidate_segments(gxp, core_id);

	return ret;
}

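/*
 * Allocate the buffer used to stage the common (non-core) dump data.
 * A no-op when CONFIG_SUBSYSTEM_COREDUMP is disabled.
 */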
static int gxp_init_segments(struct gxp_dev *gxp)
{
#if !IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
	return 0;
#else
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;

	mgr->common_dump = kzalloc(sizeof(*mgr->common_dump), GFP_KERNEL);
	if (!mgr->common_dump)
		return -ENOMEM;

	return 0;
#endif
}

/*
 * Caller must have locked `gxp->debug_dump_mgr->debug_dump_lock` before calling
 * `gxp_generate_coredump`.
 */
static int gxp_generate_coredump(struct gxp_dev *gxp, uint32_t core_id)
{
	int ret = 0;

	if (!gxp->debug_dump_mgr->core_dump ||
	    !gxp->debug_dump_mgr->common_dump) {
		dev_err(gxp->dev, "Memory is not allocated for debug dump\n");
		return -EINVAL;
	}

	gxp_debug_dump_cache_invalidate(gxp);

	ret = gxp_get_common_dump(gxp);
	if (ret)
		goto out;

	ret = gxp_handle_debug_dump(gxp, core_id);
	if (ret)
		goto out;

out:
	gxp_debug_dump_cache_flush(gxp);

	return ret;
}

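/*
 * Workqueue handler for a core's debug dump request: generate the coredump
 * for that core when its firmware is running and has completed boot or
 * resume, then invalidate the dump segments for the next trigger.
 */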
static void gxp_debug_dump_process_dump(struct work_struct *work)
{
	struct gxp_debug_dump_work *debug_dump_work =
		container_of(work, struct gxp_debug_dump_work, work);

	uint core_id = debug_dump_work->core_id;
	struct gxp_dev *gxp = debug_dump_work->gxp;
	u32 boot_mode;
	bool gxp_generate_coredump_called = false;

	mutex_lock(&gxp->debug_dump_mgr->debug_dump_lock);

	/*
	 * Lock the VD semaphore to ensure no suspend/resume/start/stop requests
	 * can be made on core `core_id` while generating debug dump.
	 * However, since VD semaphore is used by other VDs as well, it can
	 * potentially block device creation and destruction for other cores.
	 * TODO (b/234172464): Implement per-core debug dump locks and
	 * lock/unlock vd_semaphore before/after accessing gxp->core_to_vd[].
	 */
	down_read(&gxp->vd_semaphore);

	boot_mode = gxp_firmware_get_boot_mode(gxp, core_id);

	if (gxp_is_fw_running(gxp, core_id) &&
	    (boot_mode == GXP_BOOT_MODE_STATUS_COLD_BOOT_COMPLETED ||
	     boot_mode == GXP_BOOT_MODE_STATUS_RESUME_COMPLETED)) {
		gxp_generate_coredump_called = true;
		if (gxp_generate_coredump(gxp, core_id))
			dev_err(gxp->dev, "Failed to generate coredump\n");
	}

	/* Invalidate segments to prepare for the next debug dump trigger */
	gxp_invalidate_segments(gxp, core_id);

	up_read(&gxp->vd_semaphore);

	/*
	 * This delay is needed to ensure there's sufficient time in between
	 * calls to sscd_report(), as the file name of the core dump files
	 * generated by the SSCD daemon includes a timestamp with seconds
	 * precision.
	 */
	if (gxp_generate_coredump_called)
		msleep(1000);

	mutex_unlock(&gxp->debug_dump_mgr->debug_dump_lock);
}

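/*
 * Return the work to be scheduled when core @core sends a debug dump
 * notification, or NULL if debug dump is disabled or not yet initialized.
 */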
struct work_struct *gxp_debug_dump_get_notification_handler(struct gxp_dev *gxp,
							     uint core)
{
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;

	if (!gxp_debug_dump_is_enabled())
		return NULL;

	if (!mgr->buf.vaddr) {
		dev_err(gxp->dev,
			"Debug dump must be initialized before %s is called\n",
			__func__);
		return NULL;
	}

	return &mgr->debug_dump_works[core].work;
}

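/*
 * Allocate the debug dump manager and its coherent dump buffer and set up one
 * debug dump work item per core. Does nothing unless the `debug_dump_enable`
 * module parameter is set.
 */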
int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
{
	struct gxp_debug_dump_manager *mgr;
	int core;

	/* Don't initialize the debug dump subsystem unless it's enabled. */
	if (!gxp_debug_dump_enable)
		return 0;

	mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return -ENOMEM;
	gxp->debug_dump_mgr = mgr;
	mgr->gxp = gxp;

	mgr->buf.vaddr =
		gxp_dma_alloc_coherent(gxp, NULL, 0, DEBUG_DUMP_MEMORY_SIZE,
				       &mgr->buf.daddr, GFP_KERNEL, 0);
	if (!mgr->buf.vaddr) {
		dev_err(gxp->dev, "Failed to allocate memory for debug dump\n");
		return -ENODEV;
	}
	mgr->buf.size = DEBUG_DUMP_MEMORY_SIZE;

	mgr->core_dump = (struct gxp_core_dump *)mgr->buf.vaddr;

	gxp_init_segments(gxp);

	for (core = 0; core < GXP_NUM_CORES; core++) {
		gxp_invalidate_segments(gxp, core);
		mgr->debug_dump_works[core].gxp = gxp;
		mgr->debug_dump_works[core].core_id = core;
		INIT_WORK(&mgr->debug_dump_works[core].work,
			  gxp_debug_dump_process_dump);
	}

	/* No need for a DMA handle since the carveout is coherent */
	mgr->debug_dump_dma_handle = 0;
	mgr->sscd_dev = sscd_dev;
	mgr->sscd_pdata = sscd_pdata;
	mutex_init(&mgr->debug_dump_lock);

	return 0;
}

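/* Free the common dump, the coherent dump buffer, and the debug dump manager. */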
void gxp_debug_dump_exit(struct gxp_dev *gxp)
{
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;

	if (!mgr) {
		dev_dbg(gxp->dev, "Debug dump manager was not allocated\n");
		return;
	}

	kfree(gxp->debug_dump_mgr->common_dump);
	gxp_dma_free_coherent(gxp, NULL, 0, DEBUG_DUMP_MEMORY_SIZE,
			      mgr->buf.vaddr, mgr->buf.daddr);

	mutex_destroy(&mgr->debug_dump_lock);
	devm_kfree(mgr->gxp->dev, mgr);
	gxp->debug_dump_mgr = NULL;
}

bool gxp_debug_dump_is_enabled(void)
{
	return gxp_debug_dump_enable;
}