// SPDX-License-Identifier: GPL-2.0
/*
 * GXP debug dump handler
 *
 * Copyright (C) 2020 Google LLC
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
#include <linux/platform_data/sscoredump.h>
#endif

#include "gxp-debug-dump.h"
#include "gxp-doorbell.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
#include "gxp-mapping.h"
#include "gxp-vd.h"

#define SSCD_MSG_LENGTH 64

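/*
 * Sync barrier registers are spaced one 4 KiB page apart within the sync
 * barrier block, e.g. barrier 2's shadow register is read from
 * SYNC_BARRIER_BLOCK + SYNC_BARRIER_BASE(2) + SYNC_BARRIER_SHADOW_OFFSET,
 * where SYNC_BARRIER_BASE(2) == 2 << 12 == 0x2000.
 */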
#define SYNC_BARRIER_BLOCK 0x00100000
#define SYNC_BARRIER_BASE(_x_) ((_x_) << 12)

John Scheiblede164752021-10-26 13:45:19 -070032/* Enum indicating the debug dump request reason. */
33enum gxp_debug_dump_init_type {
34 DEBUG_DUMP_FW_INIT,
35 DEBUG_DUMP_KERNEL_INIT
36};
37
38enum gxp_common_segments_idx {
39 GXP_COMMON_REGISTERS_IDX,
40 GXP_LPM_REGISTERS_IDX
41};
42
static void gxp_debug_dump_cache_invalidate(struct gxp_dev *gxp)
{
	/* Debug dump carveout is currently coherent. NO-OP. */
}

static void gxp_debug_dump_cache_flush(struct gxp_dev *gxp)
{
	/* Debug dump carveout is currently coherent. NO-OP. */
}

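/*
 * Read the shadow register of sync barrier @index. Returns 0, after logging
 * an error, if @index is out of range.
 */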
static u32 gxp_read_sync_barrier_shadow(struct gxp_dev *gxp, uint index)
{
	uint barrier_reg_offset;

	if (index >= SYNC_BARRIER_COUNT) {
		dev_err(gxp->dev,
			"Attempt to read non-existent sync barrier: %0u\n",
			index);
		return 0;
	}

	barrier_reg_offset = SYNC_BARRIER_BLOCK + SYNC_BARRIER_BASE(index) +
			     SYNC_BARRIER_SHADOW_OFFSET;

	return gxp_read_32(gxp, barrier_reg_offset);
}

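/*
 * Snapshot the Aurora top-level registers (interrupt polarity, core power
 * domains, global counter, watchdog, timers, doorbells, and sync barriers)
 * into @common_regs, and mark @seg_header valid.
 */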
static void
gxp_get_common_registers(struct gxp_dev *gxp, struct gxp_seg_header *seg_header,
			 struct gxp_common_registers *common_regs)
{
	int i;
	u32 addr;

	dev_dbg(gxp->dev, "Getting common registers\n");

	strscpy(seg_header->name, "Common Registers", sizeof(seg_header->name));
	seg_header->valid = 1;
	seg_header->size = sizeof(*common_regs);

	/* Get Aurora Top registers */
	common_regs->aurora_revision =
		gxp_read_32(gxp, GXP_REG_AURORA_REVISION);
	common_regs->common_int_pol_0 =
		gxp_read_32(gxp, GXP_REG_COMMON_INT_POL_0);
	common_regs->common_int_pol_1 =
		gxp_read_32(gxp, GXP_REG_COMMON_INT_POL_1);
	common_regs->dedicated_int_pol =
		gxp_read_32(gxp, GXP_REG_DEDICATED_INT_POL);
	common_regs->raw_ext_int = gxp_read_32(gxp, GXP_REG_RAW_EXT_INT);

	for (i = 0; i < CORE_PD_COUNT; i++) {
		common_regs->core_pd[i] =
			gxp_read_32(gxp, GXP_REG_CORE_PD + CORE_PD_BASE(i));
	}

	common_regs->global_counter_low =
		gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_LOW);
	common_regs->global_counter_high =
		gxp_read_32(gxp, GXP_REG_GLOBAL_COUNTER_HIGH);
	common_regs->wdog_control = gxp_read_32(gxp, GXP_REG_WDOG_CONTROL);
	common_regs->wdog_value = gxp_read_32(gxp, GXP_REG_WDOG_VALUE);

	for (i = 0; i < TIMER_COUNT; i++) {
		addr = GXP_REG_TIMER_COMPARATOR + TIMER_BASE(i);
		common_regs->timer[i].comparator =
			gxp_read_32(gxp, addr + TIMER_COMPARATOR_OFFSET);
		common_regs->timer[i].control =
			gxp_read_32(gxp, addr + TIMER_CONTROL_OFFSET);
		common_regs->timer[i].value =
			gxp_read_32(gxp, addr + TIMER_VALUE_OFFSET);
	}

	/* Get Doorbell registers */
	for (i = 0; i < DOORBELL_COUNT; i++)
		common_regs->doorbell[i] = gxp_doorbell_status(gxp, i);

	/* Get Sync Barrier registers */
	for (i = 0; i < SYNC_BARRIER_COUNT; i++)
		common_regs->sync_barrier[i] =
			gxp_read_sync_barrier_shadow(gxp, i);

	dev_dbg(gxp->dev, "Done getting common registers\n");
}

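/*
 * Capture one PSM's state-table, DMEM, config/status, and debug CSR
 * registers into @psm_regs.
 */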
static void gxp_get_lpm_psm_registers(struct gxp_dev *gxp,
				      struct gxp_lpm_psm_registers *psm_regs,
				      int psm)
{
	struct gxp_lpm_state_table_registers *state_table_regs;
	int i, j;
	uint offset;

	/* Get State Table registers */
	for (i = 0; i < PSM_STATE_TABLE_COUNT; i++) {
		state_table_regs = &psm_regs->state_table[i];

		/* Get Trans registers */
		for (j = 0; j < PSM_TRANS_COUNT; j++) {
			offset = PSM_STATE_TABLE_BASE(i) + PSM_TRANS_BASE(j);
			state_table_regs->trans[j].next_state =
				lpm_read_32_psm(gxp, psm,
						offset + PSM_NEXT_STATE_OFFSET);
			state_table_regs->trans[j].seq_addr =
				lpm_read_32_psm(gxp, psm,
						offset + PSM_SEQ_ADDR_OFFSET);
			state_table_regs->trans[j].timer_val =
				lpm_read_32_psm(gxp, psm,
						offset + PSM_TIMER_VAL_OFFSET);
			state_table_regs->trans[j].timer_en =
				lpm_read_32_psm(gxp, psm,
						offset + PSM_TIMER_EN_OFFSET);
			state_table_regs->trans[j].trigger_num =
				lpm_read_32_psm(gxp, psm,
						offset + PSM_TRIGGER_NUM_OFFSET);
			state_table_regs->trans[j].trigger_en =
				lpm_read_32_psm(gxp, psm,
						offset + PSM_TRIGGER_EN_OFFSET);
		}

		state_table_regs->enable_state =
			lpm_read_32_psm(gxp, psm, PSM_STATE_TABLE_BASE(i) +
					PSM_ENABLE_STATE_OFFSET);
	}

	/* Get DMEM registers */
	for (i = 0; i < PSM_DATA_COUNT; i++) {
		offset = PSM_DMEM_BASE(i) + PSM_DATA_OFFSET;
		psm_regs->data[i] = lpm_read_32_psm(gxp, psm, offset);
	}

	psm_regs->cfg = lpm_read_32_psm(gxp, psm, PSM_CFG_OFFSET);
	psm_regs->status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);

	/* Get Debug CSR registers */
	psm_regs->debug_cfg = lpm_read_32_psm(gxp, psm, PSM_DEBUG_CFG_OFFSET);
	psm_regs->break_addr = lpm_read_32_psm(gxp, psm, PSM_BREAK_ADDR_OFFSET);
	psm_regs->gpin_lo_rd = lpm_read_32_psm(gxp, psm, PSM_GPIN_LO_RD_OFFSET);
	psm_regs->gpin_hi_rd = lpm_read_32_psm(gxp, psm, PSM_GPIN_HI_RD_OFFSET);
	psm_regs->gpout_lo_rd =
		lpm_read_32_psm(gxp, psm, PSM_GPOUT_LO_RD_OFFSET);
	psm_regs->gpout_hi_rd =
		lpm_read_32_psm(gxp, psm, PSM_GPOUT_HI_RD_OFFSET);
	psm_regs->debug_status =
		lpm_read_32_psm(gxp, psm, PSM_DEBUG_STATUS_OFFSET);
}

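/*
 * Snapshot the LPM descriptor, trigger CSR, IMEM, and per-PSM registers into
 * @lpm_regs, and mark @seg_header valid.
 */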
static void
gxp_get_lpm_registers(struct gxp_dev *gxp, struct gxp_seg_header *seg_header,
		      struct gxp_lpm_registers *lpm_regs)
{
	int i;
	uint offset;

	dev_dbg(gxp->dev, "Getting LPM registers\n");

	strscpy(seg_header->name, "LPM Registers", sizeof(seg_header->name));
	seg_header->valid = 1;
	seg_header->size = sizeof(*lpm_regs);

	/* Get LPM Descriptor registers */
	lpm_regs->lpm_version = lpm_read_32(gxp, LPM_VERSION_OFFSET);
	lpm_regs->trigger_csr_start =
		lpm_read_32(gxp, TRIGGER_CSR_START_OFFSET);
	lpm_regs->imem_start = lpm_read_32(gxp, IMEM_START_OFFSET);
	lpm_regs->lpm_config = lpm_read_32(gxp, LPM_CONFIG_OFFSET);

	for (i = 0; i < PSM_DESCRIPTOR_COUNT; i++) {
		offset = PSM_DESCRIPTOR_OFFSET + PSM_DESCRIPTOR_BASE(i);
		lpm_regs->psm_descriptor[i] = lpm_read_32(gxp, offset);
	}

	/* Get Trigger CSR registers */
	for (i = 0; i < EVENTS_EN_COUNT; i++) {
		offset = EVENTS_EN_OFFSET + EVENTS_EN_BASE(i);
		lpm_regs->events_en[i] = lpm_read_32(gxp, offset);
	}

	for (i = 0; i < EVENTS_INV_COUNT; i++) {
		offset = EVENTS_INV_OFFSET + EVENTS_INV_BASE(i);
		lpm_regs->events_inv[i] = lpm_read_32(gxp, offset);
	}

	lpm_regs->function_select = lpm_read_32(gxp, FUNCTION_SELECT_OFFSET);
	lpm_regs->trigger_status = lpm_read_32(gxp, TRIGGER_STATUS_OFFSET);
	lpm_regs->event_status = lpm_read_32(gxp, EVENT_STATUS_OFFSET);

	/* Get IMEM registers */
	for (i = 0; i < OPS_COUNT; i++) {
		offset = OPS_OFFSET + OPS_BASE(i);
		lpm_regs->ops[i] = lpm_read_32(gxp, offset);
	}

	/* Get PSM registers */
	for (i = 0; i < PSM_COUNT; i++)
		gxp_get_lpm_psm_registers(gxp, &lpm_regs->psm_regs[i], i);

	dev_dbg(gxp->dev, "Done getting LPM registers\n");
}

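/* Fill in the common dump (common registers + LPM registers) in the carveout. */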
static void gxp_get_common_dump(struct gxp_dev *gxp)
{
	struct gxp_common_dump *common_dump = gxp->debug_dump_mgr->common_dump;
	struct gxp_seg_header *common_seg_header = common_dump->seg_header;
	struct gxp_common_dump_data *common_dump_data =
		&common_dump->common_dump_data;

	gxp_get_common_registers(gxp,
				 &common_seg_header[GXP_COMMON_REGISTERS_IDX],
				 &common_dump_data->common_regs);
	gxp_get_lpm_registers(gxp, &common_seg_header[GXP_LPM_REGISTERS_IDX],
			      &common_dump_data->lpm_regs);

	dev_dbg(gxp->dev, "Segment Header for Common Segment\n");
	dev_dbg(gxp->dev, "Name: %s, Size: 0x%0x bytes, Valid: %0x\n",
		common_seg_header->name, common_seg_header->size,
		common_seg_header->valid);
	dev_dbg(gxp->dev, "Register aurora_revision: 0x%0x\n",
		common_dump_data->common_regs.aurora_revision);
}

#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
static void gxp_send_to_sscd(struct gxp_dev *gxp, void *segs, int seg_cnt,
			     const char *info)
{
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
	struct sscd_platform_data *pdata =
		(struct sscd_platform_data *)mgr->sscd_pdata;

	if (!pdata->sscd_report) {
		dev_err(gxp->dev, "Failed to generate coredump\n");
		return;
	}

	if (pdata->sscd_report(gxp->debug_dump_mgr->sscd_dev, segs, seg_cnt,
			       SSCD_FLAGS_ELFARM64HDR, info)) {
		dev_err(gxp->dev, "Unable to send the report to SSCD daemon\n");
		return;
	}

	/*
	 * This delay ensures sufficient time between consecutive
	 * sscd_report() calls, since the names of the core dump files
	 * generated by the SSCD daemon include a timestamp with only
	 * seconds precision.
	 */
	msleep(1000);
}
#endif

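/*
 * Assemble the SSCD segment list for @core_id (common register segments
 * first, then the core header and per-core segments), send it to the SSCD
 * daemon, then clear the dump_available and valid flags so the same dump is
 * not processed twice.
 */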
static void gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
{
	struct gxp_core_dump_header *core_dump_header;
	struct gxp_core_header *core_header;
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
	struct gxp_core_dump *core_dump = mgr->core_dump;
	struct gxp_common_dump *common_dump = mgr->common_dump;
	int i;
#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
	int seg_idx = 0;
	void *data_addr;
	char sscd_msg[SSCD_MSG_LENGTH];

	/* Common */
	data_addr = &common_dump->common_dump_data.common_regs;
	for (i = 0; i < GXP_NUM_COMMON_SEGMENTS; i++) {
		mgr->segs[core_id][seg_idx].addr = data_addr;
		mgr->segs[core_id][seg_idx].size =
			common_dump->seg_header[i].size;
		data_addr += mgr->segs[core_id][seg_idx].size;
		seg_idx++;
	}
#endif

	/* Core */
	core_dump_header = &core_dump->core_dump_header[core_id];
	core_header = &core_dump_header->core_header;
	if (!core_header->dump_available) {
		dev_err(gxp->dev, "Core dump should have been available\n");
		return;
	}
#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
	/* Core Header */
	mgr->segs[core_id][seg_idx].addr = core_header;
	mgr->segs[core_id][seg_idx].size = sizeof(struct gxp_core_header);
	seg_idx++;

	data_addr = &core_dump->dump_data[core_id *
					  core_header->core_dump_size /
					  sizeof(u32)];

	for (i = 0; i < GXP_NUM_CORE_SEGMENTS - 1; i++) {
		mgr->segs[core_id][seg_idx].addr = data_addr;
		mgr->segs[core_id][seg_idx].size = 0;
		if (core_dump_header->seg_header[i].valid) {
			mgr->segs[core_id][seg_idx].size =
				core_dump_header->seg_header[i].size;
		}

		data_addr += core_dump_header->seg_header[i].size;
		seg_idx++;
	}

	dev_dbg(gxp->dev, "Passing dump data to SSCD daemon\n");
	snprintf(sscd_msg, SSCD_MSG_LENGTH - 1,
		 "gxp debug dump - dump data (core %0x)", core_id);
	gxp_send_to_sscd(gxp, mgr->segs[core_id], seg_idx, sscd_msg);
#endif
	/* This bit signals that the core dump has been processed */
	core_header->dump_available = 0;

	for (i = 0; i < GXP_NUM_COMMON_SEGMENTS; i++)
		common_dump->seg_header[i].valid = 0;

	for (i = 0; i < GXP_NUM_CORE_SEGMENTS; i++)
		core_dump_header->seg_header[i].valid = 0;
}

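/* Free the per-core SSCD segment arrays and the common dump buffer. */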
static void gxp_free_segments(struct gxp_dev *gxp)
{
#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
	int core_id;

	for (core_id = 0; core_id < GXP_NUM_CORES; core_id++)
		kfree(gxp->debug_dump_mgr->segs[core_id]);
#endif
	kfree(gxp->debug_dump_mgr->common_dump);
}

#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
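/* Count the user buffers with a non-zero size in @core_id's core header. */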
static int gxp_get_mapping_count(struct gxp_dev *gxp, int core_id)
{
	struct gxp_core_dump *core_dump = gxp->debug_dump_mgr->core_dump;
	struct gxp_core_header *core_header =
		&core_dump->core_dump_header[core_id].core_header;
	int i, count = 0;

	for (i = 0; i < GXP_NUM_BUFFER_MAPPINGS; i++) {
		if (core_header->user_bufs[i].size != 0)
			count++;
	}

	return count;
}
#endif

static int gxp_init_segments(struct gxp_dev *gxp)
{
#if !IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
	return 0;
#else
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
	/*
	 * Every core needs at least the common segments, its own core
	 * segments, and one core header.
	 */
	int segs_min_num = GXP_NUM_COMMON_SEGMENTS + GXP_NUM_CORE_SEGMENTS + 1;
	int segs_num;
	int core_id = 0;

	for (core_id = 0; core_id < GXP_NUM_CORES; core_id++) {
		segs_num = segs_min_num + gxp_get_mapping_count(gxp, core_id);
		mgr->segs[core_id] = kmalloc_array(segs_num,
						   sizeof(struct sscd_segment),
						   GFP_KERNEL);
		if (!mgr->segs[core_id])
			goto err_out;
	}

	mgr->common_dump = kmalloc(sizeof(*mgr->common_dump), GFP_KERNEL);
	if (!mgr->common_dump)
		goto err_out;

	return 0;
err_out:
	gxp_free_segments(gxp);

	return -ENOMEM;
#endif
}

/*
 * `user_bufs` is an input array of up to GXP_NUM_BUFFER_MAPPINGS kernel
 * virtual addresses; entries whose recorded size is 0 are skipped.
 */
#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
static void gxp_add_user_buffer_to_segments(struct gxp_dev *gxp,
					    struct gxp_core_header *core_header,
					    int core_id, int seg_idx,
					    void *user_bufs[])
{
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
	struct gxp_user_buffer user_buf;
	int i;

	for (i = 0; i < GXP_NUM_BUFFER_MAPPINGS; i++) {
		user_buf = core_header->user_bufs[i];
		if (user_buf.size == 0)
			continue;
		mgr->segs[core_id][seg_idx].addr = user_bufs[i];
		mgr->segs[core_id][seg_idx].size = user_buf.size;
		seg_idx++;
	}
}

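/* Undo gxp_user_buffers_vmap(): vunmap each user buffer in @core_header. */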
static void gxp_user_buffers_vunmap(struct gxp_dev *gxp,
				    struct gxp_core_header *core_header)
{
	struct gxp_virtual_device *vd;
	struct gxp_user_buffer user_buf;
	int i;
	struct gxp_mapping *mapping;

	down_read(&gxp->vd_semaphore);

	vd = gxp->core_to_vd[core_header->core_id];
	if (!vd) {
		dev_err(gxp->dev, "Virtual device is not available for vunmap\n");
		goto out;
	}

	for (i = 0; i < GXP_NUM_BUFFER_MAPPINGS; i++) {
		user_buf = core_header->user_bufs[i];
		if (user_buf.size == 0)
			continue;

		mapping = gxp_vd_mapping_search_in_range(
			vd, (dma_addr_t)user_buf.device_addr);
		if (!mapping) {
			dev_err(gxp->dev,
				"No mapping found for user buffer at device address %#llX\n",
				user_buf.device_addr);
			continue;
		}

		gxp_mapping_vunmap(mapping);
		gxp_mapping_put(mapping);
	}

out:
	up_read(&gxp->vd_semaphore);
}

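/*
 * Map each user buffer recorded in @core_header into kernel space, storing
 * the kernel addresses in @user_buf_vaddrs. Returns the number of buffers
 * mapped, or 0 if any vmap fails (any partial mappings are unmapped first).
 */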
static int gxp_user_buffers_vmap(struct gxp_dev *gxp,
				 struct gxp_core_header *core_header,
				 void *user_buf_vaddrs[])
{
	struct gxp_virtual_device *vd;
	struct gxp_user_buffer user_buf;
	int i, cnt = 0;
	dma_addr_t daddr;
	struct gxp_mapping *mapping;
	void *vaddr;

	down_read(&gxp->vd_semaphore);

	vd = gxp->core_to_vd[core_header->core_id];
	if (!vd) {
		dev_err(gxp->dev, "Virtual device is not available for vmap\n");
		goto out;
	}

	for (i = 0; i < GXP_NUM_BUFFER_MAPPINGS; i++) {
		user_buf = core_header->user_bufs[i];
		if (user_buf.size == 0)
			continue;

		/* Get mapping */
		daddr = (dma_addr_t)user_buf.device_addr;
		mapping = gxp_vd_mapping_search_in_range(vd, daddr);
		if (!mapping) {
			user_buf.size = 0;
			continue;
		}

		/* Map the mapping into kernel space */
		vaddr = gxp_mapping_vmap(mapping);

		/*
		 * Release the reference from searching for the mapping.
		 * Either vmapping was successful and obtained a new reference
		 * or vmapping failed, and the gxp_mapping is no longer needed.
		 */
		gxp_mapping_put(mapping);

		if (IS_ERR(vaddr)) {
			up_read(&gxp->vd_semaphore);
			gxp_user_buffers_vunmap(gxp, core_header);
			return 0;
		}

		/* Get kernel address of the user buffer inside the mapping */
		user_buf_vaddrs[i] =
			vaddr + daddr -
			(mapping->device_address & ~(PAGE_SIZE - 1));
		cnt++;
	}

out:
	up_read(&gxp->vd_semaphore);

	return cnt;
}
#endif

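/*
 * Report the core's DRAM (firmware carveout) contents, plus any user buffers
 * that could be mapped, to the SSCD daemon, then mark the DRAM segment valid.
 */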
static void gxp_handle_dram_dump(struct gxp_dev *gxp, uint32_t core_id)
{
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
	struct gxp_core_dump_header *core_dump_header =
		&mgr->core_dump->core_dump_header[core_id];
	struct gxp_seg_header *dram_seg_header =
		&core_dump_header->seg_header[GXP_CORE_DRAM_SEGMENT_IDX];
#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
	struct gxp_core_header *core_header = &core_dump_header->core_header;
	struct sscd_segment *sscd_seg =
		&mgr->segs[core_id][GXP_DEBUG_DUMP_DRAM_SEGMENT_IDX];
	char sscd_msg[SSCD_MSG_LENGTH];
	void *user_buf_vaddrs[GXP_NUM_BUFFER_MAPPINGS];
	int user_buf_cnt;

	sscd_seg->addr = gxp->fwbufs[core_id].vaddr;
	sscd_seg->size = gxp->fwbufs[core_id].size;

	user_buf_cnt = gxp_user_buffers_vmap(gxp, core_header, user_buf_vaddrs);
	if (user_buf_cnt > 0) {
		gxp_add_user_buffer_to_segments(
			gxp, core_header, core_id,
			GXP_DEBUG_DUMP_DRAM_SEGMENT_IDX + 1, user_buf_vaddrs);
	}

	dev_dbg(gxp->dev, "Passing dram data to SSCD daemon\n");
	snprintf(sscd_msg, SSCD_MSG_LENGTH - 1,
		 "gxp debug dump - dram data (core %0x)", core_id);
	gxp_send_to_sscd(gxp, sscd_seg, user_buf_cnt + 1, sscd_msg);

	gxp_user_buffers_vunmap(gxp, core_header);
#endif
	dram_seg_header->valid = 1;
}

static bool gxp_is_segment_valid(struct gxp_dev *gxp, uint32_t core_id,
				 int seg_idx)
{
	struct gxp_core_dump *core_dump;
	struct gxp_core_dump_header *core_dump_header;
	struct gxp_seg_header *seg_header;

	core_dump = gxp->debug_dump_mgr->core_dump;
	core_dump_header = &core_dump->core_dump_header[core_id];
	seg_header = &core_dump_header->seg_header[seg_idx];

	return seg_header->valid;
}

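/*
 * Generate a coredump for @core_id: if the DRAM segment has not been dumped
 * yet, dump it first; otherwise collect the common and per-core register
 * dumps and hand them to the SSCD daemon.
 */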
static int gxp_generate_coredump(struct gxp_dev *gxp, uint32_t core_id)
{
	if (!gxp->debug_dump_mgr->core_dump) {
		dev_err(gxp->dev, "Core dump not allocated\n");
		return -EINVAL;
	}

	gxp_debug_dump_cache_invalidate(gxp);

	mutex_lock(&gxp->debug_dump_mgr->debug_dump_lock);

	if (!gxp_is_segment_valid(gxp, core_id, GXP_CORE_DRAM_SEGMENT_IDX)) {
		gxp_handle_dram_dump(gxp, core_id);
	} else {
		gxp_get_common_dump(gxp);
		gxp_handle_debug_dump(gxp, core_id);
	}

	mutex_unlock(&gxp->debug_dump_mgr->debug_dump_lock);

	gxp_debug_dump_cache_flush(gxp);

	return 0;
}

void gxp_debug_dump_process_dump(struct work_struct *work)
{
	struct gxp_debug_dump_work *debug_dump_work =
		container_of(work, struct gxp_debug_dump_work, work);
	uint core_id = debug_dump_work->core_id;
	struct gxp_dev *gxp = debug_dump_work->gxp;

	gxp_generate_coredump(gxp, core_id);
}

struct work_struct *gxp_debug_dump_get_notification_handler(struct gxp_dev *gxp,
							    uint core)
{
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;

	if (!mgr)
		return NULL;

	return &mgr->debug_dump_works[core].work;
}

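/*
 * Map the debug dump carveout, initialize each core's dump header and dump
 * work item, and allocate the SSCD segment arrays.
 */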
int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
{
	struct resource r;
	struct gxp_debug_dump_manager *mgr;
	struct gxp_core_dump_header *core_dump_header;
	int core, i, ret;

	mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return -ENOMEM;
	gxp->debug_dump_mgr = mgr;
	mgr->gxp = gxp;

	/* Find and map the memory reserved for the debug dump */
	if (gxp_acquire_rmem_resource(gxp, &r, "gxp-debug-dump-region")) {
		dev_err(gxp->dev,
			"Unable to acquire debug dump reserved memory\n");
		return -ENODEV;
	}
	gxp->coredumpbuf.paddr = r.start;
	gxp->coredumpbuf.size = resource_size(&r);
	/*
	 * TODO (b/193069216) allocate a dynamic buffer and let
	 * `gxp_dma_map_resources()` map it to the expected paddr
	 */
	/*
	 * TODO (b/200169232) Using memremap until devm_memremap is added to
	 * the GKI ABI
	 */
	gxp->coredumpbuf.vaddr = memremap(gxp->coredumpbuf.paddr,
					  gxp->coredumpbuf.size, MEMREMAP_WC);
	if (IS_ERR(gxp->coredumpbuf.vaddr)) {
		dev_err(gxp->dev, "Failed to map core dump\n");
		return -ENODEV;
	}
	mgr->core_dump = (struct gxp_core_dump *)gxp->coredumpbuf.vaddr;

	for (core = 0; core < GXP_NUM_CORES; core++) {
		core_dump_header = &mgr->core_dump->core_dump_header[core];
		core_dump_header->core_header.dump_available = 0;
		for (i = 0; i < GXP_NUM_CORE_SEGMENTS; i++)
			core_dump_header->seg_header[i].valid = 0;
		for (i = 0; i < GXP_NUM_BUFFER_MAPPINGS; i++)
			core_dump_header->core_header.user_bufs[i].size = 0;
		mgr->debug_dump_works[core].gxp = gxp;
		mgr->debug_dump_works[core].core_id = core;
		INIT_WORK(&mgr->debug_dump_works[core].work,
			  gxp_debug_dump_process_dump);
	}

	ret = gxp_init_segments(gxp);
	if (ret) {
		memunmap(gxp->coredumpbuf.vaddr);
		devm_kfree(gxp->dev, mgr);
		gxp->debug_dump_mgr = NULL;
		return ret;
	}

	/* No need for a DMA handle since the carveout is coherent */
	mgr->debug_dump_dma_handle = 0;
	mgr->sscd_dev = sscd_dev;
	mgr->sscd_pdata = sscd_pdata;
	mutex_init(&mgr->debug_dump_lock);

	return 0;
}

void gxp_debug_dump_exit(struct gxp_dev *gxp)
{
	struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;

	if (!mgr) {
		dev_dbg(gxp->dev, "Debug dump manager was not allocated\n");
		return;
	}

	gxp_free_segments(gxp);
	/* TODO (b/200169232) Remove this once we're using devm_memremap */
	memunmap(gxp->coredumpbuf.vaddr);

	mutex_destroy(&mgr->debug_dump_lock);
	devm_kfree(mgr->gxp->dev, mgr);
	gxp->debug_dump_mgr = NULL;
}