blob: 643094c3339c346019d9c125ba5f6b76917e999d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * ohci1394.c - driver for OHCI 1394 boards
3 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4 * Gord Peters <GordPeters@smarttech.com>
5 * 2001 Ben Collins <bcollins@debian.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22/*
23 * Things known to be working:
24 * . Async Request Transmit
25 * . Async Response Receive
26 * . Async Request Receive
27 * . Async Response Transmit
28 * . Iso Receive
29 * . DMA mmap for iso receive
30 * . Config ROM generation
31 *
32 * Things implemented, but still in test phase:
33 * . Iso Transmit
34 * . Async Stream Packets Transmit (Receive done via Iso interface)
35 *
36 * Things not implemented:
37 * . DMA error recovery
38 *
39 * Known bugs:
40 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41 * added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42 */
43
44/*
45 * Acknowledgments:
46 *
47 * Adam J Richter <adam@yggdrasil.com>
48 * . Use of pci_class to find device
49 *
50 * Emilie Chung <emilie.chung@axis.com>
51 * . Tip on Async Request Filter
52 *
53 * Pascal Drolet <pascal.drolet@informission.ca>
54 * . Various tips for optimization and functionnalities
55 *
56 * Robert Ficklin <rficklin@westengineering.com>
57 * . Loop in irq_handler
58 *
59 * James Goodwin <jamesg@Filanet.com>
60 * . Various tips on initialization, self-id reception, etc.
61 *
62 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63 * . Apple PowerBook detection
64 *
65 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66 * . Reset the board properly before leaving + misc cleanups
67 *
68 * Leon van Stuivenberg <leonvs@iae.nl>
69 * . Bug fixes
70 *
71 * Ben Collins <bcollins@debian.org>
72 * . Working big-endian support
73 * . Updated to 2.4.x module scheme (PCI aswell)
74 * . Config ROM generation
75 *
76 * Manfred Weihs <weihs@ict.tuwien.ac.at>
77 * . Reworked code for initiating bus resets
78 * (long, short, with or without hold-off)
79 *
80 * Nandu Santhi <contactnandu@users.sourceforge.net>
81 * . Added support for nVidia nForce2 onboard Firewire chipset
82 *
83 */
84
85#include <linux/config.h>
86#include <linux/kernel.h>
87#include <linux/list.h>
88#include <linux/slab.h>
89#include <linux/interrupt.h>
90#include <linux/wait.h>
91#include <linux/errno.h>
92#include <linux/module.h>
93#include <linux/moduleparam.h>
94#include <linux/pci.h>
95#include <linux/fs.h>
96#include <linux/poll.h>
97#include <asm/byteorder.h>
98#include <asm/atomic.h>
99#include <asm/uaccess.h>
100#include <linux/delay.h>
101#include <linux/spinlock.h>
102
103#include <asm/pgtable.h>
104#include <asm/page.h>
105#include <asm/irq.h>
106#include <linux/sched.h>
107#include <linux/types.h>
108#include <linux/vmalloc.h>
109#include <linux/init.h>
110
111#ifdef CONFIG_PPC_PMAC
112#include <asm/machdep.h>
113#include <asm/pmac_feature.h>
114#include <asm/prom.h>
115#include <asm/pci-bridge.h>
116#endif
117
118#include "csr1212.h"
119#include "ieee1394.h"
120#include "ieee1394_types.h"
121#include "hosts.h"
122#include "dma.h"
123#include "iso.h"
124#include "ieee1394_core.h"
125#include "highlevel.h"
126#include "ohci1394.h"
127
128#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
129#define OHCI1394_DEBUG
130#endif
131
132#ifdef DBGMSG
133#undef DBGMSG
134#endif
135
136#ifdef OHCI1394_DEBUG
137#define DBGMSG(fmt, args...) \
138printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
139#else
140#define DBGMSG(fmt, args...)
141#endif
142
143#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
144#define OHCI_DMA_ALLOC(fmt, args...) \
145 HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
146 ++global_outstanding_dmas, ## args)
147#define OHCI_DMA_FREE(fmt, args...) \
148 HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
149 --global_outstanding_dmas, ## args)
150static int global_outstanding_dmas = 0;
151#else
152#define OHCI_DMA_ALLOC(fmt, args...)
153#define OHCI_DMA_FREE(fmt, args...)
154#endif
155
156/* print general (card independent) information */
157#define PRINT_G(level, fmt, args...) \
158printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
159
160/* print card specific information */
161#define PRINT(level, fmt, args...) \
162printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
163
Linus Torvalds1da177e2005-04-16 15:20:36 -0700164/* Module Parameters */
165static int phys_dma = 1;
166module_param(phys_dma, int, 0644);
167MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
168
169static void dma_trm_tasklet(unsigned long data);
170static void dma_trm_reset(struct dma_trm_ctx *d);
171
172static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
173 enum context_type type, int ctx, int num_desc,
174 int buf_size, int split_buf_size, int context_base);
175static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
176static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
177
178static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
179 enum context_type type, int ctx, int num_desc,
180 int context_base);
181
182static void ohci1394_pci_remove(struct pci_dev *pdev);
183
184#ifndef __LITTLE_ENDIAN
185static unsigned hdr_sizes[] =
186{
187 3, /* TCODE_WRITEQ */
188 4, /* TCODE_WRITEB */
189 3, /* TCODE_WRITE_RESPONSE */
190 0, /* ??? */
191 3, /* TCODE_READQ */
192 4, /* TCODE_READB */
193 3, /* TCODE_READQ_RESPONSE */
194 4, /* TCODE_READB_RESPONSE */
195 1, /* TCODE_CYCLE_START (???) */
196 4, /* TCODE_LOCK_REQUEST */
197 2, /* TCODE_ISO_DATA */
198 4, /* TCODE_LOCK_RESPONSE */
199};
200
201/* Swap headers */
202static inline void packet_swab(quadlet_t *data, int tcode)
203{
204 size_t size = hdr_sizes[tcode];
205
206 if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
207 return;
208
209 while (size--)
210 data[size] = swab32(data[size]);
211}
212#else
213/* Don't waste cycles on same sex byte swaps */
214#define packet_swab(w,x)
215#endif /* !LITTLE_ENDIAN */
216
217/***********************************
218 * IEEE-1394 functionality section *
219 ***********************************/
220
221static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
222{
223 int i;
224 unsigned long flags;
225 quadlet_t r;
226
227 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
228
229 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
230
231 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
232 if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
233 break;
234
235 mdelay(1);
236 }
237
238 r = reg_read(ohci, OHCI1394_PhyControl);
239
240 if (i >= OHCI_LOOP_COUNT)
241 PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
242 r, r & 0x80000000, i);
243
244 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
245
246 return (r & 0x00ff0000) >> 16;
247}
248
249static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
250{
251 int i;
252 unsigned long flags;
253 u32 r = 0;
254
255 spin_lock_irqsave (&ohci->phy_reg_lock, flags);
256
257 reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
258
259 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
260 r = reg_read(ohci, OHCI1394_PhyControl);
261 if (!(r & 0x00004000))
262 break;
263
264 mdelay(1);
265 }
266
267 if (i == OHCI_LOOP_COUNT)
268 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
269 r, r & 0x00004000, i);
270
271 spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
272
273 return;
274}
275
276/* Or's our value into the current value */
277static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
278{
279 u8 old;
280
281 old = get_phy_reg (ohci, addr);
282 old |= data;
283 set_phy_reg (ohci, addr, old);
284
285 return;
286}
287
/*
 * Validate and forward the self-ID packets received after a bus reset.
 *
 * The controller deposits self-ID quadlets into ohci->selfid_buf_cpu and
 * reports status/length in OHCI1394_SelfIDCount.  Each valid packet is
 * handed to the ieee1394 core via hpsb_selfid_received().  On reception
 * error another bus reset is forced (up to OHCI1394_MAX_SELF_ID_ERRORS
 * attempts).  Note: the isroot parameter is currently unused here.
 */
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	/* Buffer quadlets may need byte-swapping depending on host
	 * endianness (flag set up elsewhere in the driver). */
	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	/* Bit 31 of SelfIDCount flags a reception error; additionally the
	 * generation field (bits 16-23) must match the one stored in the
	 * first buffer quadlet, otherwise the buffer is stale. */
	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response. */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			/* 0x40 in PHY reg 1 is IBR (initiate bus reset),
			 * same bit used by the LONG_RESET devctl below */
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	/* Quadlet count is in bits 2-12 of SelfIDCount; skip the first
	 * quadlet, which holds the generation header checked above. */
	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	/* Self-ID packets arrive in pairs: each packet is followed by its
	 * bitwise inverse for error checking. */
	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			/* Bits 24-29 carry the phy id of the sender */
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
353
354static void ohci_soft_reset(struct ti_ohci *ohci) {
355 int i;
356
357 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
358
359 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
360 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
361 break;
362 mdelay(1);
363 }
364 DBGMSG ("Soft reset finished");
365}
366
367
/* Generate the dma receive prgs and start the context.
 *
 * Builds a circular chain of INPUT_MORE descriptors (one per receive
 * buffer), programs the context registers — plus the IR-specific match
 * and channel-mask registers for iso contexts — and sets the context
 * running.  generate_irq selects whether each filled buffer raises an
 * interrupt (DMA_CTL_IRQ in the descriptor control word). */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	/* Context must be halted before its descriptors are rewritten */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		/* Low bits of the control word carry the buffer size */
		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			/* Branch to the next descriptor; low bit 0x1 is
			 * the Z/valid field of the branch word */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			/* Last descriptor points back at the first, with
			 * zero low bits in the branch word */
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		/* status starts at buf_size = residual byte count */
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	/* Reset the software read position into the buffer ring */
	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
427
428/* Initialize the dma transmit context */
429static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
430{
431 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
432
433 /* Stop the context */
434 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
435
436 d->prg_ind = 0;
437 d->sent_ind = 0;
438 d->free_prgs = d->num_desc;
439 d->branchAddrPtr = NULL;
440 INIT_LIST_HEAD(&d->fifo_list);
441 INIT_LIST_HEAD(&d->pending_list);
442
443 if (d->type == DMA_CTX_ISO) {
444 /* enable interrupts */
445 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
446 }
447
448 DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
449}
450
451/* Count the number of available iso contexts */
452static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
453{
454 int i,ctx=0;
455 u32 tmp;
456
457 reg_write(ohci, reg, 0xffffffff);
458 tmp = reg_read(ohci, reg);
459
460 DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
461
462 /* Count the number of contexts */
463 for (i=0; i<32; i++) {
464 if (tmp & 1) ctx++;
465 tmp >>= 1;
466 }
467 return ctx;
468}
469
/* Global initialization: bring the controller from soft-reset into a
 * fully operating state.
 *
 * Programs bus options and the PHY contender bit (honouring the
 * hpsb_disable_irm module option), points the chip at the self-ID and
 * config-ROM buffers, initializes all AR/AT DMA contexts, unmasks
 * interrupts, enables the link, powers up connected PHY ports, and
 * finally sanity-checks max_packet_size against suspect EEPROM data.
 * The order of the register writes below is deliberate. */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |= 0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |= 0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size: 2^(max_rec+1) from BusOptions */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA channel mask */
	ohci->ir_legacy_channels = 0;

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleTooLong |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
#ifndef __sparc__
	sprintf (irq_buf, "%d", ohci->dev->irq);
#else
	sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
	      "MMIO=[%lx-%lx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		/* NOTE(review): bit 0x20 appears to flag a disabled port;
		 * clearing bit 0 re-enables it — confirm against PHY docs */
		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
                      "attempting to setting max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		/* Wait (bounded) for the GUID ROM read to complete */
		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
650
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
/* Builds the OUTPUT_MORE/OUTPUT_LAST descriptor program for one packet
 * at d->prg_ind, chains it onto the previous program via branchAddrPtr,
 * maps the payload for DMA, and moves the packet onto d->fifo_list.
 * Caller must hold d->lock and have checked d->free_prgs > 0. */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			/* PHY packet: tcode nibble plus the two raw quadlets */
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			/* First immediate quadlet: speed code + low half of
			 * the first header quadlet */
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			/* No-op on little-endian hosts */
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			/* Immediate header descriptor: 0x8 bytes for a
			 * stream header, 0x10 for a full async header */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			/* Map the payload for device reads; unmapped again
			 * after transmit completion (not in this function) */
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			/* Chain the previous program's branch word to this
			 * program; low bits 0x3 = two-descriptor program */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			/* Raw packets carry one extra quadlet of header */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			/* Single-descriptor program: low bits 0x2 */
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

		/* Immediate 8-byte iso header, then the payload */
		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
			pci_map_single(ohci->dev, packet->data,
				       packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                 begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                 end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}
829
/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;	/* program slot of the FIRST packet queued here */
	int z = 0;		/* descriptor count of that first program */

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only: 3 descriptors when there is a
		 * data block, 2 for an immediate-only program (matches the
		 * 0x3/0x2 branch-word bits written by insert_packet) */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		/* Point cmdPtr at the first new program; z rides in the
		 * low bits of the command pointer */
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			/* run bit */
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}

	return;
}
884
885/* Transmission of an async or iso packet */
886static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
887{
888 struct ti_ohci *ohci = host->hostdata;
889 struct dma_trm_ctx *d;
890 unsigned long flags;
891
892 if (packet->data_size > ohci->max_packet_size) {
893 PRINT(KERN_ERR,
894 "Transmit packet size %Zd is too big",
895 packet->data_size);
896 return -EOVERFLOW;
897 }
898
899 /* Decide whether we have an iso, a request, or a response packet */
900 if (packet->type == hpsb_raw)
901 d = &ohci->at_req_context;
902 else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
903 /* The legacy IT DMA context is initialized on first
904 * use. However, the alloc cannot be run from
905 * interrupt context, so we bail out if that is the
906 * case. I don't see anyone sending ISO packets from
907 * interrupt context anyway... */
908
909 if (ohci->it_legacy_context.ohci == NULL) {
910 if (in_interrupt()) {
911 PRINT(KERN_ERR,
912 "legacy IT context cannot be initialized during interrupt");
913 return -EINVAL;
914 }
915
916 if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
917 DMA_CTX_ISO, 0, IT_NUM_DESC,
918 OHCI1394_IsoXmitContextBase) < 0) {
919 PRINT(KERN_ERR,
920 "error initializing legacy IT context");
921 return -ENOMEM;
922 }
923
924 initialize_dma_trm_ctx(&ohci->it_legacy_context);
925 }
926
927 d = &ohci->it_legacy_context;
928 } else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
929 d = &ohci->at_resp_context;
930 else
931 d = &ohci->at_req_context;
932
933 spin_lock_irqsave(&d->lock,flags);
934
935 list_add_tail(&packet->driver_list, &d->pending_list);
936
937 dma_trm_flush(ohci, d);
938
939 spin_unlock_irqrestore(&d->lock,flags);
940
941 return 0;
942}
943
944static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
945{
946 struct ti_ohci *ohci = host->hostdata;
947 int retval = 0;
948 unsigned long flags;
949 int phy_reg;
950
951 switch (cmd) {
952 case RESET_BUS:
953 switch (arg) {
954 case SHORT_RESET:
955 phy_reg = get_phy_reg(ohci, 5);
956 phy_reg |= 0x40;
957 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
958 break;
959 case LONG_RESET:
960 phy_reg = get_phy_reg(ohci, 1);
961 phy_reg |= 0x40;
962 set_phy_reg(ohci, 1, phy_reg); /* set IBR */
963 break;
964 case SHORT_RESET_NO_FORCE_ROOT:
965 phy_reg = get_phy_reg(ohci, 1);
966 if (phy_reg & 0x80) {
967 phy_reg &= ~0x80;
968 set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
969 }
970
971 phy_reg = get_phy_reg(ohci, 5);
972 phy_reg |= 0x40;
973 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
974 break;
975 case LONG_RESET_NO_FORCE_ROOT:
976 phy_reg = get_phy_reg(ohci, 1);
977 phy_reg &= ~0x80;
978 phy_reg |= 0x40;
979 set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
980 break;
981 case SHORT_RESET_FORCE_ROOT:
982 phy_reg = get_phy_reg(ohci, 1);
983 if (!(phy_reg & 0x80)) {
984 phy_reg |= 0x80;
985 set_phy_reg(ohci, 1, phy_reg); /* set RHB */
986 }
987
988 phy_reg = get_phy_reg(ohci, 5);
989 phy_reg |= 0x40;
990 set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
991 break;
992 case LONG_RESET_FORCE_ROOT:
993 phy_reg = get_phy_reg(ohci, 1);
994 phy_reg |= 0xc0;
995 set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
996 break;
997 default:
998 retval = -1;
999 }
1000 break;
1001
1002 case GET_CYCLE_COUNTER:
1003 retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1004 break;
1005
1006 case SET_CYCLE_COUNTER:
1007 reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
1008 break;
1009
1010 case SET_BUS_ID:
1011 PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
1012 break;
1013
1014 case ACT_CYCLE_MASTER:
1015 if (arg) {
1016 /* check if we are root and other nodes are present */
1017 u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
1018 if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
1019 /*
1020 * enable cycleTimer, cycleMaster
1021 */
1022 DBGMSG("Cycle master enabled");
1023 reg_write(ohci, OHCI1394_LinkControlSet,
1024 OHCI1394_LinkControl_CycleTimerEnable |
1025 OHCI1394_LinkControl_CycleMaster);
1026 }
1027 } else {
1028 /* disable cycleTimer, cycleMaster, cycleSource */
1029 reg_write(ohci, OHCI1394_LinkControlClear,
1030 OHCI1394_LinkControl_CycleTimerEnable |
1031 OHCI1394_LinkControl_CycleMaster |
1032 OHCI1394_LinkControl_CycleSource);
1033 }
1034 break;
1035
1036 case CANCEL_REQUESTS:
1037 DBGMSG("Cancel request received");
1038 dma_trm_reset(&ohci->at_req_context);
1039 dma_trm_reset(&ohci->at_resp_context);
1040 break;
1041
1042 case ISO_LISTEN_CHANNEL:
1043 {
1044 u64 mask;
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001045 struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
1046 int ir_legacy_active;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001047
1048 if (arg<0 || arg>63) {
1049 PRINT(KERN_ERR,
1050 "%s: IS0 listen channel %d is out of range",
1051 __FUNCTION__, arg);
1052 return -EFAULT;
1053 }
1054
1055 mask = (u64)0x1<<arg;
1056
1057 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1058
1059 if (ohci->ISO_channel_usage & mask) {
1060 PRINT(KERN_ERR,
1061 "%s: IS0 listen channel %d is already used",
1062 __FUNCTION__, arg);
1063 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1064 return -EFAULT;
1065 }
1066
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001067 ir_legacy_active = ohci->ir_legacy_channels;
1068
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069 ohci->ISO_channel_usage |= mask;
1070 ohci->ir_legacy_channels |= mask;
1071
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001072 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1073
1074 if (!ir_legacy_active) {
1075 if (ohci1394_register_iso_tasklet(ohci,
1076 &ohci->ir_legacy_tasklet) < 0) {
1077 PRINT(KERN_ERR, "No IR DMA context available");
1078 return -EBUSY;
1079 }
1080
1081 /* the IR context can be assigned to any DMA context
1082 * by ohci1394_register_iso_tasklet */
1083 d->ctx = ohci->ir_legacy_tasklet.context;
1084 d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
1085 32*d->ctx;
1086 d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
1087 32*d->ctx;
1088 d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
1089 d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
1090
1091 initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1092
Olaf Hering98848fa2005-07-14 00:33:45 -07001093 if (printk_ratelimit())
Jody McIntyre32e7a042005-09-30 11:59:19 -07001094 DBGMSG("IR legacy activated");
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001095 }
1096
1097 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1098
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 if (arg>31)
1100 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
1101 1<<(arg-32));
1102 else
1103 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
1104 1<<arg);
1105
1106 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1107 DBGMSG("Listening enabled on channel %d", arg);
1108 break;
1109 }
1110 case ISO_UNLISTEN_CHANNEL:
1111 {
1112 u64 mask;
1113
1114 if (arg<0 || arg>63) {
1115 PRINT(KERN_ERR,
1116 "%s: IS0 unlisten channel %d is out of range",
1117 __FUNCTION__, arg);
1118 return -EFAULT;
1119 }
1120
1121 mask = (u64)0x1<<arg;
1122
1123 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
1124
1125 if (!(ohci->ISO_channel_usage & mask)) {
1126 PRINT(KERN_ERR,
1127 "%s: IS0 unlisten channel %d is not used",
1128 __FUNCTION__, arg);
1129 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1130 return -EFAULT;
1131 }
1132
1133 ohci->ISO_channel_usage &= ~mask;
1134 ohci->ir_legacy_channels &= ~mask;
1135
1136 if (arg>31)
1137 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
1138 1<<(arg-32));
1139 else
1140 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
1141 1<<arg);
1142
1143 spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
1144 DBGMSG("Listening disabled on channel %d", arg);
Jody McIntyree4ec0f22005-04-21 14:09:42 -07001145
1146 if (ohci->ir_legacy_channels == 0) {
1147 stop_dma_rcv_ctx(&ohci->ir_legacy_context);
1148 DBGMSG("ISO legacy receive context stopped");
1149 }
1150
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151 break;
1152 }
1153 default:
1154 PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
1155 cmd);
1156 break;
1157 }
1158 return retval;
1159}
1160
1161/***********************************
1162 * rawiso ISO reception *
1163 ***********************************/
1164
1165/*
1166 We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1167 buffer is split into "blocks" (regions described by one DMA
1168 descriptor). Each block must be one page or less in size, and
1169 must not cross a page boundary.
1170
1171 There is one little wrinkle with buffer-fill mode: a packet that
1172 starts in the final block may wrap around into the first block. But
1173 the user API expects all packets to be contiguous. Our solution is
1174 to keep the very last page of the DMA buffer in reserve - if a
1175 packet spans the gap, we copy its tail into this page.
1176*/
1177
/* Per-context host-side state for a rawiso receive DMA context.
 * Allocated in ohci_iso_recv_init() and hung off hpsb_iso->hostdata. */
struct ohci_iso_recv {
	/* the controller this context belongs to */
	struct ti_ohci *ohci;

	/* bottom half that handles finished blocks; task_active is nonzero
	   once the tasklet has been registered with the controller */
	struct ohci1394_iso_tasklet task;
	int task_active;

	/* reception strategy chosen in ohci_iso_recv_init() */
	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.kvirt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers (per-context offsets,
	   computed in ohci_iso_recv_init()) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
1219
/* forward declarations for the rawiso receive implementation below */
static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);
1225
/* Allocate and set up host-side state for a rawiso receive context:
 * choose buffer-fill vs packet-per-buffer mode, size the DMA blocks,
 * allocate the descriptor program, register the tasklet (which assigns a
 * hardware context), and compute the per-context register offsets.
 * Returns 0 or a negative errno; on any failure everything allocated so
 * far is torn down via ohci_iso_recv_shutdown(). */
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
	if (!recv)
		return -ENOMEM;

	/* initialize just enough state that ohci_iso_recv_shutdown()
	   is safe to call from the error path below */
	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
						((recv->nblocks+1)/iso->buf_packets);
		/* clamp to at most a quarter of the ring, but at least 1 */
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		/* one block per packet in packet-per-buffer mode */
		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
		recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		/* smallest power of two >= max_packet_size (minimum 8) */
		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		    recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		   recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	/* ring starts empty at block/offset 0 */
	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				 sizeof(struct dma_cmd) * recv->nblocks,
				 recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
				                       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	/* registering the tasklet also claims a hardware DMA context */
	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
1356
1357static void ohci_iso_recv_stop(struct hpsb_iso *iso)
1358{
1359 struct ohci_iso_recv *recv = iso->hostdata;
1360
1361 /* disable interrupts */
1362 reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);
1363
1364 /* halt DMA */
1365 ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
1366}
1367
1368static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
1369{
1370 struct ohci_iso_recv *recv = iso->hostdata;
1371
1372 if (recv->task_active) {
1373 ohci_iso_recv_stop(iso);
1374 ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
1375 recv->task_active = 0;
1376 }
1377
1378 dma_prog_region_free(&recv->prog);
1379 kfree(recv);
1380 iso->hostdata = NULL;
1381}
1382
/* set up a "gapped" ring buffer DMA program: one descriptor per block,
 * each branching to the next; the final descriptor's branch is left 0 so
 * DMA stalls there (the "gap") until blocks are released and re-linked by
 * ohci_iso_recv_release_block(). */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride; /* reqCount = block size */

		/* descriptors are little-endian per OHCI, hence cpu_to_le32 */
		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride); /* resCount starts full */

		/* link the previous descriptor to this one */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}
1435
1436/* listen or unlisten to a specific channel (multi-channel mode only) */
1437static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1438{
1439 struct ohci_iso_recv *recv = iso->hostdata;
1440 int reg, i;
1441
1442 if (channel < 32) {
1443 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1444 i = channel;
1445 } else {
1446 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1447 i = channel - 32;
1448 }
1449
1450 reg_write(recv->ohci, reg, (1 << i));
1451
1452 /* issue a dummy read to force all PCI writes to be posted immediately */
1453 mb();
1454 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1455}
1456
1457static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1458{
1459 struct ohci_iso_recv *recv = iso->hostdata;
1460 int i;
1461
1462 for (i = 0; i < 64; i++) {
1463 if (mask & (1ULL << i)) {
1464 if (i < 32)
1465 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1466 else
1467 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1468 } else {
1469 if (i < 32)
1470 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1471 else
1472 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1473 }
1474 }
1475
1476 /* issue a dummy read to force all PCI writes to be posted immediately */
1477 mb();
1478 reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1479}
1480
/* Program and start the IR DMA context: reset ContextControl, select
 * buffer-fill vs packet-per-buffer behavior, set up channel/tag/sync/cycle
 * matching, point CommandPtr at the first descriptor and set RUN.
 * cycle/sync of -1 mean "don't match on that field".
 * Returns 0 on success, -1 if the context failed to start. */
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci; /* referenced by the PRINT macro */
	u32 command, contextMatch;

	/* clear the whole ContextControl register before reprogramming */
	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	/* bufferFill bit selects buffer-fill reception */
	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	/* make sure all of the above is visible before setting RUN */
	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
1571
1572static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1573{
1574 /* re-use the DMA descriptor for the block */
1575 /* by linking the previous descriptor to it */
1576
1577 int next_i = block;
1578 int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1579
1580 struct dma_cmd *next = &recv->block[next_i];
1581 struct dma_cmd *prev = &recv->block[prev_i];
Ben Collins1934b8b2005-07-09 20:01:23 -04001582
1583 /* ignore out-of-range requests */
1584 if ((block < 0) || (block > recv->nblocks))
1585 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586
1587 /* 'next' becomes the new end of the DMA chain,
1588 so disable branch and enable interrupt */
1589 next->branchAddress = 0;
1590 next->control |= cpu_to_le32(3 << 20);
1591 next->status = cpu_to_le32(recv->buf_stride);
1592
1593 /* link prev to next */
1594 prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1595 sizeof(struct dma_cmd) * next_i)
1596 | 1); /* Z=1 */
1597
1598 /* disable interrupt on previous DMA descriptor, except at intervals */
1599 if ((prev_i % recv->block_irq_interval) == 0) {
1600 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1601 } else {
1602 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1603 }
1604 wmb();
1605
1606 /* wake up DMA in case it fell asleep */
1607 reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1608}
1609
1610static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1611 struct hpsb_iso_packet_info *info)
1612{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 /* release the memory where the packet was */
Ben Collins1934b8b2005-07-09 20:01:23 -04001614 recv->released_bytes += info->total_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615
1616 /* have we released enough memory for one block? */
1617 while (recv->released_bytes > recv->buf_stride) {
1618 ohci_iso_recv_release_block(recv, recv->block_reader);
1619 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1620 recv->released_bytes -= recv->buf_stride;
1621 }
1622}
1623
1624static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1625{
1626 struct ohci_iso_recv *recv = iso->hostdata;
1627 if (recv->dma_mode == BUFFER_FILL_MODE) {
1628 ohci_iso_recv_bufferfill_release(recv, info);
1629 } else {
1630 ohci_iso_recv_release_block(recv, info - iso->infos);
1631 }
1632}
1633
/* parse all packets from blocks that have been fully received */
/* (buffer-fill mode) Walk the data buffer from recv->dma_offset up to the
 * block DMA is currently filling, handing each complete packet to
 * hpsb_iso_packet_received().  Packets whose payload wraps past the end of
 * the ring are made contiguous by copying their tail into the guard page. */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci; /* referenced by the PRINT macro */

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		/* 4096 is the largest ISO payload; bigger means corruption
		   (logged but parsing continues - NOTE(review): a bogus len
		   likely desynchronizes the parse until the runaway guard) */
		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	/* wake any reader blocked waiting for packets */
	if (wake)
		hpsb_iso_wake(iso);
}
1741
1742static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1743{
1744 int loop;
1745 struct ti_ohci *ohci = recv->ohci;
1746
1747 /* loop over all blocks */
1748 for (loop = 0; loop < recv->nblocks; loop++) {
1749
1750 /* check block_dma to see if it's done */
1751 struct dma_cmd *im = &recv->block[recv->block_dma];
1752
1753 /* check the DMA descriptor for new writes to xferStatus */
1754 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1755
1756 /* rescount is the number of bytes *remaining to be written* in the block */
1757 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1758
1759 unsigned char event = xferstatus & 0x1F;
1760
1761 if (!event) {
1762 /* nothing has happened to this block yet */
1763 break;
1764 }
1765
1766 if (event != 0x11) {
1767 atomic_inc(&iso->overflows);
1768 PRINT(KERN_ERR,
1769 "IR DMA error - OHCI error code 0x%02x\n", event);
1770 }
1771
1772 if (rescount != 0) {
1773 /* the card is still writing to this block;
1774 we can't touch it until it's done */
1775 break;
1776 }
1777
1778 /* OK, the block is finished... */
1779
1780 /* sync our view of the block */
1781 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1782
1783 /* reset the DMA descriptor */
1784 im->status = recv->buf_stride;
1785
1786 /* advance block_dma */
1787 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1788
1789 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1790 atomic_inc(&iso->overflows);
1791 DBGMSG("ISO reception overflow - "
1792 "ran out of DMA blocks");
1793 }
1794 }
1795
1796 /* parse any packets that have arrived */
1797 ohci_iso_recv_bufferfill_parse(iso, recv);
1798}
1799
1800static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1801{
1802 int count;
1803 int wake = 0;
1804 struct ti_ohci *ohci = recv->ohci;
1805
1806 /* loop over the entire buffer */
1807 for (count = 0; count < recv->nblocks; count++) {
1808 u32 packet_len = 0;
1809
1810 /* pointer to the DMA descriptor */
1811 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1812
1813 /* check the DMA descriptor for new writes to xferStatus */
1814 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1815 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1816
1817 unsigned char event = xferstatus & 0x1F;
1818
1819 if (!event) {
1820 /* this packet hasn't come in yet; we are done for now */
1821 goto out;
1822 }
1823
1824 if (event == 0x11) {
1825 /* packet received successfully! */
1826
1827 /* rescount is the number of bytes *remaining* in the packet buffer,
1828 after the packet was written */
1829 packet_len = recv->buf_stride - rescount;
1830
1831 } else if (event == 0x02) {
1832 PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1833 } else if (event) {
1834 PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1835 }
1836
1837 /* sync our view of the buffer */
1838 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1839
1840 /* record the per-packet info */
1841 {
1842 /* iso header is 8 bytes ahead of the data payload */
1843 unsigned char *hdr;
1844
1845 unsigned int offset;
1846 unsigned short cycle;
1847 unsigned char channel, tag, sy;
1848
1849 offset = iso->pkt_dma * recv->buf_stride;
1850 hdr = iso->data_buf.kvirt + offset;
1851
1852 /* skip iso header */
1853 offset += 8;
1854 packet_len -= 8;
1855
1856 cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1857 channel = hdr[5] & 0x3F;
1858 tag = hdr[5] >> 6;
1859 sy = hdr[4] & 0xF;
1860
Ben Collins1934b8b2005-07-09 20:01:23 -04001861 hpsb_iso_packet_received(iso, offset, packet_len,
1862 recv->buf_stride, cycle, channel, tag, sy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 }
1864
1865 /* reset the DMA descriptor */
1866 il->status = recv->buf_stride;
1867
1868 wake = 1;
1869 recv->block_dma = iso->pkt_dma;
1870 }
1871
1872out:
1873 if (wake)
1874 hpsb_iso_wake(iso);
1875}
1876
1877static void ohci_iso_recv_task(unsigned long data)
1878{
1879 struct hpsb_iso *iso = (struct hpsb_iso*) data;
1880 struct ohci_iso_recv *recv = iso->hostdata;
1881
1882 if (recv->dma_mode == BUFFER_FILL_MODE)
1883 ohci_iso_recv_bufferfill_task(iso, recv);
1884 else
1885 ohci_iso_recv_packetperbuf_task(iso, recv);
1886}
1887
1888/***********************************
1889 * rawiso ISO transmission *
1890 ***********************************/
1891
/* Per-context host-side state for a rawiso transmit DMA context.
 * Allocated in ohci_iso_xmit_init() and hung off hpsb_iso->hostdata. */
struct ohci_iso_xmit {
	struct ti_ohci *ohci;			/* the owning controller */
	struct dma_prog_region prog;		/* DMA program: ring of iso_xmit_cmd */
	struct ohci1394_iso_tasklet task;	/* completion bottom half */
	int task_active;			/* nonzero once task is registered */

	/* OHCI DMA context control registers (per-context offsets,
	   computed in ohci_iso_xmit_init()) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};
1902
/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;	/* carries the embedded ISO header */
	u8 iso_hdr[8];				/* immediate data: the ISO packet header */
	u32 unused[2];				/* pad to the next descriptor slot */
	struct dma_cmd output_last;		/* points at the payload in the data buffer */
};
1913
/* forward declarations for the rawiso transmit implementation below */
static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);
1918
/* Allocate and set up host-side state for a rawiso transmit context:
 * allocate one iso_xmit_cmd descriptor per packet slot, register the
 * tasklet (which assigns a hardware context) and compute the per-context
 * register offsets.  Returns 0 or a negative errno; on failure everything
 * allocated so far is torn down via ohci_iso_xmit_shutdown(). */
static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
	if (!xmit)
		return -ENOMEM;

	/* initialize just enough state that ohci_iso_xmit_shutdown()
	   is safe to call from the error path below */
	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	/* one descriptor set per packet slot in the buffer */
	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	/* registering the tasklet also claims a hardware DMA context */
	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}
1963
1964static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1965{
1966 struct ohci_iso_xmit *xmit = iso->hostdata;
1967 struct ti_ohci *ohci = xmit->ohci;
1968
1969 /* disable interrupts */
1970 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1971
1972 /* halt DMA */
1973 if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1974 /* XXX the DMA context will lock up if you try to send too much data! */
1975 PRINT(KERN_ERR,
1976 "you probably exceeded the OHCI card's bandwidth limit - "
1977 "reload the module and reduce xmit bandwidth");
1978 }
1979}
1980
1981static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1982{
1983 struct ohci_iso_xmit *xmit = iso->hostdata;
1984
1985 if (xmit->task_active) {
1986 ohci_iso_xmit_stop(iso);
1987 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1988 xmit->task_active = 0;
1989 }
1990
1991 dma_prog_region_free(&xmit->prog);
1992 kfree(xmit);
1993 iso->hostdata = NULL;
1994}
1995
/* transmit tasklet: walk the descriptor ring starting at iso->pkt_dma,
 * report each packet the hardware has finished sending via
 * hpsb_iso_packet_sent(), and reset its descriptor for re-use */
static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci; /* referenced by the PRINT macro */
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8 event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		/* 0x11 = ack_complete; anything else is a hardware error */
		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}
2040
/*
 * Queue one isochronous packet for transmission.
 *
 * Builds an OUTPUT_MORE_IMMEDIATE descriptor (carrying the embedded iso
 * packet header) plus an OUTPUT_LAST descriptor for the payload at ring
 * slot iso->first_packet, then links the previous ring slot's branch
 * address to it so the hardware chains into the new packet.
 *
 * Returns 0 on success, -EINVAL if the payload crosses a page boundary
 * (only single-page payloads are supported without OUTPUT_MORE support).
 */
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;	/* used by the PRINT macro */

	int next_i, prev_i;
	struct iso_xmit_cmd *next, *prev;

	unsigned int offset;
	unsigned short len;
	unsigned char tag, sy;

	/* check that the packet doesn't cross a page boundary
	   (we could allow this if we added OUTPUT_MORE descriptor support) */
	if (cross_bound(info->offset, info->len)) {
		PRINT(KERN_ERR,
		      "rawiso xmit: packet %u crosses a page boundary",
		      iso->first_packet);
		return -EINVAL;
	}

	offset = info->offset;
	len = info->len;
	tag = info->tag;
	sy = info->sy;

	/* sync up the card's view of the buffer */
	dma_region_sync_for_device(&iso->data_buf, offset, len);

	/* append first_packet to the DMA chain */
	/* by linking the previous descriptor to it */
	/* (next will become the new end of the DMA chain) */

	next_i = iso->first_packet;
	prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

	next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
	prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

	/* set up the OUTPUT_MORE_IMMEDIATE descriptor
	 * (key=immediate, 8 bytes of immediate data = the iso header) */
	memset(next, 0, sizeof(struct iso_xmit_cmd));
	next->output_more_immediate.control = cpu_to_le32(0x02000008);

	/* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

	/* tcode = 0xA, and sy */
	next->iso_hdr[0] = 0xA0 | (sy & 0xF);

	/* tag and channel number */
	next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

	/* transmission speed */
	next->iso_hdr[2] = iso->speed & 0x7;

	/* payload size (little-endian, split over two bytes) */
	next->iso_hdr[6] = len & 0xFF;
	next->iso_hdr[7] = len >> 8;

	/* set up the OUTPUT_LAST (cmd=OUTPUT_LAST, timestamp update,
	 * interrupt on completion, branch enabled, payload length) */
	next->output_last.control = cpu_to_le32(1 << 28);
	next->output_last.control |= cpu_to_le32(1 << 27);	/* update timeStamp */
	next->output_last.control |= cpu_to_le32(3 << 20);	/* want interrupt */
	next->output_last.control |= cpu_to_le32(3 << 18);	/* enable branch */
	next->output_last.control |= cpu_to_le32(len);

	/* payload bus address */
	next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

	/* leave branchAddress at zero for now (end of chain) */

	/* re-write the previous DMA descriptor to chain to this one */

	/* set prev branch address to point to next (Z=3) */
	prev->output_last.branchAddress = cpu_to_le32(
		dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

	/* disable interrupt, unless required by the IRQ interval */
	if (prev_i % iso->irq_interval) {
		prev->output_last.control &= cpu_to_le32(~(3 << 20));	/* no interrupt */
	} else {
		prev->output_last.control |= cpu_to_le32(3 << 20);	/* enable interrupt */
	}

	/* make sure the descriptor writes are visible before waking DMA */
	wmb();

	/* wake DMA in case it is sleeping */
	reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

	/* issue a dummy read of the cycle timer to force all PCI
	   writes to be posted immediately */
	mb();
	reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

	return 0;
}
2136
/*
 * Start the isochronous transmit context.
 *
 * Points the context's CommandPtr at the descriptor ring (Z=3), optionally
 * arms cycleMatch so transmission begins on cycle 'cycle' (pass -1 to
 * start immediately), enables the IT interrupt and sets the RUN bit.
 *
 * Returns 0 on success, -1 if the context failed to go active within
 * 100 usec of setting RUN.
 */
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;	/* used by the PRINT macro */

	/* clear out the control register */
	reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* address and length of first descriptor block (Z=3) */
	reg_write(xmit->ohci, xmit->CommandPtr,
		  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

	/* cycle match */
	if (cycle != -1) {
		u32 start = cycle & 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		start |= (seconds & 3) << 13;

		/* cycleMatchEnable + 15-bit cycle match value */
		reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
	}

	/* enable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

	/* run */
	reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
	mb();

	/* wait 100 usec to give the card time to go active */
	udelay(100);

	/* check the RUN bit */
	if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
		      reg_read(xmit->ohci, xmit->ContextControlSet));
		return -1;
	}

	return 0;
}
2185
/*
 * Dispatcher for the hpsb_iso control interface: maps each isoctl_cmd to
 * the corresponding xmit/recv operation.  'arg' is overloaded per command
 * (cycle number, packet-info pointer, channel number, channel-mask
 * pointer, or an int array for RECV_START).  Returns the callee's result,
 * 0 for void operations, or -EINVAL for unknown commands.
 */
static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
{

	switch(cmd) {
	case XMIT_INIT:
		return ohci_iso_xmit_init(iso);
	case XMIT_START:
		return ohci_iso_xmit_start(iso, arg);
	case XMIT_STOP:
		ohci_iso_xmit_stop(iso);
		return 0;
	case XMIT_QUEUE:
		return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
	case XMIT_SHUTDOWN:
		ohci_iso_xmit_shutdown(iso);
		return 0;

	case RECV_INIT:
		return ohci_iso_recv_init(iso);
	case RECV_START: {
		/* arg points at {cycle, tag_mask, sync} */
		int *args = (int*) arg;
		return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
	}
	case RECV_STOP:
		ohci_iso_recv_stop(iso);
		return 0;
	case RECV_RELEASE:
		ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
		return 0;
	case RECV_FLUSH:
		/* run the receive tasklet synchronously to drain pending data */
		ohci_iso_recv_task((unsigned long) iso);
		return 0;
	case RECV_SHUTDOWN:
		ohci_iso_recv_shutdown(iso);
		return 0;
	case RECV_LISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 1);
		return 0;
	case RECV_UNLISTEN_CHANNEL:
		ohci_iso_recv_change_channel(iso, arg, 0);
		return 0;
	case RECV_SET_CHANNEL_MASK:
		ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
		return 0;

	default:
		PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return -EINVAL;
}
2238
2239/***************************************
2240 * IEEE-1394 functionality section END *
2241 ***************************************/
2242
2243
2244/********************************************************
2245 * Global stuff (interrupt handler, init/shutdown code) *
2246 ********************************************************/
2247
/*
 * Reset an async transmit DMA context, aborting everything in flight.
 *
 * Stops the context, then under d->lock moves all queued packets (both
 * fifo_list and pending_list) onto a private list and resets the
 * descriptor-ring bookkeeping.  The subsystem completion callbacks are
 * invoked only after the lock is dropped, with ACKX_ABORTED, since
 * hpsb_packet_sent() may reenter the driver.
 */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
	unsigned long flags;
	LIST_HEAD(packet_list);
	struct ti_ohci *ohci = d->ohci;	/* used by the PRINT macro */
	struct hpsb_packet *packet, *ptmp;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	/* Lock the context, reset it and release it. Move the packets
	 * that were pending in the context to packet_list and free
	 * them after releasing the lock. */

	spin_lock_irqsave(&d->lock, flags);

	list_splice(&d->fifo_list, &packet_list);
	list_splice(&d->pending_list, &packet_list);
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	/* ring is now empty: no chain tail, all programs free */
	d->branchAddrPtr = NULL;
	d->sent_ind = d->prg_ind;
	d->free_prgs = d->num_desc;

	spin_unlock_irqrestore(&d->lock, flags);

	if (list_empty(&packet_list))
		return;

	PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);

	/* Now process subsystem callbacks for the packets from this
	 * context. */
	list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
	}
}
2286
/*
 * Fan interrupt events out to registered iso tasklets.
 *
 * rx_event / tx_event are the IsoRecv/IsoXmit interrupt event bitmasks;
 * each registered tasklet is scheduled if the bit for its context is set
 * in the mask matching its direction.  The tasklet list is walked under
 * iso_tasklet_list_lock since this runs from the interrupt handler.
 */
static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
				       quadlet_t rx_event,
				       quadlet_t tx_event)
{
	struct ohci1394_iso_tasklet *t;
	unsigned long mask;
	unsigned long flags;

	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);

	list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
		mask = 1 << t->context;

		if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
			tasklet_schedule(&t->tasklet);
		else if (rx_event & mask)
			tasklet_schedule(&t->tasklet);
	}

	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
}
2308
/*
 * Main interrupt handler.
 *
 * Reads IntEventClear and acknowledges all pending events except
 * busReset (which can only be safely cleared once selfIDComplete is
 * handled), then services each event bit in turn, clearing it from the
 * local 'event' copy as it goes.  Any bit still set at the end is
 * reported as an unhandled interrupt.
 *
 * Returns IRQ_NONE for a shared/ejected-card interrupt, IRQ_HANDLED
 * otherwise.
 */
static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
                             struct pt_regs *regs_are_unused)
{
	quadlet_t event, node_id;
	struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
	struct hpsb_host *host = ohci->host;
	int phyid = -1, isroot = 0;
	unsigned long flags;

	/* Read and clear the interrupt event register. Don't clear
	 * the busReset event, though. This is done when we get the
	 * selfIDComplete interrupt. */
	spin_lock_irqsave(&ohci->event_lock, flags);
	event = reg_read(ohci, OHCI1394_IntEventClear);
	reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
	spin_unlock_irqrestore(&ohci->event_lock, flags);

	if (!event)
		return IRQ_NONE;

	/* If event is ~(u32)0 cardbus card was ejected. In this case
	 * we just return, and clean up in the ohci1394_pci_remove
	 * function. */
	if (event == ~(u32) 0) {
		DBGMSG("Device removed.");
		return IRQ_NONE;
	}

	DBGMSG("IntEvent: %08x", event);

	if (event & OHCI1394_unrecoverableError) {
		int ctx;
		PRINT(KERN_ERR, "Unrecoverable error!");

		/* dump the state of every context whose dead bit (0x800)
		 * is set, to help diagnose which one died */
		if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
				reg_read(ohci, OHCI1394_AsReqTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Tx Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
				reg_read(ohci, OHCI1394_AsRspTrCommandPtr));

		if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Req Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));

		if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
			PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
				"ctrl[%08x] cmdptr[%08x]",
				reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
				reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));

		for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Xmit %d Context died: "
					"ctrl[%08x] cmdptr[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
					reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
		}

		for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
			if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
				PRINT(KERN_ERR, "Iso Recv %d Context died: "
					"ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
					reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
					reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
		}

		event &= ~OHCI1394_unrecoverableError;
	}
	if (event & OHCI1394_postedWriteErr) {
		PRINT(KERN_ERR, "physical posted write error");
		/* no recovery strategy yet, had to involve protocol drivers */
	}
	if (event & OHCI1394_cycleTooLong) {
		if(printk_ratelimit())
			PRINT(KERN_WARNING, "isochronous cycle too long");
		else
			DBGMSG("OHCI1394_cycleTooLong");
		/* re-enable cycle master, which the hardware drops on this error */
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_CycleMaster);
		event &= ~OHCI1394_cycleTooLong;
	}
	if (event & OHCI1394_cycleInconsistent) {
		/* We subscribe to the cycleInconsistent event only to
		 * clear the corresponding event bit... otherwise,
		 * isochronous cycleMatch DMA won't work. */
		DBGMSG("OHCI1394_cycleInconsistent");
		event &= ~OHCI1394_cycleInconsistent;
	}
	if (event & OHCI1394_busReset) {
		/* The busReset event bit can't be cleared during the
		 * selfID phase, so we disable busReset interrupts, to
		 * avoid burying the cpu in interrupt requests. */
		spin_lock_irqsave(&ohci->event_lock, flags);
		reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

		if (ohci->check_busreset) {
			int loop_count = 0;

			udelay(10);

			/* spin (briefly dropping the lock each iteration)
			 * until the hardware deasserts busReset */
			while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
				reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

				spin_unlock_irqrestore(&ohci->event_lock, flags);
				udelay(10);
				spin_lock_irqsave(&ohci->event_lock, flags);

				/* The loop counter check is to prevent the driver
				 * from remaining in this state forever. For the
				 * initial bus reset, the loop continues for ever
				 * and the system hangs, until some device is plugged-in
				 * or out manually into a port! The forced reset seems
				 * to solve this problem. This mainly effects nForce2. */
				if (loop_count > 10000) {
					ohci_devctl(host, RESET_BUS, LONG_RESET);
					DBGMSG("Detected bus-reset loop. Forced a bus reset!");
					loop_count = 0;
				}

				loop_count++;
			}
		}
		spin_unlock_irqrestore(&ohci->event_lock, flags);
		if (!host->in_bus_reset) {
			DBGMSG("irq_handler: Bus reset requested");

			/* Subsystem call */
			hpsb_bus_reset(ohci->host);
		}
		event &= ~OHCI1394_busReset;
	}
	if (event & OHCI1394_reqTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_req_context;
		DBGMSG("Got reqTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "reqTxComplete");
		else
			/* run the completion processing inline rather than
			 * via the tasklet */
			dma_trm_tasklet((unsigned long)d);
			//tasklet_schedule(&d->task);
		event &= ~OHCI1394_reqTxComplete;
	}
	if (event & OHCI1394_respTxComplete) {
		struct dma_trm_ctx *d = &ohci->at_resp_context;
		DBGMSG("Got respTxComplete interrupt "
		       "status=0x%08X", reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear,
					      "respTxComplete");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_respTxComplete;
	}
	if (event & OHCI1394_RQPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_req_context;
		DBGMSG("Got RQPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RQPkt;
	}
	if (event & OHCI1394_RSPkt) {
		struct dma_rcv_ctx *d = &ohci->ar_resp_context;
		DBGMSG("Got RSPkt interrupt status=0x%08X",
		       reg_read(ohci, d->ctrlSet));
		if (reg_read(ohci, d->ctrlSet) & 0x800)
			ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
		else
			tasklet_schedule(&d->task);
		event &= ~OHCI1394_RSPkt;
	}
	if (event & OHCI1394_isochRx) {
		quadlet_t rx_event;

		rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
		ohci_schedule_iso_tasklets(ohci, rx_event, 0);
		event &= ~OHCI1394_isochRx;
	}
	if (event & OHCI1394_isochTx) {
		quadlet_t tx_event;

		tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
		ohci_schedule_iso_tasklets(ohci, 0, tx_event);
		event &= ~OHCI1394_isochTx;
	}
	if (event & OHCI1394_selfIDComplete) {
		if (host->in_bus_reset) {
			node_id = reg_read(ohci, OHCI1394_NodeID);

			/* bit 31 = idValid; if clear, another reset raced us */
			if (!(node_id & 0x80000000)) {
				PRINT(KERN_ERR,
				      "SelfID received, but NodeID invalid "
				      "(probably new bus reset occurred): %08X",
				      node_id);
				goto selfid_not_valid;
			}

			phyid =  node_id & 0x0000003f;
			isroot = (node_id & 0x40000000) != 0;

			DBGMSG("SelfID interrupt received "
			       "(phyid %d, %s)", phyid,
			       (isroot ? "root" : "not root"));

			handle_selfid(ohci, host, phyid, isroot);

			/* Clear the bus reset event and re-enable the
			 * busReset interrupt. */
			spin_lock_irqsave(&ohci->event_lock, flags);
			reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
			reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
			spin_unlock_irqrestore(&ohci->event_lock, flags);

			/* Turn on phys dma reception.
			 *
			 * TODO: Enable some sort of filtering management.
			 */
			if (phys_dma) {
				reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
					  0xffffffff);
				reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
					  0xffffffff);
			}

			DBGMSG("PhyReqFilter=%08x%08x",
			       reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
			       reg_read(ohci, OHCI1394_PhyReqFilterLoSet));

			hpsb_selfid_complete(host, phyid, isroot);
		} else
			PRINT(KERN_ERR,
			      "SelfID received outside of bus reset sequence");

selfid_not_valid:
		event &= ~OHCI1394_selfIDComplete;
	}

	/* Make sure we handle everything, just in case we accidentally
	 * enabled an interrupt that we didn't write a handler for. */
	if (event)
		PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
		      event);

	return IRQ_HANDLED;
}
2568
2569/* Put the buffer back into the dma context */
2570static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2571{
2572 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2573 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2574
2575 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2576 d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2577 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2578 d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2579
2580 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2581 * context program descriptors before it sees the wakeup bit set. */
2582 wmb();
2583
2584 /* wake up the dma context if necessary */
2585 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2586 PRINT(KERN_INFO,
2587 "Waking dma ctx=%d ... processing is probably too slow",
2588 d->ctx);
2589 }
2590
2591 /* do this always, to avoid race condition */
2592 reg_write(ohci, d->ctrlSet, 0x1000);
2593}
2594
/* Conditionally convert a little-endian quadlet from a DMA buffer to host
 * order.  When 'noswap' is set (cards that deliver data already in host
 * order), the value is returned unchanged.  Arguments are fully
 * parenthesized so the macro is safe with compound expressions. */
#define cond_le32_to_cpu(data, noswap) \
	((noswap) ? (data) : le32_to_cpu(data))

/* Fixed total packet size (header + trailer quadlets) indexed by tcode,
 * used for async receive contexts.  0 means the length must be read from
 * the packet's data_length field; -1 marks tcodes that are invalid or
 * never appear in an AR buffer. */
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
				   -1, 0, -1, 0, -1, -1, 16, -1};
2600
2601/*
2602 * Determine the length of a packet in the buffer
2603 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2604 */
/*
 * Determine the length of a packet in the buffer
 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
 *
 * For async contexts the size is looked up by tcode; a table entry of 0
 * means a block packet whose payload length sits in quadlet 3 of the
 * header (which may straddle into the next descriptor's buffer).  For
 * iso contexts (buffer-fill mode) the length comes from the first
 * quadlet.  The result is padded up to a quadlet multiple.  Returns < 4
 * for an invalid tcode (table entry -1), which callers treat as error.
 */
static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
			 int offset, unsigned char tcode, int noswap)
{
	int length = -1;

	if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
		length = TCODE_SIZE[tcode];
		if (length == 0) {
			if (offset + 12 >= d->buf_size) {
				/* data_length quadlet lives in the next buffer;
				 * index it relative to the wrap point */
				length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
						[3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
			} else {
				length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
			}
			/* add the fixed header + trailer overhead */
			length += 20;
		}
	} else if (d->type == DMA_CTX_ISO) {
		/* Assumption: buffer fill mode with header/trailer */
		length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
	}

	/* round up to quadlet boundary */
	if (length > 0 && length % 4)
		length += 4 - (length % 4);

	return length;
}
2631
2632/* Tasklet that processes dma receive buffers */
/* Tasklet that processes dma receive buffers.
 *
 * Walks the receive buffer ring from (d->buf_ind, d->buf_offset),
 * extracting one packet per iteration.  bytes_left is derived from the
 * descriptor's residual count (resCount in the low 16 status bits).
 * Packets that span buffer boundaries are reassembled into d->spb; each
 * fully consumed buffer is re-armed via insert_dma_buffer().  Complete
 * packets (except PHY packets, which are discarded) are byte-swapped if
 * needed and handed to hpsb_packet_received().  Runs entirely under
 * d->lock. */
static void dma_rcv_tasklet (unsigned long data)
{
	struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	unsigned int split_left, idx, offset, rescount;
	unsigned char tcode;
	int length, bytes_left, ack;
	unsigned long flags;
	quadlet_t *buf_ptr;
	char *split_ptr;
	char msg[256];

	spin_lock_irqsave(&d->lock, flags);

	/* resume where the previous invocation left off */
	idx = d->buf_ind;
	offset = d->buf_offset;
	buf_ptr = d->buf_cpu[idx] + offset/4;

	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
	bytes_left = d->buf_size - rescount - offset;

	while (bytes_left > 0) {
		tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

		/* packet_length() will return < 4 for an error */
		length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);

		if (length < 4) { /* something is wrong */
			sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
				tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
				d->ctx, length);
			ohci1394_stop_context(ohci, d->ctrlClear, msg);
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}

		/* The first case is where we have a packet that crosses
		 * over more than one descriptor. The next case is where
		 * it's all in the first descriptor.  */
		if ((offset + length) > d->buf_size) {
			DBGMSG("Split packet rcv'd");
			if (length > d->split_buf_size) {
				ohci1394_stop_context(ohci, d->ctrlClear,
					     "Split packet size exceeded");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			/* a status still equal to buf_size means the next
			 * descriptor has received nothing yet */
			if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
			    == d->buf_size) {
				/* Other part of packet not written yet.
				 * this should never happen I think
				 * anyway we'll get it on the next call.  */
				PRINT(KERN_INFO,
				      "Got only half a packet!");
				d->buf_ind = idx;
				d->buf_offset = offset;
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}

			/* copy the tail of this buffer into the split buffer,
			 * then continue across as many following buffers as
			 * the packet spans, re-arming each consumed buffer */
			split_left = length;
			split_ptr = (char *)d->spb;
			memcpy(split_ptr,buf_ptr,d->buf_size-offset);
			split_left -= d->buf_size-offset;
			split_ptr += d->buf_size-offset;
			insert_dma_buffer(d, idx);
			idx = (idx+1) % d->num_desc;
			buf_ptr = d->buf_cpu[idx];
			offset=0;

			while (split_left >= d->buf_size) {
				memcpy(split_ptr,buf_ptr,d->buf_size);
				split_ptr += d->buf_size;
				split_left -= d->buf_size;
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
			}

			if (split_left > 0) {
				memcpy(split_ptr, buf_ptr, split_left);
				offset = split_left;
				buf_ptr += offset/4;
			}
		} else {
			DBGMSG("Single packet rcv'd");
			memcpy(d->spb, buf_ptr, length);
			offset += length;
			buf_ptr += length/4;
			if (offset==d->buf_size) {
				insert_dma_buffer(d, idx);
				idx = (idx+1) % d->num_desc;
				buf_ptr = d->buf_cpu[idx];
				offset=0;
			}
		}

		/* We get one phy packet to the async descriptor for each
		 * bus reset. We always ignore it.  */
		if (tcode != OHCI1394_TCODE_PHY) {
			if (!ohci->no_swap_incoming)
				packet_swab(d->spb, tcode);
			DBGMSG("Packet received from node"
				" %d ack=0x%02X spd=%d tcode=0x%X"
				" length=%d ctx=%d tlabel=%d",
				(d->spb[1]>>16)&0x3f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
				(cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
				tcode, length, d->ctx,
				(cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f);

			/* ack code 0x11 (ack_complete) in the trailer maps to ack=1 */
			ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
				== 0x11) ? 1 : 0;

			/* length-4 drops the trailing status quadlet */
			hpsb_packet_received(ohci->host, d->spb,
					     length-4, ack);
		}
#ifdef OHCI1394_DEBUG
		else
			PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
			       d->ctx);
#endif

		/* refresh the residual count; the hardware may have written
		 * more data while we were processing */
	       	rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;

		bytes_left = d->buf_size - rescount - offset;

	}

	/* save resume point for the next invocation */
	d->buf_ind = idx;
	d->buf_offset = offset;

	spin_unlock_irqrestore(&d->lock, flags);
}
2770
2771/* Bottom half that processes sent packets */
/* Bottom half that processes sent packets.
 *
 * Walks d->fifo_list in order, reading each packet's completion status
 * from its descriptor (end.status for data packets, begin.status for
 * header-only/raw ones).  A zero status means the packet has not
 * completed yet and the scan stops.  Completed packets have their OHCI
 * event code translated to an hpsb ACK/ACKX code, are reported via
 * hpsb_packet_sent(), and have their payload DMA mapping released.
 * Finally dma_trm_flush() refills the descriptor ring from the pending
 * list.  Runs entirely under d->lock. */
static void dma_trm_tasklet (unsigned long data)
{
	struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	struct hpsb_packet *packet, *ptmp;
	unsigned long flags;
	u32 status, ack;
        size_t datasize;

	spin_lock_irqsave(&d->lock, flags);

	list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
		datasize = packet->data_size;
		if (datasize && packet->type != hpsb_raw)
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->end.status) >> 16;
		else
			status = le32_to_cpu(
				d->prg_cpu[d->sent_ind]->begin.status) >> 16;

		if (status == 0)
			/* this packet hasn't been sent yet*/
			break;

#ifdef OHCI1394_DEBUG
		if (datasize)
			if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
				DBGMSG("Stream packet sent to channel %d tcode=0x%X "
				       "ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
				       d->ctx);
			else
				DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
				       "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
				       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
				       status&0x1f, (status>>5)&0x3,
				       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
				       d->ctx);
		else
			DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
			       "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
                                        >>16)&0x3f,
                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
                                        >>4)&0xf,
                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
                                        >>10)&0x3f,
                                status&0x1f, (status>>5)&0x3,
                                le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
                                d->ctx);
#endif

		/* bit 4 set means the status carries a real 1394 ack code;
		 * otherwise the low 5 bits are an OHCI evt_* error code */
		if (status & 0x10) {
			ack = status & 0xf;
		} else {
			switch (status & 0x1f) {
			case EVT_NO_STATUS: /* that should never happen */
			case EVT_RESERVED_A: /* that should never happen */
			case EVT_LONG_PACKET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_MISSING_ACK:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_UNDERRUN:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_OVERRUN: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_DESCRIPTOR_READ:
			case EVT_DATA_READ:
			case EVT_DATA_WRITE:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_BUS_RESET: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_TIMEOUT:
				ack = ACKX_TIMEOUT;
				break;
			case EVT_TCODE_ERR:
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_RESERVED_B: /* that should never happen */
			case EVT_RESERVED_C: /* that should never happen */
				PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				break;
			case EVT_UNKNOWN:
			case EVT_FLUSHED:
				ack = ACKX_SEND_ERROR;
				break;
			default:
				PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
				ack = ACKX_SEND_ERROR;
				BUG();
			}
		}

		list_del_init(&packet->driver_list);
		hpsb_packet_sent(ohci->host, packet, ack);

		/* release the payload's streaming DMA mapping */
		if (datasize) {
			pci_unmap_single(ohci->dev,
					 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
					 datasize, PCI_DMA_TODEVICE);
			OHCI_DMA_FREE("single Xmit data packet");
		}

		d->sent_ind = (d->sent_ind+1)%d->num_desc;
		d->free_prgs++;
	}

	/* refill the ring from the pending list */
	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock, flags);
}
2898
/*
 * Stop a receive DMA context (no-op if ctrlClear is unset, i.e. the
 * context was never initialized).  Iso contexts additionally mask their
 * IR interrupt and unregister the legacy iso tasklet; async contexts
 * just kill their own tasklet.
 */
static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
{
	if (d->ctrlClear) {
		ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);

		if (d->type == DMA_CTX_ISO) {
			/* disable interrupts */
			reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
			ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
		} else {
			tasklet_kill(&d->task);
		}
	}
}
2913
2914
/*
 * Free all resources of a receive DMA context: the coherent data
 * buffers, the descriptor program pool, and the split-packet buffer.
 * Safe to call on a partially allocated context (it is the error-unwind
 * path of alloc_dma_rcv_ctx), and idempotent: a freed context is marked
 * by d->ohci == NULL and returns immediately.
 */
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
{
	int i;
	struct ti_ohci *ohci = d->ohci;

	if (ohci == NULL)
		return;

	DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);

	if (d->buf_cpu) {
		for (i=0; i<d->num_desc; i++)
			if (d->buf_cpu[i] && d->buf_bus[i]) {
				pci_free_consistent(
					ohci->dev, d->buf_size,
					d->buf_cpu[i], d->buf_bus[i]);
				OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
			}
		kfree(d->buf_cpu);
		kfree(d->buf_bus);
	}
	if (d->prg_cpu) {
		for (i=0; i<d->num_desc; i++)
			if (d->prg_cpu[i] && d->prg_bus[i]) {
				pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
				OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
			}
		pci_pool_destroy(d->prg_pool);
		OHCI_DMA_FREE("dma_rcv prg pool");
		kfree(d->prg_cpu);
		kfree(d->prg_bus);
	}
	/* kfree(NULL) is a no-op, so no guard needed */
	kfree(d->spb);

	/* Mark this context as freed. */
	d->ohci = NULL;
}
2952
2953static int
2954alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2955 enum context_type type, int ctx, int num_desc,
2956 int buf_size, int split_buf_size, int context_base)
2957{
2958 int i, len;
2959 static int num_allocs;
2960 static char pool_name[20];
2961
2962 d->ohci = ohci;
2963 d->type = type;
2964 d->ctx = ctx;
2965
2966 d->num_desc = num_desc;
2967 d->buf_size = buf_size;
2968 d->split_buf_size = split_buf_size;
2969
2970 d->ctrlSet = 0;
2971 d->ctrlClear = 0;
2972 d->cmdPtr = 0;
2973
Stefan Richter85511582005-11-07 06:31:45 -05002974 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2975 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002976
2977 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2978 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2979 free_dma_rcv_ctx(d);
2980 return -ENOMEM;
2981 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982
Stefan Richter85511582005-11-07 06:31:45 -05002983 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2984 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985
2986 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2987 PRINT(KERN_ERR, "Failed to allocate dma prg");
2988 free_dma_rcv_ctx(d);
2989 return -ENOMEM;
2990 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991
2992 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2993
2994 if (d->spb == NULL) {
2995 PRINT(KERN_ERR, "Failed to allocate split buffer");
2996 free_dma_rcv_ctx(d);
2997 return -ENOMEM;
2998 }
2999
3000 len = sprintf(pool_name, "ohci1394_rcv_prg");
3001 sprintf(pool_name+len, "%d", num_allocs);
3002 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3003 sizeof(struct dma_cmd), 4, 0);
3004 if(d->prg_pool == NULL)
3005 {
3006 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3007 free_dma_rcv_ctx(d);
3008 return -ENOMEM;
3009 }
3010 num_allocs++;
3011
3012 OHCI_DMA_ALLOC("dma_rcv prg pool");
3013
3014 for (i=0; i<d->num_desc; i++) {
3015 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3016 d->buf_size,
3017 d->buf_bus+i);
3018 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3019
3020 if (d->buf_cpu[i] != NULL) {
3021 memset(d->buf_cpu[i], 0, d->buf_size);
3022 } else {
3023 PRINT(KERN_ERR,
3024 "Failed to allocate dma buffer");
3025 free_dma_rcv_ctx(d);
3026 return -ENOMEM;
3027 }
3028
3029 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3030 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3031
3032 if (d->prg_cpu[i] != NULL) {
3033 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3034 } else {
3035 PRINT(KERN_ERR,
3036 "Failed to allocate dma prg");
3037 free_dma_rcv_ctx(d);
3038 return -ENOMEM;
3039 }
3040 }
3041
3042 spin_lock_init(&d->lock);
3043
3044 if (type == DMA_CTX_ISO) {
3045 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3046 OHCI_ISO_MULTICHANNEL_RECEIVE,
3047 dma_rcv_tasklet, (unsigned long) d);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048 } else {
3049 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3050 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3051 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3052
3053 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3054 }
3055
3056 return 0;
3057}
3058
3059static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3060{
3061 int i;
3062 struct ti_ohci *ohci = d->ohci;
3063
3064 if (ohci == NULL)
3065 return;
3066
3067 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3068
3069 if (d->prg_cpu) {
3070 for (i=0; i<d->num_desc; i++)
3071 if (d->prg_cpu[i] && d->prg_bus[i]) {
3072 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3073 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3074 }
3075 pci_pool_destroy(d->prg_pool);
3076 OHCI_DMA_FREE("dma_trm prg pool");
3077 kfree(d->prg_cpu);
3078 kfree(d->prg_bus);
3079 }
3080
3081 /* Mark this context as freed. */
3082 d->ohci = NULL;
3083}
3084
3085static int
3086alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3087 enum context_type type, int ctx, int num_desc,
3088 int context_base)
3089{
3090 int i, len;
3091 static char pool_name[20];
3092 static int num_allocs=0;
3093
3094 d->ohci = ohci;
3095 d->type = type;
3096 d->ctx = ctx;
3097 d->num_desc = num_desc;
3098 d->ctrlSet = 0;
3099 d->ctrlClear = 0;
3100 d->cmdPtr = 0;
3101
Stefan Richter85511582005-11-07 06:31:45 -05003102 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3103 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003104
3105 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3106 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3107 free_dma_trm_ctx(d);
3108 return -ENOMEM;
3109 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110
3111 len = sprintf(pool_name, "ohci1394_trm_prg");
3112 sprintf(pool_name+len, "%d", num_allocs);
3113 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3114 sizeof(struct at_dma_prg), 4, 0);
3115 if (d->prg_pool == NULL) {
3116 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3117 free_dma_trm_ctx(d);
3118 return -ENOMEM;
3119 }
3120 num_allocs++;
3121
3122 OHCI_DMA_ALLOC("dma_rcv prg pool");
3123
3124 for (i = 0; i < d->num_desc; i++) {
3125 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3126 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3127
3128 if (d->prg_cpu[i] != NULL) {
3129 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3130 } else {
3131 PRINT(KERN_ERR,
3132 "Failed to allocate at dma prg");
3133 free_dma_trm_ctx(d);
3134 return -ENOMEM;
3135 }
3136 }
3137
3138 spin_lock_init(&d->lock);
3139
3140 /* initialize tasklet */
3141 if (type == DMA_CTX_ISO) {
3142 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3143 dma_trm_tasklet, (unsigned long) d);
3144 if (ohci1394_register_iso_tasklet(ohci,
3145 &ohci->it_legacy_tasklet) < 0) {
3146 PRINT(KERN_ERR, "No IT DMA context available");
3147 free_dma_trm_ctx(d);
3148 return -EBUSY;
3149 }
3150
3151 /* IT can be assigned to any context by register_iso_tasklet */
3152 d->ctx = ohci->it_legacy_tasklet.context;
3153 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3154 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3155 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3156 } else {
3157 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3158 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3159 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3160 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3161 }
3162
3163 return 0;
3164}
3165
/* Push a new config ROM image to the controller: quadlets 0 (ROM header)
 * and 2 (bus options) go into their dedicated OHCI registers, and the
 * whole image is copied into the DMA-visible csr_config_rom buffer. */
static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
{
	struct ti_ohci *ohci = host->hostdata;

	/* config_rom[] holds big-endian quadlets; registers take CPU order. */
	reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
	reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));

	memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
}
3175
3176
3177static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3178 quadlet_t data, quadlet_t compare)
3179{
3180 struct ti_ohci *ohci = host->hostdata;
3181 int i;
3182
3183 reg_write(ohci, OHCI1394_CSRData, data);
3184 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3185 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3186
3187 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3188 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3189 break;
3190
3191 mdelay(1);
3192 }
3193
3194 return reg_read(ohci, OHCI1394_CSRData);
3195}
3196
/* Host-driver operations table handed to the ieee1394 core via
 * hpsb_alloc_host() in ohci1394_pci_probe(). */
static struct hpsb_host_driver ohci1394_driver = {
	.owner =		THIS_MODULE,
	.name =			OHCI1394_DRIVER_NAME,
	.set_hw_config_rom =	ohci_set_hw_config_rom,
	.transmit_packet =	ohci_transmit,
	.devctl =		ohci_devctl,
	.isoctl =               ohci_isoctl,
	.hw_csr_reg =		ohci_hw_csr_reg,
};
3206
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207/***********************************
3208 * PCI Driver Interface functions *
3209 ***********************************/
3210
/* Probe-time bailout: log the error, unwind whatever has been initialized
 * so far (ohci1394_pci_remove() keys off ohci->init_state), and return
 * 'err' from the enclosing function.  Relies on a local 'dev', so it is
 * only usable inside ohci1394_pci_probe(); it is #undef'd there. */
#define FAIL(err, fmt, args...)			\
do {						\
	PRINT_G(KERN_ERR, fmt , ## args);	\
	ohci1394_pci_remove(dev);		\
	return err;				\
} while (0)
3217
/*
 * PCI probe: bring up one OHCI-1394 controller.
 *
 * Allocates the hpsb_host and all DMA contexts, maps the register window,
 * resets and initializes the chip, installs the (shared) interrupt handler
 * and finally registers the host with the ieee1394 core.  Progress is
 * tracked in ohci->init_state so that FAIL() -> ohci1394_pci_remove()
 * can unwind exactly what was set up.  Returns 0 or a negative errno.
 */
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	struct hpsb_host *host;
	struct ti_ohci *ohci;	/* shortcut to currently handled device */
	unsigned long ohci_base;

	if (pci_enable_device(dev))
		FAIL(-ENXIO, "Failed to enable OHCI hardware");
	pci_set_master(dev);

	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
	if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");

	ohci = host->hostdata;
	ohci->dev = dev;
	ohci->host = host;
	ohci->init_state = OHCI_INIT_ALLOC_HOST;
	host->pdev = dev;
	pci_set_drvdata(dev, ohci);

	/* We don't want hardware swapping */
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	/* Some oddball Apple controllers do not order the selfid
	 * properly, so we make up for it here. */
#ifndef __LITTLE_ENDIAN
	/* XXX: Need a better way to check this. I'm wondering if we can
	 * read the values of the OHCI1394_PCI_HCI_Control and the
	 * noByteSwapData registers to see if they were not cleared to
	 * zero. Should this work? Obviously it's not defined what these
	 * registers will read when they aren't supported. Bleh! */
	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
		ohci->no_swap_incoming = 1;
		ohci->selfid_swap = 0;
	} else
		ohci->selfid_swap = 1;
#endif


#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
#endif

	/* These chipsets require a bit of extra care when checking after
	 * a busreset. */
	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
	    (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
		ohci->check_busreset = 1;

	/* We hardwire the MMIO length, since some CardBus adaptors
	 * fail to report the right length. Anyway, the ohci spec
	 * clearly says it's 2kb, so this shouldn't be a problem. */
	ohci_base = pci_resource_start(dev, 0);
	if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
		PRINT(KERN_WARNING, "PCI resource length of %lx too small!",
		      pci_resource_len(dev, 0));

	/* Seems PCMCIA handles this internally. Not sure why. Seems
	 * pretty bogus to force a driver to special case this. */
#ifndef PCMCIA
	if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
		FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
		     ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
#endif
	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;

	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL)
		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);

	/* csr_config rom allocation */
	ohci->csr_config_rom_cpu =
		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				     &ohci->csr_config_rom_bus);
	OHCI_DMA_ALLOC("consistent csr_config_rom");
	if (ohci->csr_config_rom_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;

	/* self-id dma buffer allocation */
	ohci->selfid_buf_cpu =
		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				     &ohci->selfid_buf_bus);
	OHCI_DMA_ALLOC("consistent selfid_buf");

	if (ohci->selfid_buf_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;

	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
		      "8Kb boundary... may cause problems on some CXD3222 chip",
		      ohci->selfid_buf_cpu);

	/* No self-id errors at startup */
	ohci->self_id_errors = 0;

	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
	/* AR DMA request context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
			      OHCI1394_AsReqRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Req context");

	/* AR DMA response context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
			      OHCI1394_AsRspRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Resp context");

	/* AT DMA request context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
			      OHCI1394_AsReqTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Req context");

	/* AT DMA response context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
			      OHCI1394_AsRspTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Resp context");

	/* Start off with a soft reset, to clear everything to a sane
	 * state. */
	ohci_soft_reset(ohci);

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers. In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine. Wait 50msec to make sure we have
	 * full link enabled. */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	mdelay(50);

	/* Determine the number of available IR and IT contexts. */
	ohci->nb_iso_rcv_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
	ohci->nb_iso_xmit_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);

	/* Set the usage bits for non-existent contexts so they can't
	 * be allocated */
	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;

	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
	spin_lock_init(&ohci->iso_tasklet_list_lock);
	ohci->ISO_channel_usage = 0;
	spin_lock_init(&ohci->IR_channel_lock);

	/* Allocate the IR DMA context right here so we don't have
	 * to do it in interrupt path - note that this doesn't
	 * waste much memory and avoids the jugglery required to
	 * allocate it in IRQ path. */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
			      DMA_CTX_ISO, 0, IR_NUM_DESC,
			      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
			      OHCI1394_IsoRcvContextBase) < 0) {
		FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
	}

	/* We hopefully don't have to pre-allocate IT DMA like we did
	 * for IR DMA above. Allocate it on-demand and mark inactive. */
	ohci->it_legacy_context.ohci = NULL;
	spin_lock_init(&ohci->event_lock);

	/*
	 * interrupts are disabled, all right, but... due to SA_SHIRQ we
	 * might get called anyway. We'll see no event, of course, but
	 * we need to get to that "no event", so enough should be initialized
	 * by that point.
	 */
	if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
			 OHCI1394_DRIVER_NAME, ohci))
		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);

	ohci->init_state = OHCI_INIT_HAVE_IRQ;
	ohci_initialize(ohci);

	/* Set certain csr values */
	host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
	host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
	host->csr.cyc_clk_acc = 100;	/* how do we determine clk accuracy? */
	host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
	host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;

	/* Tell the highlevel this host is ready */
	if (hpsb_add_host(host))
		FAIL(-ENOMEM, "Failed to register host with highlevel");

	ohci->init_state = OHCI_INIT_DONE;

	return 0;
#undef FAIL
}
3426
/*
 * PCI remove: tear down one controller.  Also called from probe's FAIL()
 * macro on partial initialization, so it must cope with any init_state.
 * Each switch case deliberately falls through to the next one - entering
 * at the current init_state unwinds everything set up before it, in
 * reverse order of the probe.
 */
static void ohci1394_pci_remove(struct pci_dev *pdev)
{
	struct ti_ohci *ohci;
	struct device *dev;

	ohci = pci_get_drvdata(pdev);
	if (!ohci)
		return;

	/* Pin the host device until cleanup is finished. */
	dev = get_device(&ohci->host->device);

	switch (ohci->init_state) {
	case OHCI_INIT_DONE:
		hpsb_remove_host(ohci->host);

		/* Clear out BUS Options */
		reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
			  0x00ff0000);
		memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);

		/* fall through */
	case OHCI_INIT_HAVE_IRQ:
		/* Clear interrupt registers */
		reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

		/* Disable IRM Contender */
		set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));

		/* Clear link control register */
		reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

		/* Let all other nodes know to ignore us */
		ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);

		/* Soft reset before we start - this disables
		 * interrupts and clears linkEnable and LPS. */
		ohci_soft_reset(ohci);
		free_irq(ohci->dev->irq, ohci);

		/* fall through */
	case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
		/* The ohci_soft_reset() stops all DMA contexts, so we
		 * dont need to do this. */
		free_dma_rcv_ctx(&ohci->ar_req_context);
		free_dma_rcv_ctx(&ohci->ar_resp_context);
		free_dma_trm_ctx(&ohci->at_req_context);
		free_dma_trm_ctx(&ohci->at_resp_context);
		free_dma_rcv_ctx(&ohci->ir_legacy_context);
		free_dma_trm_ctx(&ohci->it_legacy_context);

		/* fall through */
	case OHCI_INIT_HAVE_SELFID_BUFFER:
		pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
				    ohci->selfid_buf_cpu,
				    ohci->selfid_buf_bus);
		OHCI_DMA_FREE("consistent selfid_buf");

		/* fall through */
	case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
		pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				    ohci->csr_config_rom_cpu,
				    ohci->csr_config_rom_bus);
		OHCI_DMA_FREE("consistent csr_config_rom");

		/* fall through */
	case OHCI_INIT_HAVE_IOMAPPING:
		iounmap(ohci->registers);

		/* fall through */
	case OHCI_INIT_HAVE_MEM_REGION:
#ifndef PCMCIA
		release_mem_region(pci_resource_start(ohci->dev, 0),
				   OHCI1394_REGISTER_SIZE);
#endif

#ifdef CONFIG_PPC_PMAC
	/* On UniNorth, power down the cable and turn off the chip
	 * clock when the module is removed to save power on
	 * laptops. Turning it back ON is done by the arch code when
	 * pci_enable_device() is called */
	{
		struct device_node* of_node;

		of_node = pci_device_to_OF_node(ohci->dev);
		if (of_node) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
		}
	}
#endif /* CONFIG_PPC_PMAC */

		/* fall through */
	case OHCI_INIT_ALLOC_HOST:
		pci_set_drvdata(ohci->dev, NULL);
	}

	if (dev)
		put_device(dev);
}
3526
3527
/* PM resume hook: re-enable the 1394 cell on PowerMacs, then re-enable
 * the PCI device.  Returns 0 on success or the pci_enable_device() errno. */
static int ohci1394_pci_resume (struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		struct device_node *of_node;

		/* Re-enable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
	}
#endif /* CONFIG_PPC_PMAC */

	/* The original discarded this return value and always reported
	 * success; propagate failures so the PM core can see them. */
	return pci_enable_device(pdev);
}
3545
3546
/* PM suspend hook.  Only the PowerMac-specific cell power-down is done
 * here; controller/PCI state is not saved (no pci_save_state() call) -
 * NOTE(review): full suspend support appears to rely on resume re-probing
 * the chip; confirm before extending. */
static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
{
#ifdef CONFIG_PPC_PMAC
	if (machine_is(powermac)) {
		struct device_node *of_node;

		/* Disable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
	}
#endif

	return 0;
}
3562
3563
/* PCI class code for OHCI-programmed FireWire controllers:
 * base/sub-class "serial bus / FireWire" with programming interface 0x10. */
#define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)

/* Match any vendor/device that advertises the OHCI FireWire class code. */
static struct pci_device_id ohci1394_pci_tbl[] = {
	{
		.class = 	PCI_CLASS_FIREWIRE_OHCI,
		.class_mask = 	PCI_ANY_ID,
		.vendor =	PCI_ANY_ID,
		.device =	PCI_ANY_ID,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);

/* PCI driver glue: probe/remove plus the PowerMac-only PM hooks above. */
static struct pci_driver ohci1394_pci_driver = {
	.name =			OHCI1394_DRIVER_NAME,
	.id_table =		ohci1394_pci_tbl,
	.probe =		ohci1394_pci_probe,
	.remove =		ohci1394_pci_remove,
	.resume =		ohci1394_pci_resume,
	.suspend =		ohci1394_pci_suspend,
};
3588
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589/***********************************
3590 * OHCI1394 Video Interface *
3591 ***********************************/
3592
3593/* essentially the only purpose of this code is to allow another
3594 module to hook into ohci's interrupt handler */
3595
3596int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3597{
3598 int i=0;
3599
3600 /* stop the channel program if it's still running */
3601 reg_write(ohci, reg, 0x8000);
3602
3603 /* Wait until it effectively stops */
3604 while (reg_read(ohci, reg) & 0x400) {
3605 i++;
3606 if (i>5000) {
3607 PRINT(KERN_ERR,
3608 "Runaway loop while stopping context: %s...", msg ? msg : "");
3609 return 1;
3610 }
3611
3612 mb();
3613 udelay(10);
3614 }
3615 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3616 return 0;
3617}
3618
/* Prepare an iso tasklet for later registration: bind its handler/data,
 * record its type (IT / IR / multichannel IR), and pre-init its list
 * linkage.  Exported for use by other 1394 modules. */
void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
			       void (*func)(unsigned long), unsigned long data)
{
	tasklet_init(&tasklet->tasklet, func, data);
	tasklet->type = type;
	/* We init the tasklet->link field, so we can list_del() it
	 * without worrying whether it was added to the list or not. */
	INIT_LIST_HEAD(&tasklet->link);
}
3628
3629int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3630 struct ohci1394_iso_tasklet *tasklet)
3631{
3632 unsigned long flags, *usage;
3633 int n, i, r = -EBUSY;
3634
3635 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3636 n = ohci->nb_iso_xmit_ctx;
3637 usage = &ohci->it_ctx_usage;
3638 }
3639 else {
3640 n = ohci->nb_iso_rcv_ctx;
3641 usage = &ohci->ir_ctx_usage;
3642
3643 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3644 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3645 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3646 return r;
3647 }
3648 }
3649 }
3650
3651 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3652
3653 for (i = 0; i < n; i++)
3654 if (!test_and_set_bit(i, usage)) {
3655 tasklet->context = i;
3656 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3657 r = 0;
3658 break;
3659 }
3660
3661 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3662
3663 return r;
3664}
3665
3666void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3667 struct ohci1394_iso_tasklet *tasklet)
3668{
3669 unsigned long flags;
3670
3671 tasklet_kill(&tasklet->tasklet);
3672
3673 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3674
3675 if (tasklet->type == OHCI_ISO_TRANSMIT)
3676 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3677 else {
3678 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3679
3680 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3681 clear_bit(0, &ohci->ir_multichannel_used);
3682 }
3683 }
3684
3685 list_del(&tasklet->link);
3686
3687 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3688}
3689
/* Exported so that other 1394 modules can hook into this driver's
 * interrupt handling (see comment above). */
EXPORT_SYMBOL(ohci1394_stop_context);
EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);

/***********************************
 * General module initialization   *
 ***********************************/

MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
MODULE_LICENSE("GPL");
3702
/* Module exit: unregister the PCI driver, which triggers
 * ohci1394_pci_remove() for every bound controller. */
static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}
3707
/* Module init: register the PCI driver; probing of matching devices is
 * driven by the PCI core from here on. */
static int __init ohci1394_init(void)
{
	return pci_register_driver(&ohci1394_pci_driver);
}
3712
3713module_init(ohci1394_init);
3714module_exit(ohci1394_cleanup);