/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

*/

/*

  he.c

  ForeRunnerHE ATM Adapter driver for ATM on Linux
  Copyright (C) 1999-2001  Naval Research Laboratory

  Permission to use, copy, modify and distribute this software and its
  documentation is hereby granted, provided that both the copyright
  notice and this permission notice appear in all copies of the software,
  derivative works or modified versions, and any portions thereof, and
  that both notices appear in supporting documentation.

  NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
  DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
  RESULTING FROM THE USE OF THIS SOFTWARE.

  This driver was written using the "Programmer's Reference Manual for
  ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.

  AUTHORS:
	chas williams <chas@cmf.nrl.navy.mil>
	eric kinzie <ekinzie@cmf.nrl.navy.mil>

  NOTES:
	4096 supported 'connections'
	group 0 is used for all traffic
	interrupt queue 0 is used for all interrupts
	aal0 support (based on work from ulrich.u.muller@nokia.com)

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/sonet.h>

#define USE_TASKLET
#undef USE_SCATTERGATHER
#undef USE_CHECKSUM_HW			/* still confused about this */
#define USE_RBPS
#undef USE_RBPS_POOL			/* if memory is tight try this */
#undef USE_RBPL_POOL			/* if memory is tight try this */
#define USE_TPD_POOL
/* #undef CONFIG_ATM_HE_USE_SUNI */
/* #undef HE_DEBUG */

#include "he.h"
#include "suni.h"
#include <linux/atm_he.h>

#define hprintk(fmt,args...)	printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)

#ifdef HE_DEBUG
#define HPRINTK(fmt,args...)	printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
#else /* !HE_DEBUG */
#define HPRINTK(fmt,args...)	do { } while (0)
#endif /* HE_DEBUG */

/* declarations */

static int he_open(struct atm_vcc *vcc);
static void he_close(struct atm_vcc *vcc);
static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static irqreturn_t he_irq_handler(int irq, void *dev_id);
static void he_tasklet(unsigned long data);
static int he_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int he_start(struct atm_dev *dev);
static void he_stop(struct he_dev *dev);
static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
static unsigned char he_phy_get(struct atm_dev *, unsigned long);

static u8 read_prom_byte(struct he_dev *he_dev, int addr);

/* globals */

static struct he_dev *he_devs;
static int disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
static int irq_coalesce = 1;
static int sdh = 0;

/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
	CS_HIGH | CLK_HIGH,
	CS_LOW | CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW,
	CLK_HIGH,               /* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,     /* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH      /* 1 */
};
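
/* readtab above bit-bangs the EEPROM READ opcode (0000 0011b) out on
 * SI, msb first: after the CS/CLK preamble, each CLK_LOW/CLK_HIGH pair
 * shifts one opcode bit -- six 0s followed by two 1s. */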

/* Clock to read from/write to the EEPROM */
static unsigned int clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};

static struct atmdev_ops he_ops =
{
	.open =		he_open,
	.close =	he_close,
	.ioctl =	he_ioctl,
	.send =		he_send,
	.phy_put =	he_phy_put,
	.phy_get =	he_phy_get,
	.proc_read =	he_proc_read,
	.owner =	THIS_MODULE
};

#define he_writel(dev, val, reg)	do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
#define he_readl(dev, reg)		readl((dev)->membase + (reg))

/* section 2.12 connection memory access */

static __inline__ void
he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
		   unsigned flags)
{
	he_writel(he_dev, val, CON_DAT);
	(void) he_readl(he_dev, CON_DAT);	/* flush posted writes */
	he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
}

#define he_writel_rcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_RCM)

#define he_writel_tcm(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_TCM)

#define he_writel_mbox(dev, val, reg) \
			he_writel_internal(dev, val, reg, CON_CTL_MBOX)

static unsigned
he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
{
	he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
	while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
	return he_readl(he_dev, CON_DAT);
}

#define he_readl_rcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_RCM)

#define he_readl_tcm(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_TCM)

#define he_readl_mbox(dev, reg) \
			he_readl_internal(dev, reg, CON_CTL_MBOX)
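
/* the CON_CTL flags select which of the adapter's indirect address
 * spaces the access hits: receive connection memory (RCM), transmit
 * connection memory (TCM), or the mailbox used for the CS_* cell
 * scheduler registers (see he_init_cs_block() below). */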


/* figure 2.2 connection id */

#define he_mkcid(dev, vpi, vci)		(((vpi << (dev)->vcibits) | vci) & 0x1fff)
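/* e.g. assuming the default split of 0 vpibits and 12 vcibits (4096
 * connections, per the NOTES above), vpi 0/vci 32 is simply cid 32;
 * with 1 vpibit and 11 vcibits, vpi 1/vci 32 becomes (1 << 11) | 32. */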

/* 2.5.1 per connection transmit state registers */

#define he_writel_tsr0(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
#define he_readl_tsr0(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)

#define he_writel_tsr1(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)

#define he_writel_tsr2(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)

#define he_writel_tsr3(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)

#define he_writel_tsr4(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)

	/* from page 2-20
	 *
	 * NOTE While the transmit connection is active, bits 23 through 0
	 *      of this register must not be written by the host.  Byte
	 *      enables should be used during normal operation when writing
	 *      the most significant byte.
	 */

#define he_writel_tsr4_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)

#define he_readl_tsr4(dev, cid) \
		he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)

#define he_writel_tsr5(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)

#define he_writel_tsr6(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)

#define he_writel_tsr7(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)


#define he_writel_tsr8(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)

#define he_writel_tsr9(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)

#define he_writel_tsr10(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)

#define he_writel_tsr11(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)


#define he_writel_tsr12(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)

#define he_writel_tsr13(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)


#define he_writel_tsr14(dev, val, cid) \
		he_writel_tcm(dev, val, CONFIG_TSRD | cid)

#define he_writel_tsr14_upper(dev, val, cid) \
		he_writel_internal(dev, val, CONFIG_TSRD | cid, \
							CON_CTL_TCM \
							| CON_BYTE_DISABLE_2 \
							| CON_BYTE_DISABLE_1 \
							| CON_BYTE_DISABLE_0)
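
/* the TSR banks are packed at different densities: TSRA holds eight
 * words per cid (tsr0-7, hence cid << 3), TSRB four (tsr8-11), TSRC
 * two (tsr12-13) and TSRD one (tsr14) -- which is what the shrinking
 * shifts above encode. */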

/* 2.7.1 per connection receive state registers */

#define he_writel_rsr0(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
#define he_readl_rsr0(dev, cid) \
		he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)

#define he_writel_rsr1(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)

#define he_writel_rsr2(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)

#define he_writel_rsr3(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)

#define he_writel_rsr4(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)

#define he_writel_rsr5(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)

#define he_writel_rsr6(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)

#define he_writel_rsr7(dev, val, cid) \
		he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)

static __inline__ struct atm_vcc*
__find_vcc(struct he_dev *he_dev, unsigned cid)
{
	struct hlist_head *head;
	struct atm_vcc *vcc;
	struct hlist_node *node;
	struct sock *s;
	short vpi;
	int vci;

	vpi = cid >> he_dev->vcibits;
	vci = cid & ((1 << he_dev->vcibits) - 1);
	head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];

	sk_for_each(s, node, head) {
		vcc = atm_sk(s);
		if (vcc->dev == he_dev->atm_dev &&
		    vcc->vci == vci && vcc->vpi == vpi &&
		    vcc->qos.rxtp.traffic_class != ATM_NONE) {
			return vcc;
		}
	}
	return NULL;
}
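
/* __find_vcc() above walks the global vcc hash without taking any
 * lock itself; callers such as he_service_rbrq() below hold
 * vcc_sklist_lock for reading around it. */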

static int __devinit
he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
{
	struct atm_dev *atm_dev = NULL;
	struct he_dev *he_dev = NULL;
	int err = 0;

	printk(KERN_INFO "ATM he driver\n");

	if (pci_enable_device(pci_dev))
		return -EIO;
	if (pci_set_dma_mask(pci_dev, DMA_32BIT_MASK) != 0) {
		printk(KERN_WARNING "he: no suitable dma available\n");
		err = -EIO;
		goto init_one_failure;
	}

	atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
	if (!atm_dev) {
		err = -ENODEV;
		goto init_one_failure;
	}
	pci_set_drvdata(pci_dev, atm_dev);

	he_dev = kzalloc(sizeof(struct he_dev),
			 GFP_KERNEL);
	if (!he_dev) {
		err = -ENOMEM;
		goto init_one_failure;
	}
	he_dev->pci_dev = pci_dev;
	he_dev->atm_dev = atm_dev;
	he_dev->atm_dev->dev_data = he_dev;
	atm_dev->dev_data = he_dev;
	he_dev->number = atm_dev->number;
#ifdef USE_TASKLET
	tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
#endif
	spin_lock_init(&he_dev->global_lock);

	if (he_start(atm_dev)) {
		he_stop(he_dev);
		err = -ENODEV;
		goto init_one_failure;
	}
	he_dev->next = NULL;
	if (he_devs)
		he_dev->next = he_devs;
	he_devs = he_dev;
	return 0;

init_one_failure:
	if (atm_dev)
		atm_dev_deregister(atm_dev);
	kfree(he_dev);
	pci_disable_device(pci_dev);
	return err;
}

static void __devexit
he_remove_one (struct pci_dev *pci_dev)
{
	struct atm_dev *atm_dev;
	struct he_dev *he_dev;

	atm_dev = pci_get_drvdata(pci_dev);
	he_dev = HE_DEV(atm_dev);

	/* need to remove from he_devs */

	he_stop(he_dev);
	atm_dev_deregister(atm_dev);
	kfree(he_dev);

	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
}


static unsigned
rate_to_atmf(unsigned rate)		/* cps to atm forum format */
{
#define NONZERO (1 << 14)

	unsigned exp = 0;

	if (rate == 0)
		return 0;

	rate <<= 9;
	while (rate > 0x3ff) {
		++exp;
		rate >>= 1;
	}

	return (NONZERO | (exp << 9) | (rate & 0x1ff));
}
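
/* the atm forum format built above is a floating point cell rate:
 * bit 14 is the nonzero flag, bits 13..9 a 5-bit exponent, and bits
 * 8..0 a 9-bit mantissa with an implied leading one, i.e.
 * rate = 2^exp * (512 + man) / 512 -- the inverse of the decode in
 * he_init_cs_block_rcm() below. */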

static void __devinit
he_init_rx_lbfp0(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;

	lbufd_index = 0;
	lbm_offset = he_readl(he_dev, RCMLBM_BA);

	he_writel(he_dev, lbufd_index, RLBF0_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF0_T);
	he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
}
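
/* each local buffer descriptor in RCM is a two-word entry (buffer
 * address, next descriptor index); the two rx free pools interleave,
 * pool 0 on even indices and pool 1 on odd, which is why the rx loops
 * in he_init_rx_lbfp0/1 step lbufd_index by 2 and lbm_offset by 4,
 * while the dense tx list below steps by 1 and 2. */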

static void __devinit
he_init_rx_lbfp1(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;

	lbufd_index = 1;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, RLBF1_H);

	for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
		lbufd_index += 2;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 4;
	}

	he_writel(he_dev, lbufd_index - 2, RLBF1_T);
	he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
}

static void __devinit
he_init_tx_lbfp(struct he_dev *he_dev)
{
	unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
	unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
	unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
	unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;

	lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
	lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);

	he_writel(he_dev, lbufd_index, TLBF_H);

	for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
		lbufd_index += 1;
		lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;

		he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
		he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);

		if (++lbuf_count == lbufs_per_row) {
			lbuf_count = 0;
			row_offset += he_dev->bytes_per_row;
		}
		lbm_offset += 2;
	}

	he_writel(he_dev, lbufd_index - 1, TLBF_T);
}

static int __devinit
he_init_tpdrq(struct he_dev *he_dev)
{
	he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
	if (he_dev->tpdrq_base == NULL) {
		hprintk("failed to alloc tpdrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tpdrq_base, 0,
		CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));

	he_dev->tpdrq_tail = he_dev->tpdrq_base;
	he_dev->tpdrq_head = he_dev->tpdrq_base;

	he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
	he_writel(he_dev, 0, TPDRQ_T);
	he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);

	return 0;
}
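
/* the tpdrq is a circular queue of tpd descriptors in host memory;
 * presumably the driver enqueues at the tail (TPDRQ_T) and the
 * adapter consumes from the head, so both start out equal (empty)
 * here. */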

static void __devinit
he_init_cs_block(struct he_dev *he_dev)
{
	unsigned clock, rate, delta;
	int reg;

	/* 5.1.7 cs block initialization */

	for (reg = 0; reg < 0x20; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);

	/* rate grid timer reload values */

	clock = he_is622(he_dev) ? 66667000 : 50000000;
	rate = he_dev->atm_dev->link_rate;
	delta = rate / 16 / 2;

	for (reg = 0; reg < 0x10; ++reg) {
		/* 2.4 internal transmit function
		 *
		 * we initialize the first row in the rate grid.
		 * values are period (in clock cycles) of timer
		 */
		unsigned period = clock / rate;

		he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
		rate -= delta;
	}

	if (he_is622(he_dev)) {
		/* table 5.2 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0x14585, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x5, CS_OTPPER);
		he_writel_mbox(he_dev, 0x14, CS_OTWPER);
	} else {
		/* table 5.1 (4 cells per lbuf) */
		he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
		he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
		he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
		he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
		he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);

		/* table 5.3, 5.4, 5.5, 5.6, 5.7 */
		he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
		he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
		he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
		he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
		he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
		he_writel_mbox(he_dev, 0xf424, CS_RTFWR);

		he_writel_mbox(he_dev, 0x4680, CS_RTATR);

		/* table 5.8 */
		he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
		he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
		he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
		he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
		he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);

		/* table 5.9 */
		he_writel_mbox(he_dev, 0x6, CS_OTPPER);
		he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
	}

	he_writel_mbox(he_dev, 0x8, CS_OTTLIM);

	for (reg = 0; reg < 0x8; ++reg)
		he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);

}

static int __devinit
he_init_cs_block_rcm(struct he_dev *he_dev)
{
	unsigned (*rategrid)[16][16];
	unsigned rate, delta;
	int i, j, reg;

	unsigned rate_atmf, exp, man;
	unsigned long long rate_cps;
	int mult, buf, buf_limit = 4;

	rategrid = kmalloc(sizeof(unsigned) * 16 * 16, GFP_KERNEL);
	if (!rategrid)
		return -ENOMEM;

	/* initialize rate grid group table */

	for (reg = 0x0; reg < 0xff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize rate controller groups */

	for (reg = 0x100; reg < 0x1ff; ++reg)
		he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);

	/* initialize tNrm lookup table */

	/* the manual makes reference to a routine in a sample driver
	   for proper configuration; fortunately, we only need this
	   in order to support abr connection */

	/* initialize rate to group table */

	rate = he_dev->atm_dev->link_rate;
	delta = rate / 32;

	/*
	 * 2.4 transmit internal functions
	 *
	 * we construct a copy of the rate grid used by the scheduler
	 * in order to construct the rate to group table below
	 */

	for (j = 0; j < 16; j++) {
		(*rategrid)[0][j] = rate;
		rate -= delta;
	}

	for (i = 1; i < 16; i++)
		for (j = 0; j < 16; j++)
			if (i > 14)
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
			else
				(*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;

	/*
	 * 2.4 transmit internal function
	 *
	 * this table maps the upper 5 bits of exponent and mantissa
	 * of the atm forum representation of the rate into an index
	 * on rate grid
	 */

	rate_atmf = 0;
	while (rate_atmf < 0x400) {
		man = (rate_atmf & 0x1f) << 4;
		exp = rate_atmf >> 5;

		/*
		   instead of '/ 512', use '>> 9' to prevent a call
		   to divdu3 on x86 platforms
		 */
		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
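
		/* e.g. rate_atmf 0x23 -> exp 1, man 48:
		 * rate_cps = 2 * (48 + 512) / 512 = 2 cps, which the
		 * check below then raises to the 10 cps floor */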

		if (rate_cps < 10)
			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */

		for (i = 255; i > 0; i--)
			if ((*rategrid)[i/16][i%16] >= rate_cps)
				break;	/* pick nearest rate instead? */

		/*
		 * each table entry is 16 bits: (rate grid index (8 bits)
		 * and a buffer limit (8 bits)
		 * there are two table entries in each 32-bit register
		 */

#ifdef notdef
		buf = rate_cps * he_dev->tx_numbuffs /
				(he_dev->atm_dev->link_rate * 2);
#else
		/* this is pretty, but avoids _divdu3 and is mostly correct */
		mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
		if (rate_cps > (272 * mult))
			buf = 4;
		else if (rate_cps > (204 * mult))
			buf = 3;
		else if (rate_cps > (136 * mult))
			buf = 2;
		else if (rate_cps > (68 * mult))
			buf = 1;
		else
			buf = 0;
#endif
		if (buf > buf_limit)
			buf = buf_limit;
		reg = (reg << 16) | ((i << 8) | buf);

#define RTGTBL_OFFSET 0x400

		if (rate_atmf & 0x1)
			he_writel_rcm(he_dev, reg,
				CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));

		++rate_atmf;
	}

	kfree(rategrid);
	return 0;
}

static int __devinit
he_init_group(struct he_dev *he_dev, int group)
{
	int i;

#ifdef USE_RBPS
	/* small buffer pool */
#ifdef USE_RBPS_POOL
	he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
			CONFIG_RBPS_BUFSIZE, 8, 0);
	if (he_dev->rbps_pool == NULL) {
		hprintk("unable to create rbps pages\n");
		return -ENOMEM;
	}
#else /* !USE_RBPS_POOL */
	he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
	if (he_dev->rbps_pages == NULL) {
		hprintk("unable to create rbps page pool\n");
		return -ENOMEM;
	}
#endif /* USE_RBPS_POOL */

	he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
	if (he_dev->rbps_base == NULL) {
		hprintk("failed to alloc rbps\n");
		return -ENOMEM;
	}
	memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
	he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbps_virt == NULL) {
		hprintk("failed to alloc rbps_virt\n");
		return -ENOMEM;
	}

	for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPS_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbps_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
		dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
#endif

		he_dev->rbps_virt[i].virt = cpuaddr;
		he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
		he_dev->rbps_base[i].phys = dma_handle;

	}
	he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];

	he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
	he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
						G0_RBPS_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
						G0_RBPS_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPS_THRESH) |
			RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPS_QI + (group * 32));
#else /* !USE_RBPS */
	he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
	he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
	he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));
#endif /* USE_RBPS */

	/* large buffer pool */
#ifdef USE_RBPL_POOL
	he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
			CONFIG_RBPL_BUFSIZE, 8, 0);
	if (he_dev->rbpl_pool == NULL) {
		hprintk("unable to create rbpl pool\n");
		return -ENOMEM;
	}
#else /* !USE_RBPL_POOL */
	he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
	if (he_dev->rbpl_pages == NULL) {
		hprintk("unable to create rbpl pages\n");
		return -ENOMEM;
	}
#endif /* USE_RBPL_POOL */

	he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
	if (he_dev->rbpl_base == NULL) {
		hprintk("failed to alloc rbpl\n");
		return -ENOMEM;
	}
	memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
	he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
	if (he_dev->rbpl_virt == NULL) {
		hprintk("failed to alloc rbpl_virt\n");
		return -ENOMEM;
	}

	for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
		dma_addr_t dma_handle;
		void *cpuaddr;

#ifdef USE_RBPL_POOL
		cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &dma_handle);
		if (cpuaddr == NULL)
			return -ENOMEM;
#else
		cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
		dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
#endif

		he_dev->rbpl_virt[i].virt = cpuaddr;
		he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
		he_dev->rbpl_base[i].phys = dma_handle;
	}
	he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];

	he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
	he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
						G0_RBPL_T + (group * 32));
	he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
						G0_RBPL_BS + (group * 32));
	he_writel(he_dev,
			RBP_THRESH(CONFIG_RBPL_THRESH) |
			RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
			RBP_INT_ENB,
						G0_RBPL_QI + (group * 32));

	/* rx buffer ready queue */

	he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
	if (he_dev->rbrq_base == NULL) {
		hprintk("failed to allocate rbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));

	he_dev->rbrq_head = he_dev->rbrq_base;
	he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
	he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
	he_writel(he_dev,
		RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
						G0_RBRQ_Q + (group * 16));
	if (irq_coalesce) {
		hprintk("coalescing interrupts\n");
		he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
						G0_RBRQ_I + (group * 16));
	} else
		he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
						G0_RBRQ_I + (group * 16));

	/* tx buffer ready queue */

	he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
		CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
	if (he_dev->tbrq_base == NULL) {
		hprintk("failed to allocate tbrq\n");
		return -ENOMEM;
	}
	memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));

	he_dev->tbrq_head = he_dev->tbrq_base;

	he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
	he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
	he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));

	return 0;
}

static int __devinit
he_init_irq(struct he_dev *he_dev)
{
	int i;

	/* 2.9.3.5 tail offset for each interrupt queue is located after the
		   end of the interrupt queue */

	he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
			(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
	if (he_dev->irq_base == NULL) {
		hprintk("failed to allocate irq\n");
		return -ENOMEM;
	}
	he_dev->irq_tailoffset = (unsigned *)
					&he_dev->irq_base[CONFIG_IRQ_SIZE];
	*he_dev->irq_tailoffset = 0;
	he_dev->irq_head = he_dev->irq_base;
	he_dev->irq_tail = he_dev->irq_base;

	for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
		he_dev->irq_base[i].isw = ITYPE_INVALID;

	he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
	he_writel(he_dev,
		IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
								IRQ0_HEAD);
	he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
	he_writel(he_dev, 0x0, IRQ0_DATA);

	he_writel(he_dev, 0x0, IRQ1_BASE);
	he_writel(he_dev, 0x0, IRQ1_HEAD);
	he_writel(he_dev, 0x0, IRQ1_CNTL);
	he_writel(he_dev, 0x0, IRQ1_DATA);

	he_writel(he_dev, 0x0, IRQ2_BASE);
	he_writel(he_dev, 0x0, IRQ2_HEAD);
	he_writel(he_dev, 0x0, IRQ2_CNTL);
	he_writel(he_dev, 0x0, IRQ2_DATA);

	he_writel(he_dev, 0x0, IRQ3_BASE);
	he_writel(he_dev, 0x0, IRQ3_HEAD);
	he_writel(he_dev, 0x0, IRQ3_CNTL);
	he_writel(he_dev, 0x0, IRQ3_DATA);

	/* 2.9.3.2 interrupt queue mapping registers */

	he_writel(he_dev, 0x0, GRP_10_MAP);
	he_writel(he_dev, 0x0, GRP_32_MAP);
	he_writel(he_dev, 0x0, GRP_54_MAP);
	he_writel(he_dev, 0x0, GRP_76_MAP);

	if (request_irq(he_dev->pci_dev->irq, he_irq_handler, IRQF_DISABLED|IRQF_SHARED, DEV_LABEL, he_dev)) {
		hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
		return -EINVAL;
	}

	he_dev->irq = he_dev->pci_dev->irq;

	return 0;
}

static int __devinit
he_start(struct atm_dev *dev)
{
	struct he_dev *he_dev;
	struct pci_dev *pci_dev;
	unsigned long membase;

	u16 command;
	u32 gen_cntl_0, host_cntl, lb_swap;
	u8 cache_size, timer;

	unsigned err;
	unsigned int status, reg;
	int i, group;

	he_dev = HE_DEV(dev);
	pci_dev = he_dev->pci_dev;

	membase = pci_resource_start(pci_dev, 0);
	HPRINTK("membase = 0x%lx  irq = %d.\n", membase, pci_dev->irq);

	/*
	 * pci bus controller initialization
	 */

	/* 4.3 pci bus controller-specific initialization */
	if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
		hprintk("can't read GEN_CNTL_0\n");
		return -EINVAL;
	}
	gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
	if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
		hprintk("can't write GEN_CNTL_0.\n");
		return -EINVAL;
	}

	if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
		hprintk("can't read PCI_COMMAND.\n");
		return -EINVAL;
	}

	command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
	if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
		hprintk("can't enable memory.\n");
		return -EINVAL;
	}

	if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
		hprintk("can't read cache line size?\n");
		return -EINVAL;
	}

	if (cache_size < 16) {
		cache_size = 16;
		if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
			hprintk("can't set cache line size to %d\n", cache_size);
	}

	if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
		hprintk("can't read latency timer?\n");
		return -EINVAL;
	}

	/* from table 3.9
	 *
	 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
	 *
	 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
	 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
	 *
	 */
#define LAT_TIMER 209
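	/* i.e. 1 + 16 (max AVG_LAT) + 192 (BURST_SIZE/BUS_SIZE) = 209 */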
	if (timer < LAT_TIMER) {
		HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
		timer = LAT_TIMER;
		if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
			hprintk("can't set latency timer to %d\n", timer);
	}

	if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
		hprintk("can't set up page mapping\n");
		return -EINVAL;
	}

	/* 4.4 card reset */
	he_writel(he_dev, 0x0, RESET_CNTL);
	he_writel(he_dev, 0xff, RESET_CNTL);

	udelay(16*1000);	/* 16 ms */
	status = he_readl(he_dev, RESET_CNTL);
	if ((status & BOARD_RST_STATUS) == 0) {
		hprintk("reset failed\n");
		return -EINVAL;
	}

	/* 4.5 set bus width */
	host_cntl = he_readl(he_dev, HOST_CNTL);
	if (host_cntl & PCI_BUS_SIZE64)
		gen_cntl_0 |= ENBL_64;
	else
		gen_cntl_0 &= ~ENBL_64;

	if (disable64 == 1) {
		hprintk("disabling 64-bit pci bus transfers\n");
		gen_cntl_0 &= ~ENBL_64;
	}

	if (gen_cntl_0 & ENBL_64)
		hprintk("64-bit transfers enabled\n");

	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/* 4.7 read prom contents */
	for (i = 0; i < PROD_ID_LEN; ++i)
		he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);

	he_dev->media = read_prom_byte(he_dev, MEDIA);

	for (i = 0; i < 6; ++i)
		dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);

	hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
				he_dev->prod_id,
				he_dev->media & 0x40 ? "SM" : "MM",
				dev->esi[0],
				dev->esi[1],
				dev->esi[2],
				dev->esi[3],
				dev->esi[4],
				dev->esi[5]);
	he_dev->atm_dev->link_rate = he_is622(he_dev) ?
						ATM_OC12_PCR : ATM_OC3_PCR;

	/* 4.6 set host endianness */
	lb_swap = he_readl(he_dev, LB_SWAP);
	if (he_is622(he_dev))
		lb_swap &= ~XFER_SIZE;		/* 4 cells */
	else
		lb_swap |= XFER_SIZE;		/* 8 cells */
#ifdef __BIG_ENDIAN
	lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
#else
	lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
			DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
#endif /* __BIG_ENDIAN */
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.8 sdram controller initialization */
	he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);

	/* 4.9 initialize rnum value */
	lb_swap |= SWAP_RNUM_MAX(0xf);
	he_writel(he_dev, lb_swap, LB_SWAP);

	/* 4.10 initialize the interrupt queues */
	if ((err = he_init_irq(he_dev)) != 0)
		return err;

	/* 4.11 enable pci bus controller state machines */
	host_cntl |= (OUTFF_ENB | CMDFF_ENB |
				QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
	he_writel(he_dev, host_cntl, HOST_CNTL);

	gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
	pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

	/*
	 * atm network controller initialization
	 */

	/* 5.1.1 generic configuration state */

	/*
	 *		local (cell) buffer memory map
	 *
	 *             HE155                          HE622
	 *
	 *        0 ____________1023       bytes  0 _______________________2047 bytes
	 *         |            |                  |                   |   |
	 *         |  utility   |                  |        rx0        |   |
	 *       5 |____________|               255|___________________| u |
	 *       6 |            |               256|                   | t |
	 *         |            |                  |                   | i |
	 *         |    rx0     |               row|        tx         | l |
	 *         |            |                  |                   | i |
	 *         |            |               767|___________________| t |
	 *     517 |____________|               768|                   | y |
	 * row 518 |            |                  |        rx1        |   |
	 *         |            |              1023|___________________|___|
	 *         |            |
	 *         |     tx     |
	 *         |            |
	 *         |            |
	 *    1535 |____________|
	 *    1536 |            |
	 *         |    rx1     |
	 *    2047 |____________|
	 *
	 */

	/* total 4096 connections */
	he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
	he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;

	if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
		hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
		return -ENODEV;
	}

	if (nvpibits != -1) {
		he_dev->vpibits = nvpibits;
		he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
	}

	if (nvcibits != -1) {
		he_dev->vcibits = nvcibits;
		he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
	}


	if (he_is622(he_dev)) {
		he_dev->cells_per_row = 40;
		he_dev->bytes_per_row = 2048;
		he_dev->r0_numrows = 256;
		he_dev->tx_numrows = 512;
		he_dev->r1_numrows = 256;
		he_dev->r0_startrow = 0;
		he_dev->tx_startrow = 256;
		he_dev->r1_startrow = 768;
	} else {
		he_dev->cells_per_row = 20;
		he_dev->bytes_per_row = 1024;
		he_dev->r0_numrows = 512;
		he_dev->tx_numrows = 1018;
		he_dev->r1_numrows = 512;
		he_dev->r0_startrow = 6;
		he_dev->tx_startrow = 518;
		he_dev->r1_startrow = 1536;
	}

	he_dev->cells_per_lbuf = 4;
	he_dev->buffer_limit = 4;
	he_dev->r0_numbuffs = he_dev->r0_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r0_numbuffs > 2560)
		he_dev->r0_numbuffs = 2560;

	he_dev->r1_numbuffs = he_dev->r1_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->r1_numbuffs > 2560)
		he_dev->r1_numbuffs = 2560;

	he_dev->tx_numbuffs = he_dev->tx_numrows *
				he_dev->cells_per_row / he_dev->cells_per_lbuf;
	if (he_dev->tx_numbuffs > 5120)
		he_dev->tx_numbuffs = 5120;

	/* 5.1.2 configure hardware dependent registers */

	he_writel(he_dev,
		SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
		RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
		(he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
		(he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
								LBARB);

	he_writel(he_dev, BANK_ON |
		(he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
								SDRAMCON);

	he_writel(he_dev,
		(he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
						RM_RW_WAIT(1), RCMCONFIG);
	he_writel(he_dev,
		(he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
						TM_RW_WAIT(1), TCMCONFIG);

	he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);

	he_writel(he_dev,
		(he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
		(he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
		RX_VALVP(he_dev->vpibits) |
		RX_VALVC(he_dev->vcibits), RC_CONFIG);

	he_writel(he_dev, DRF_THRESH(0x20) |
		(he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
		TX_VCI_MASK(he_dev->vcibits) |
		LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);

	he_writel(he_dev, 0x0, TXAAL5_PROTO);

	he_writel(he_dev, PHY_INT_ENB |
		(he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
								RH_CONFIG);

	/* 5.1.3 initialize connection memory */

	for (i = 0; i < TCM_MEM_SIZE; ++i)
		he_writel_tcm(he_dev, 0, i);

	for (i = 0; i < RCM_MEM_SIZE; ++i)
		he_writel_rcm(he_dev, 0, i);

	/*
	 *	transmit connection memory map
	 *
	 *                  tx memory
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       TSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       TSRb        |
	 *       0xc000|___________________|
	 *             |                   |
	 *             |       TSRc        |
	 *       0xe000|___________________|
	 *             |       TSRd        |
	 *       0xf000|___________________|
	 *             |       tmABR       |
	 *      0x10000|___________________|
	 *             |                   |
	 *             |       tmTPD       |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *      0x1ffff|___________________|
	 *
	 *
	 */

	he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
	he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
	he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
	he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
	he_writel(he_dev, CONFIG_TPDBA, TPD_BA);


	/*
	 *	receive connection memory map
	 *
	 *          0x0 ___________________
	 *             |                   |
	 *             |                   |
	 *             |       RSRa        |
	 *             |                   |
	 *             |                   |
	 *       0x8000|___________________|
	 *             |                   |
	 *             |       rx0/1       |
	 *             |       LBM         |   link lists of local
	 *             |       tx          |   buffer memory
	 *             |                   |
	 *       0xd000|___________________|
	 *             |                   |
	 *             |      rmABR        |
	 *       0xe000|___________________|
	 *             |                   |
	 *             |       RSRb        |
	 *             |___________________|
	 *             |                   |
	 *                      ....
	 *       0xffff|___________________|
	 */

	he_writel(he_dev, 0x08000, RCMLBM_BA);
	he_writel(he_dev, 0x0e000, RCMRSRB_BA);
	he_writel(he_dev, 0x0d800, RCMABR_BA);

	/* 5.1.4 initialize local buffer free pools linked lists */

	he_init_rx_lbfp0(he_dev);
	he_init_rx_lbfp1(he_dev);

	he_writel(he_dev, 0x0, RLBC_H);
	he_writel(he_dev, 0x0, RLBC_T);
	he_writel(he_dev, 0x0, RLBC_H2);

	he_writel(he_dev, 512, RXTHRSH);	/* 10% of r0+r1 buffers */
	he_writel(he_dev, 256, LITHRSH);	/* 5% of r0+r1 buffers */

	he_init_tx_lbfp(he_dev);

	he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);

	/* 5.1.5 initialize intermediate receive queues */

	if (he_is622(he_dev)) {
		he_writel(he_dev, 0x000f, G0_INMQ_S);
		he_writel(he_dev, 0x200f, G0_INMQ_L);

		he_writel(he_dev, 0x001f, G1_INMQ_S);
		he_writel(he_dev, 0x201f, G1_INMQ_L);

		he_writel(he_dev, 0x002f, G2_INMQ_S);
		he_writel(he_dev, 0x202f, G2_INMQ_L);

		he_writel(he_dev, 0x003f, G3_INMQ_S);
		he_writel(he_dev, 0x203f, G3_INMQ_L);

		he_writel(he_dev, 0x004f, G4_INMQ_S);
		he_writel(he_dev, 0x204f, G4_INMQ_L);

		he_writel(he_dev, 0x005f, G5_INMQ_S);
		he_writel(he_dev, 0x205f, G5_INMQ_L);

		he_writel(he_dev, 0x006f, G6_INMQ_S);
		he_writel(he_dev, 0x206f, G6_INMQ_L);

		he_writel(he_dev, 0x007f, G7_INMQ_S);
		he_writel(he_dev, 0x207f, G7_INMQ_L);
	} else {
		he_writel(he_dev, 0x0000, G0_INMQ_S);
		he_writel(he_dev, 0x0008, G0_INMQ_L);

		he_writel(he_dev, 0x0001, G1_INMQ_S);
		he_writel(he_dev, 0x0009, G1_INMQ_L);

		he_writel(he_dev, 0x0002, G2_INMQ_S);
		he_writel(he_dev, 0x000a, G2_INMQ_L);

		he_writel(he_dev, 0x0003, G3_INMQ_S);
		he_writel(he_dev, 0x000b, G3_INMQ_L);

		he_writel(he_dev, 0x0004, G4_INMQ_S);
		he_writel(he_dev, 0x000c, G4_INMQ_L);

		he_writel(he_dev, 0x0005, G5_INMQ_S);
		he_writel(he_dev, 0x000d, G5_INMQ_L);

		he_writel(he_dev, 0x0006, G6_INMQ_S);
		he_writel(he_dev, 0x000e, G6_INMQ_L);

		he_writel(he_dev, 0x0007, G7_INMQ_S);
		he_writel(he_dev, 0x000f, G7_INMQ_L);
	}

	/* 5.1.6 application tunable parameters */

	he_writel(he_dev, 0x0, MCC);
	he_writel(he_dev, 0x0, OEC);
	he_writel(he_dev, 0x0, DCC);
	he_writel(he_dev, 0x0, CEC);

	/* 5.1.7 cs block initialization */

	he_init_cs_block(he_dev);

	/* 5.1.8 cs block connection memory initialization */

	if (he_init_cs_block_rcm(he_dev) < 0)
		return -ENOMEM;

	/* 5.1.10 initialize host structures */

	he_init_tpdrq(he_dev);

#ifdef USE_TPD_POOL
	he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
		sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
	if (he_dev->tpd_pool == NULL) {
		hprintk("unable to create tpd pci_pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&he_dev->outstanding_tpds);
#else
	he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
			CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
	if (!he_dev->tpd_base)
		return -ENOMEM;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
		he_dev->tpd_base[i].inuse = 0;
	}

	he_dev->tpd_head = he_dev->tpd_base;
	he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
#endif

	if (he_init_group(he_dev, 0) != 0)
		return -ENOMEM;

	for (group = 1; group < HE_NUM_GROUPS; ++group) {
		he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPS_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
		he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
						G0_RBPL_QI + (group * 32));
		he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));

		he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
		he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
						G0_RBRQ_Q + (group * 16));
		he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));

		he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
		he_writel(he_dev, TBRQ_THRESH(0x1),
						G0_TBRQ_THRESH + (group * 16));
		he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
	}

	/* host status page */

	he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
				sizeof(struct he_hsp), &he_dev->hsp_phys);
	if (he_dev->hsp == NULL) {
		hprintk("failed to allocate host status page\n");
		return -ENOMEM;
	}
	memset(he_dev->hsp, 0, sizeof(struct he_hsp));
	he_writel(he_dev, he_dev->hsp_phys, HSP_BA);

	/* initialize framer */

#ifdef CONFIG_ATM_HE_USE_SUNI
	suni_init(he_dev->atm_dev);
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
		he_dev->atm_dev->phy->start(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (sdh) {
		/* this really should be in suni.c but for now... */
		int val;

		val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
		val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
		he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
	}

	/* 5.1.12 enable transmit and receive */

	reg = he_readl_mbox(he_dev, CS_ERCTL0);
	reg |= TX_ENABLE|ER_ENABLE;
	he_writel_mbox(he_dev, reg, CS_ERCTL0);

	reg = he_readl(he_dev, RC_CONFIG);
	reg |= RX_ENABLE;
	he_writel(he_dev, reg, RC_CONFIG);

	for (i = 0; i < HE_NUM_CS_STPER; ++i) {
		he_dev->cs_stper[i].inuse = 0;
		he_dev->cs_stper[i].pcr = -1;
	}
	he_dev->total_bw = 0;


	/* atm linux initialization */

	he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
	he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;

	he_dev->irq_peak = 0;
	he_dev->rbrq_peak = 0;
	he_dev->rbpl_peak = 0;
	he_dev->tbrq_peak = 0;

	HPRINTK("hell bent for leather!\n");

	return 0;
}

static void
he_stop(struct he_dev *he_dev)
{
	u16 command;
	u32 gen_cntl_0, reg;
	struct pci_dev *pci_dev;

	pci_dev = he_dev->pci_dev;

	/* disable interrupts */

	if (he_dev->membase) {
		pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
		gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
		pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);

#ifdef USE_TASKLET
		tasklet_disable(&he_dev->tasklet);
#endif

		/* disable recv and transmit */

		reg = he_readl_mbox(he_dev, CS_ERCTL0);
		reg &= ~(TX_ENABLE|ER_ENABLE);
		he_writel_mbox(he_dev, reg, CS_ERCTL0);

		reg = he_readl(he_dev, RC_CONFIG);
		reg &= ~(RX_ENABLE);
		he_writel(he_dev, reg, RC_CONFIG);
	}

#ifdef CONFIG_ATM_HE_USE_SUNI
	if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
		he_dev->atm_dev->phy->stop(he_dev->atm_dev);
#endif /* CONFIG_ATM_HE_USE_SUNI */

	if (he_dev->irq)
		free_irq(he_dev->irq, he_dev);

	if (he_dev->irq_base)
		pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
			* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);

	if (he_dev->hsp)
		pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
						he_dev->hsp, he_dev->hsp_phys);

	if (he_dev->rbpl_base) {
#ifdef USE_RBPL_POOL
		int i;

		for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
			void *cpuaddr = he_dev->rbpl_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;

			pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
			* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
	}

#ifdef USE_RBPL_POOL
	if (he_dev->rbpl_pool)
		pci_pool_destroy(he_dev->rbpl_pool);
#endif

#ifdef USE_RBPS
	if (he_dev->rbps_base) {
#ifdef USE_RBPS_POOL
		int i;

		for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
			void *cpuaddr = he_dev->rbps_virt[i].virt;
			dma_addr_t dma_handle = he_dev->rbps_base[i].phys;

			pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
		}
#else
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
#endif
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
			* sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
	}

#ifdef USE_RBPS_POOL
	if (he_dev->rbps_pool)
		pci_pool_destroy(he_dev->rbps_pool);
#endif

#endif /* USE_RBPS */

	if (he_dev->rbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
							he_dev->rbrq_base, he_dev->rbrq_phys);

	if (he_dev->tbrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
							he_dev->tbrq_base, he_dev->tbrq_phys);

	if (he_dev->tpdrq_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
							he_dev->tpdrq_base, he_dev->tpdrq_phys);

#ifdef USE_TPD_POOL
	if (he_dev->tpd_pool)
		pci_pool_destroy(he_dev->tpd_pool);
#else
	if (he_dev->tpd_base)
		pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
							he_dev->tpd_base, he_dev->tpd_base_phys);
#endif

	if (he_dev->pci_dev) {
		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
		command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
		pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
	}

	if (he_dev->membase)
		iounmap(he_dev->membase);
}

static struct he_tpd *
__alloc_tpd(struct he_dev *he_dev)
{
#ifdef USE_TPD_POOL
	struct he_tpd *tpd;
	dma_addr_t dma_handle;

	tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &dma_handle);
	if (tpd == NULL)
		return NULL;

	tpd->status = TPD_ADDR(dma_handle);
	tpd->reserved = 0;
	tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
	tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
	tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;

	return tpd;
#else
	int i;

	for (i = 0; i < CONFIG_NUMTPDS; ++i) {
		++he_dev->tpd_head;
		if (he_dev->tpd_head > he_dev->tpd_end) {
			he_dev->tpd_head = he_dev->tpd_base;
		}

		if (!he_dev->tpd_head->inuse) {
			he_dev->tpd_head->inuse = 1;
			he_dev->tpd_head->status &= TPD_MASK;
			he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
			he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
			he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
			return he_dev->tpd_head;
		}
	}
	hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
	return NULL;
#endif
}

#define AAL5_LEN(buf,len) \
			((((unsigned char *)(buf))[(len)-6] << 8) | \
				(((unsigned char *)(buf))[(len)-5]))
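/* the aal5 trailer occupies the last 8 bytes of the pdu: uu, cpi, a
 * 16-bit length (extracted above) and a 32-bit crc, which puts the
 * length field at offsets len-6 and len-5 */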
1758
1759/* 2.10.1.2 receive
1760 *
1761 * aal5 packets can optionally return the tcp checksum in the lower
1762 * 16 bits of the crc (RSR0_TCP_CKSUM)
1763 */
1764
1765#define TCP_CKSUM(buf,len) \
1766 ((((unsigned char *)(buf))[(len)-2] << 8) | \
1767 (((unsigned char *)(buf))[(len-1)]))
1768
1769static int
1770he_service_rbrq(struct he_dev *he_dev, int group)
1771{
1772 struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1773 ((unsigned long)he_dev->rbrq_base |
1774 he_dev->hsp->group[group].rbrq_tail);
1775 struct he_rbp *rbp = NULL;
1776 unsigned cid, lastcid = -1;
1777 unsigned buf_len = 0;
1778 struct sk_buff *skb;
1779 struct atm_vcc *vcc = NULL;
1780 struct he_vcc *he_vcc;
1781 struct he_iovec *iov;
1782 int pdus_assembled = 0;
1783 int updated = 0;
1784
1785 read_lock(&vcc_sklist_lock);
1786 while (he_dev->rbrq_head != rbrq_tail) {
1787 ++updated;
1788
1789 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1790 he_dev->rbrq_head, group,
1791 RBRQ_ADDR(he_dev->rbrq_head),
1792 RBRQ_BUFLEN(he_dev->rbrq_head),
1793 RBRQ_CID(he_dev->rbrq_head),
1794 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1795 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1796 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1797 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1798 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1799 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1800
1801#ifdef USE_RBPS
1802 if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
1803 rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1804 else
1805#endif
1806 rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1807
1808 buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1809 cid = RBRQ_CID(he_dev->rbrq_head);
1810
1811 if (cid != lastcid)
1812 vcc = __find_vcc(he_dev, cid);
1813 lastcid = cid;
1814
1815 if (vcc == NULL) {
1816 hprintk("vcc == NULL (cid 0x%x)\n", cid);
1817 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1818 rbp->status &= ~RBP_LOANED;
1819
1820 goto next_rbrq_entry;
1821 }
1822
1823 he_vcc = HE_VCC(vcc);
1824 if (he_vcc == NULL) {
1825 hprintk("he_vcc == NULL (cid 0x%x)\n", cid);
1826 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1827 rbp->status &= ~RBP_LOANED;
1828 goto next_rbrq_entry;
1829 }
1830
1831 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1832 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
1833 atomic_inc(&vcc->stats->rx_drop);
1834 goto return_host_buffers;
1835 }
1836
1837 he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
1838 he_vcc->iov_tail->iov_len = buf_len;
1839 he_vcc->pdu_len += buf_len;
1840 ++he_vcc->iov_tail;
1841
1842 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1843 lastcid = -1;
1844 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
1845 wake_up(&he_vcc->rx_waitq);
1846 goto return_host_buffers;
1847 }
1848
1849#ifdef notdef
1850 if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
1851 hprintk("iovec full! cid 0x%x\n", cid);
1852 goto return_host_buffers;
1853 }
1854#endif
1855 if (!RBRQ_END_PDU(he_dev->rbrq_head))
1856 goto next_rbrq_entry;
1857
1858 if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1859 || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1860 HPRINTK("%s%s (%d.%d)\n",
1861 RBRQ_CRC_ERR(he_dev->rbrq_head)
1862 ? "CRC_ERR " : "",
1863 RBRQ_LEN_ERR(he_dev->rbrq_head)
1864 ? "LEN_ERR" : "",
1865 vcc->vpi, vcc->vci);
1866 atomic_inc(&vcc->stats->rx_err);
1867 goto return_host_buffers;
1868 }
1869
1870 skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1871 GFP_ATOMIC);
1872 if (!skb) {
1873 HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1874 goto return_host_buffers;
1875 }
1876
1877 if (rx_skb_reserve > 0)
1878 skb_reserve(skb, rx_skb_reserve);
1879
1880		__net_timestamp(skb);
1881
1882 for (iov = he_vcc->iov_head;
1883 iov < he_vcc->iov_tail; ++iov) {
1884#ifdef USE_RBPS
1885 if (iov->iov_base & RBP_SMALLBUF)
1886 memcpy(skb_put(skb, iov->iov_len),
1887 he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1888 else
1889#endif
1890 memcpy(skb_put(skb, iov->iov_len),
1891 he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1892 }
1893
1894 switch (vcc->qos.aal) {
1895 case ATM_AAL0:
1896 /* 2.10.1.5 raw cell receive */
1897 skb->len = ATM_AAL0_SDU;
1898			skb_set_tail_pointer(skb, skb->len);
1899			break;
1900 case ATM_AAL5:
1901 /* 2.10.1.2 aal5 receive */
1902
1903 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1904			skb_set_tail_pointer(skb, skb->len);
1905#ifdef USE_CHECKSUM_HW
1906 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1907				skb->ip_summed = CHECKSUM_COMPLETE;
1908				skb->csum = TCP_CKSUM(skb->data,
1909 he_vcc->pdu_len);
1910 }
1911#endif
1912 break;
1913 }
1914
1915#ifdef should_never_happen
1916 if (skb->len > vcc->qos.rxtp.max_sdu)
1917 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1918#endif
1919
1920#ifdef notdef
1921 ATM_SKB(skb)->vcc = vcc;
1922#endif
1923		spin_unlock(&he_dev->global_lock);
1924		vcc->push(vcc, skb);
1925		spin_lock(&he_dev->global_lock);
1926
1927 atomic_inc(&vcc->stats->rx);
1928
1929return_host_buffers:
1930 ++pdus_assembled;
1931
1932 for (iov = he_vcc->iov_head;
1933 iov < he_vcc->iov_tail; ++iov) {
1934#ifdef USE_RBPS
1935 if (iov->iov_base & RBP_SMALLBUF)
1936 rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
1937 else
1938#endif
1939 rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
1940
1941 rbp->status &= ~RBP_LOANED;
1942 }
1943
1944 he_vcc->iov_tail = he_vcc->iov_head;
1945 he_vcc->pdu_len = 0;
1946
1947next_rbrq_entry:
1948 he_dev->rbrq_head = (struct he_rbrq *)
1949 ((unsigned long) he_dev->rbrq_base |
1950 RBRQ_MASK(++he_dev->rbrq_head));
1951
1952 }
1953 read_unlock(&vcc_sklist_lock);
1954
1955 if (updated) {
1956 if (updated > he_dev->rbrq_peak)
1957 he_dev->rbrq_peak = updated;
1958
1959 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1960 G0_RBRQ_H + (group * 16));
1961 }
1962
1963 return pdus_assembled;
1964}
1965
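/* he_service_tbrq -- reap completed transmits.  the adapter returns
 * the bus address of each finished tpd; with USE_TPD_POOL that address
 * is matched against the outstanding_tpds list (it is not an index
 * into a static array), the data buffers are unmapped and the skb is
 * popped or freed.  an EOS entry instead wakes a thread sleeping in
 * he_close(). */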
1966static void
1967he_service_tbrq(struct he_dev *he_dev, int group)
1968{
1969 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1970 ((unsigned long)he_dev->tbrq_base |
1971 he_dev->hsp->group[group].tbrq_tail);
1972 struct he_tpd *tpd;
1973 int slot, updated = 0;
1974#ifdef USE_TPD_POOL
1975 struct he_tpd *__tpd;
1976#endif
1977
1978 /* 2.1.6 transmit buffer return queue */
1979
1980 while (he_dev->tbrq_head != tbrq_tail) {
1981 ++updated;
1982
1983 HPRINTK("tbrq%d 0x%x%s%s\n",
1984 group,
1985 TBRQ_TPD(he_dev->tbrq_head),
1986 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1987 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1988#ifdef USE_TPD_POOL
1989 tpd = NULL;
1990 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1991 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1992 tpd = __tpd;
1993 list_del(&__tpd->entry);
1994 break;
1995 }
1996 }
1997
1998 if (tpd == NULL) {
1999 hprintk("unable to locate tpd for dma buffer %x\n",
2000 TBRQ_TPD(he_dev->tbrq_head));
2001 goto next_tbrq_entry;
2002 }
2003#else
2004 tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
2005#endif
2006
2007		if (TBRQ_EOS(he_dev->tbrq_head)) {
2008			if (tpd->vcc) {
2009				HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
2010					he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
2011				wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2012			}
2013			goto next_tbrq_entry;
2014		}
2015
2016 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2017 if (tpd->iovec[slot].addr)
2018 pci_unmap_single(he_dev->pci_dev,
2019 tpd->iovec[slot].addr,
2020 tpd->iovec[slot].len & TPD_LEN_MASK,
2021 PCI_DMA_TODEVICE);
2022 if (tpd->iovec[slot].len & TPD_LST)
2023 break;
2024
2025 }
2026
2027 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2028 if (tpd->vcc && tpd->vcc->pop)
2029 tpd->vcc->pop(tpd->vcc, tpd->skb);
2030 else
2031 dev_kfree_skb_any(tpd->skb);
2032 }
2033
2034next_tbrq_entry:
2035#ifdef USE_TPD_POOL
2036 if (tpd)
2037 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2038#else
2039 tpd->inuse = 0;
2040#endif
2041 he_dev->tbrq_head = (struct he_tbrq *)
2042 ((unsigned long) he_dev->tbrq_base |
2043 TBRQ_MASK(++he_dev->tbrq_head));
2044 }
2045
2046 if (updated) {
2047 if (updated > he_dev->tbrq_peak)
2048 he_dev->tbrq_peak = updated;
2049
2050 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2051 G0_TBRQ_H + (group * 16));
2052 }
2053}
2054
2055
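/* he_service_rbpl -- replenish the large receive buffer pool.  the
 * tail is advanced over entries the host owns again, marking each one
 * RBP_LOANED (i.e. handed back to the adapter), and stops either when
 * the ring would catch the head (table 3.42 forbids tail == head) or
 * at an entry the adapter still holds. */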
2056static void
2057he_service_rbpl(struct he_dev *he_dev, int group)
2058{
2059 struct he_rbp *newtail;
2060 struct he_rbp *rbpl_head;
2061 int moved = 0;
2062
2063 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2064 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2065
2066 for (;;) {
2067 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2068 RBPL_MASK(he_dev->rbpl_tail+1));
2069
2070 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2071 if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2072 break;
2073
2074 newtail->status |= RBP_LOANED;
2075 he_dev->rbpl_tail = newtail;
2076 ++moved;
2077 }
2078
2079 if (moved)
2080 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2081}
2082
2083#ifdef USE_RBPS
2084static void
2085he_service_rbps(struct he_dev *he_dev, int group)
2086{
2087 struct he_rbp *newtail;
2088 struct he_rbp *rbps_head;
2089 int moved = 0;
2090
2091 rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2092 RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2093
2094 for (;;) {
2095 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2096 RBPS_MASK(he_dev->rbps_tail+1));
2097
2098 /* table 3.42 -- rbps_tail should never be set to rbps_head */
2099 if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2100 break;
2101
2102 newtail->status |= RBP_LOANED;
2103 he_dev->rbps_tail = newtail;
2104 ++moved;
2105 }
2106
2107 if (moved)
2108 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2109}
2110#endif /* USE_RBPS */
2111
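/* he_tasklet -- bottom half.  walks the interrupt event ring filled
 * in by the adapter and dispatches on the event type: rbrq work also
 * triggers buffer pool replenishment, ITYPE_INVALID (see 8.1.1) means
 * the status word has not been updated yet so every queue is polled,
 * and the adapter's view of the queue head is updated just once per
 * batch. */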
2112static void
2113he_tasklet(unsigned long data)
2114{
2115 unsigned long flags;
2116 struct he_dev *he_dev = (struct he_dev *) data;
2117 int group, type;
2118 int updated = 0;
2119
2120 HPRINTK("tasklet (0x%lx)\n", data);
2121#ifdef USE_TASKLET
2122 spin_lock_irqsave(&he_dev->global_lock, flags);
2123#endif
2124
2125 while (he_dev->irq_head != he_dev->irq_tail) {
2126 ++updated;
2127
2128 type = ITYPE_TYPE(he_dev->irq_head->isw);
2129 group = ITYPE_GROUP(he_dev->irq_head->isw);
2130
2131 switch (type) {
2132 case ITYPE_RBRQ_THRESH:
2133 HPRINTK("rbrq%d threshold\n", group);
2134 /* fall through */
2135 case ITYPE_RBRQ_TIMER:
2136 if (he_service_rbrq(he_dev, group)) {
2137 he_service_rbpl(he_dev, group);
2138#ifdef USE_RBPS
2139 he_service_rbps(he_dev, group);
2140#endif /* USE_RBPS */
2141 }
2142 break;
2143 case ITYPE_TBRQ_THRESH:
2144 HPRINTK("tbrq%d threshold\n", group);
2145 /* fall through */
2146 case ITYPE_TPD_COMPLETE:
2147 he_service_tbrq(he_dev, group);
2148 break;
2149 case ITYPE_RBPL_THRESH:
2150 he_service_rbpl(he_dev, group);
2151 break;
2152 case ITYPE_RBPS_THRESH:
2153#ifdef USE_RBPS
2154 he_service_rbps(he_dev, group);
2155#endif /* USE_RBPS */
2156 break;
2157 case ITYPE_PHY:
2158 HPRINTK("phy interrupt\n");
2159#ifdef CONFIG_ATM_HE_USE_SUNI
2160 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2161 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2162 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2163 spin_lock_irqsave(&he_dev->global_lock, flags);
2164#endif
2165 break;
2166 case ITYPE_OTHER:
2167 switch (type|group) {
2168 case ITYPE_PARITY:
2169 hprintk("parity error\n");
2170 break;
2171 case ITYPE_ABORT:
2172 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2173 break;
2174 }
2175 break;
2176 case ITYPE_TYPE(ITYPE_INVALID):
2177 /* see 8.1.1 -- check all queues */
2178
2179 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2180
2181 he_service_rbrq(he_dev, 0);
2182 he_service_rbpl(he_dev, 0);
2183#ifdef USE_RBPS
2184 he_service_rbps(he_dev, 0);
2185#endif /* USE_RBPS */
2186 he_service_tbrq(he_dev, 0);
2187 break;
2188 default:
2189 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2190 }
2191
2192 he_dev->irq_head->isw = ITYPE_INVALID;
2193
2194 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2195 }
2196
2197 if (updated) {
2198 if (updated > he_dev->irq_peak)
2199 he_dev->irq_peak = updated;
2200
2201 he_writel(he_dev,
2202 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2203 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2204 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2205 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2206 }
2207#ifdef USE_TASKLET
2208 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2209#endif
2210}
2211
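/* he_irq_handler -- top half.  the adapter DMAs its interrupt queue
 * tail offset into host memory (irq_tailoffset); if it disagrees with
 * our head there is work to do, which is deferred to the tasklet.
 * the IRQ0_BASE re-read below covers the 8.1.2 errata case where that
 * offset write has not landed yet. */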
2212static irqreturn_t
2213he_irq_handler(int irq, void *dev_id)
2214{
2215 unsigned long flags;
2216 struct he_dev *he_dev = (struct he_dev * )dev_id;
2217 int handled = 0;
2218
2219 if (he_dev == NULL)
2220 return IRQ_NONE;
2221
2222 spin_lock_irqsave(&he_dev->global_lock, flags);
2223
2224 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2225 (*he_dev->irq_tailoffset << 2));
2226
2227 if (he_dev->irq_tail == he_dev->irq_head) {
2228 HPRINTK("tailoffset not updated?\n");
2229 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2230 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2231 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2232 }
2233
2234#ifdef DEBUG
2235 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2236 hprintk("spurious (or shared) interrupt?\n");
2237#endif
2238
2239 if (he_dev->irq_head != he_dev->irq_tail) {
2240 handled = 1;
2241#ifdef USE_TASKLET
2242 tasklet_schedule(&he_dev->tasklet);
2243#else
2244 he_tasklet((unsigned long) he_dev);
2245#endif
2246 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2247 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2248 }
2249 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2250 return IRQ_RETVAL(handled);
2251
2252}
2253
2254static __inline__ void
2255__enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2256{
2257 struct he_tpdrq *new_tail;
2258
2259 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2260 tpd, cid, he_dev->tpdrq_tail);
2261
2262 /* new_tail = he_dev->tpdrq_tail; */
2263 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2264 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2265
2266 /*
2267 * check to see if we are about to set the tail == head
2268 * if true, update the head pointer from the adapter
2269 * to see if this is really the case (reading the queue
2270 * head for every enqueue would be unnecessarily slow)
2271 */
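	/* (all of the driver's queues use the same arithmetic: each ring
	 * is a power-of-two size and naturally aligned, so masking the
	 * incremented pointer and OR-ing it back onto the ring base both
	 * advances and wraps it -- the increment past the last slot masks
	 * to zero and lands on the first slot again) */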
2272
2273 if (new_tail == he_dev->tpdrq_head) {
2274 he_dev->tpdrq_head = (struct he_tpdrq *)
2275 (((unsigned long)he_dev->tpdrq_base) |
2276 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2277
2278 if (new_tail == he_dev->tpdrq_head) {
2279			int slot;
2280
2281			hprintk("tpdrq full (cid 0x%x)\n", cid);
2282 /*
2283 * FIXME
2284 * push tpd onto a transmit backlog queue
2285 * after service_tbrq, service the backlog
2286 * for now, we just drop the pdu
2287 */
2288			for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2289 if (tpd->iovec[slot].addr)
2290 pci_unmap_single(he_dev->pci_dev,
2291 tpd->iovec[slot].addr,
2292 tpd->iovec[slot].len & TPD_LEN_MASK,
2293 PCI_DMA_TODEVICE);
2294 }
2295			if (tpd->skb) {
2296 if (tpd->vcc->pop)
2297 tpd->vcc->pop(tpd->vcc, tpd->skb);
2298 else
2299 dev_kfree_skb_any(tpd->skb);
2300 atomic_inc(&tpd->vcc->stats->tx_err);
2301 }
2302#ifdef USE_TPD_POOL
2303 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2304#else
2305 tpd->inuse = 0;
2306#endif
2307 return;
2308 }
2309 }
2310
2311 /* 2.1.5 transmit packet descriptor ready queue */
2312#ifdef USE_TPD_POOL
2313 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2314 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2315#else
2316 he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
2317 (TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
2318#endif
2319 he_dev->tpdrq_tail->cid = cid;
2320 wmb();
2321
2322 he_dev->tpdrq_tail = new_tail;
2323
2324 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2325 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
2326}
2327
2328static int
2329he_open(struct atm_vcc *vcc)
2330{
2331 unsigned long flags;
2332 struct he_dev *he_dev = HE_DEV(vcc->dev);
2333 struct he_vcc *he_vcc;
2334 int err = 0;
2335 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2336 short vpi = vcc->vpi;
2337 int vci = vcc->vci;
2338
2339 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2340 return 0;
2341
2342 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2343
2344 set_bit(ATM_VF_ADDR, &vcc->flags);
2345
2346 cid = he_mkcid(he_dev, vpi, vci);
2347
2348	he_vcc = kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2349	if (he_vcc == NULL) {
2350 hprintk("unable to allocate he_vcc during open\n");
2351 return -ENOMEM;
2352 }
2353
2354 he_vcc->iov_tail = he_vcc->iov_head;
2355 he_vcc->pdu_len = 0;
2356 he_vcc->rc_index = -1;
2357
2358 init_waitqueue_head(&he_vcc->rx_waitq);
2359 init_waitqueue_head(&he_vcc->tx_waitq);
2360
2361 vcc->dev_data = he_vcc;
2362
2363 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2364 int pcr_goal;
2365
2366 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2367 if (pcr_goal == 0)
2368 pcr_goal = he_dev->atm_dev->link_rate;
2369 if (pcr_goal < 0) /* means round down, technically */
2370 pcr_goal = -pcr_goal;
2371
2372 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2373
2374 switch (vcc->qos.aal) {
2375 case ATM_AAL5:
2376 tsr0_aal = TSR0_AAL5;
2377 tsr4 = TSR4_AAL5;
2378 break;
2379 case ATM_AAL0:
2380 tsr0_aal = TSR0_AAL0_SDU;
2381 tsr4 = TSR4_AAL0_SDU;
2382 break;
2383 default:
2384 err = -EINVAL;
2385 goto open_failed;
2386 }
2387
2388 spin_lock_irqsave(&he_dev->global_lock, flags);
2389 tsr0 = he_readl_tsr0(he_dev, cid);
2390 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2391
2392 if (TSR0_CONN_STATE(tsr0) != 0) {
2393 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2394 err = -EBUSY;
2395 goto open_failed;
2396 }
2397
2398 switch (vcc->qos.txtp.traffic_class) {
2399 case ATM_UBR:
2400 /* 2.3.3.1 open connection ubr */
2401
2402 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2403 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2404 break;
2405
2406 case ATM_CBR:
2407 /* 2.3.3.2 open connection cbr */
2408
2409 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2410 if ((he_dev->total_bw + pcr_goal)
2411 > (he_dev->atm_dev->link_rate * 9 / 10))
2412 {
2413 err = -EBUSY;
2414 goto open_failed;
2415 }
2416
2417 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2418
2419 /* find an unused cs_stper register */
2420 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2421 if (he_dev->cs_stper[reg].inuse == 0 ||
2422 he_dev->cs_stper[reg].pcr == pcr_goal)
2423 break;
2424
2425 if (reg == HE_NUM_CS_STPER) {
2426 err = -EBUSY;
2427 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2428 goto open_failed;
2429 }
2430
2431 he_dev->total_bw += pcr_goal;
2432
2433 he_vcc->rc_index = reg;
2434 ++he_dev->cs_stper[reg].inuse;
2435 he_dev->cs_stper[reg].pcr = pcr_goal;
2436
2437 clock = he_is622(he_dev) ? 66667000 : 50000000;
2438 period = clock / pcr_goal;
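			/* illustrative numbers: a 155Mb/s card uses the
			 * 50MHz cell clock, so a pcr_goal of 353207
			 * cells/sec (full line rate) yields period = 141;
			 * half that value, converted to the adapter's atmf
			 * rate format, is what gets loaded below */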
2439
2440 HPRINTK("rc_index = %d period = %d\n",
2441 reg, period);
2442
2443 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2444 CS_STPER0 + reg);
2445 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2446
2447 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2448 TSR0_RC_INDEX(reg);
2449
2450 break;
2451 default:
2452 err = -EINVAL;
2453 goto open_failed;
2454 }
2455
2456 spin_lock_irqsave(&he_dev->global_lock, flags);
2457
2458 he_writel_tsr0(he_dev, tsr0, cid);
2459 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2460 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2461 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2462 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2463 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2464
2465 he_writel_tsr3(he_dev, 0x0, cid);
2466 he_writel_tsr5(he_dev, 0x0, cid);
2467 he_writel_tsr6(he_dev, 0x0, cid);
2468 he_writel_tsr7(he_dev, 0x0, cid);
2469 he_writel_tsr8(he_dev, 0x0, cid);
2470 he_writel_tsr10(he_dev, 0x0, cid);
2471 he_writel_tsr11(he_dev, 0x0, cid);
2472 he_writel_tsr12(he_dev, 0x0, cid);
2473 he_writel_tsr13(he_dev, 0x0, cid);
2474 he_writel_tsr14(he_dev, 0x0, cid);
2475 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2476 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2477 }
2478
2479 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2480 unsigned aal;
2481
2482 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2483 &HE_VCC(vcc)->rx_waitq);
2484
2485 switch (vcc->qos.aal) {
2486 case ATM_AAL5:
2487 aal = RSR0_AAL5;
2488 break;
2489 case ATM_AAL0:
2490 aal = RSR0_RAWCELL;
2491 break;
2492 default:
2493 err = -EINVAL;
2494 goto open_failed;
2495 }
2496
2497 spin_lock_irqsave(&he_dev->global_lock, flags);
2498
2499 rsr0 = he_readl_rsr0(he_dev, cid);
2500 if (rsr0 & RSR0_OPEN_CONN) {
2501 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2502
2503 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2504 err = -EBUSY;
2505 goto open_failed;
2506 }
2507
2508#ifdef USE_RBPS
2509 rsr1 = RSR1_GROUP(0);
2510 rsr4 = RSR4_GROUP(0);
2511#else /* !USE_RBPS */
2512 rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2513 rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2514#endif /* USE_RBPS */
2515 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2516 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2517
2518#ifdef USE_CHECKSUM_HW
2519 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2520 rsr0 |= RSR0_TCP_CKSUM;
2521#endif
2522
2523 he_writel_rsr4(he_dev, rsr4, cid);
2524 he_writel_rsr1(he_dev, rsr1, cid);
2525 /* 5.1.11 last parameter initialized should be
2526 the open/closed indication in rsr0 */
2527 he_writel_rsr0(he_dev,
2528 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2529 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2530
2531 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2532 }
2533
2534open_failed:
2535
2536 if (err) {
2537		kfree(he_vcc);
2538		clear_bit(ATM_VF_ADDR, &vcc->flags);
2539 }
2540 else
2541 set_bit(ATM_VF_READY, &vcc->flags);
2542
2543 return err;
2544}
2545
2546static void
2547he_close(struct atm_vcc *vcc)
2548{
2549 unsigned long flags;
2550 DECLARE_WAITQUEUE(wait, current);
2551 struct he_dev *he_dev = HE_DEV(vcc->dev);
2552 struct he_tpd *tpd;
2553 unsigned cid;
2554 struct he_vcc *he_vcc = HE_VCC(vcc);
2555#define MAX_RETRY 30
2556 int retry = 0, sleep = 1, tx_inuse;
2557
2558 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2559
2560 clear_bit(ATM_VF_READY, &vcc->flags);
2561 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2562
2563 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2564 int timeout;
2565
2566 HPRINTK("close rx cid 0x%x\n", cid);
2567
2568 /* 2.7.2.2 close receive operation */
2569
2570 /* wait for previous close (if any) to finish */
2571
2572 spin_lock_irqsave(&he_dev->global_lock, flags);
2573 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2574 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2575 udelay(250);
2576 }
2577
2578 set_current_state(TASK_UNINTERRUPTIBLE);
2579 add_wait_queue(&he_vcc->rx_waitq, &wait);
2580
2581 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2582 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2583 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2584 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2585
2586 timeout = schedule_timeout(30*HZ);
2587
2588 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2589 set_current_state(TASK_RUNNING);
2590
2591 if (timeout == 0)
2592 hprintk("close rx timeout cid 0x%x\n", cid);
2593
2594 HPRINTK("close rx cid 0x%x complete\n", cid);
2595
2596 }
2597
2598 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2599 volatile unsigned tsr4, tsr0;
2600 int timeout;
2601
2602 HPRINTK("close tx cid 0x%x\n", cid);
2603
2604 /* 2.1.2
2605 *
2606 * ... the host must first stop queueing packets to the TPDRQ
2607 * on the connection to be closed, then wait for all outstanding
2608 * packets to be transmitted and their buffers returned to the
2609 * TBRQ. When the last packet on the connection arrives in the
2610 * TBRQ, the host issues the close command to the adapter.
2611 */
2612
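		/* poll with exponential backoff (1ms doubling to a cap
		 * near 250ms) until the socket's tx allocations drain;
		 * with MAX_RETRY = 30 this gives up after roughly six
		 * seconds */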
2613 while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 0) &&
2614 (retry < MAX_RETRY)) {
2615 msleep(sleep);
2616 if (sleep < 250)
2617 sleep = sleep * 2;
2618
2619 ++retry;
2620 }
2621
2622 if (tx_inuse)
2623 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2624
2625 /* 2.3.1.1 generic close operations with flush */
2626
2627 spin_lock_irqsave(&he_dev->global_lock, flags);
2628 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2629 /* also clears TSR4_SESSION_ENDED */
2630
2631 switch (vcc->qos.txtp.traffic_class) {
2632 case ATM_UBR:
2633 he_writel_tsr1(he_dev,
2634 TSR1_MCR(rate_to_atmf(200000))
2635 | TSR1_PCR(0), cid);
2636 break;
2637 case ATM_CBR:
2638 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2639 break;
2640 }
2641 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2642
2643 tpd = __alloc_tpd(he_dev);
2644 if (tpd == NULL) {
2645 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2646 goto close_tx_incomplete;
2647 }
2648 tpd->status |= TPD_EOS | TPD_INT;
2649 tpd->skb = NULL;
2650 tpd->vcc = vcc;
2651 wmb();
2652
2653 set_current_state(TASK_UNINTERRUPTIBLE);
2654 add_wait_queue(&he_vcc->tx_waitq, &wait);
2655 __enqueue_tpd(he_dev, tpd, cid);
2656 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2657
2658 timeout = schedule_timeout(30*HZ);
2659
2660 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2661 set_current_state(TASK_RUNNING);
2662
2663 spin_lock_irqsave(&he_dev->global_lock, flags);
2664
2665 if (timeout == 0) {
2666 hprintk("close tx timeout cid 0x%x\n", cid);
2667 goto close_tx_incomplete;
2668 }
2669
2670 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2671 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2672 udelay(250);
2673 }
2674
2675 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2676 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2677 udelay(250);
2678 }
2679
2680close_tx_incomplete:
2681
2682 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2683 int reg = he_vcc->rc_index;
2684
2685 HPRINTK("cs_stper reg = %d\n", reg);
2686
2687 if (he_dev->cs_stper[reg].inuse == 0)
2688 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2689 else
2690 --he_dev->cs_stper[reg].inuse;
2691
2692 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2693 }
2694 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2695
2696 HPRINTK("close tx cid 0x%x complete\n", cid);
2697 }
2698
2699 kfree(he_vcc);
2700
2701 clear_bit(ATM_VF_ADDR, &vcc->flags);
2702}
2703
2704static int
2705he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2706{
2707 unsigned long flags;
2708 struct he_dev *he_dev = HE_DEV(vcc->dev);
2709 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2710 struct he_tpd *tpd;
2711#ifdef USE_SCATTERGATHER
2712 int i, slot = 0;
2713#endif
2714
2715#define HE_TPD_BUFSIZE 0xffff
2716
2717 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2718
2719 if ((skb->len > HE_TPD_BUFSIZE) ||
2720 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2721 hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2722 if (vcc->pop)
2723 vcc->pop(vcc, skb);
2724 else
2725 dev_kfree_skb_any(skb);
2726 atomic_inc(&vcc->stats->tx_err);
2727 return -EINVAL;
2728 }
2729
2730#ifndef USE_SCATTERGATHER
2731 if (skb_shinfo(skb)->nr_frags) {
2732 hprintk("no scatter/gather support\n");
2733 if (vcc->pop)
2734 vcc->pop(vcc, skb);
2735 else
2736 dev_kfree_skb_any(skb);
2737 atomic_inc(&vcc->stats->tx_err);
2738 return -EINVAL;
2739 }
2740#endif
2741 spin_lock_irqsave(&he_dev->global_lock, flags);
2742
2743 tpd = __alloc_tpd(he_dev);
2744 if (tpd == NULL) {
2745 if (vcc->pop)
2746 vcc->pop(vcc, skb);
2747 else
2748 dev_kfree_skb_any(skb);
2749 atomic_inc(&vcc->stats->tx_err);
2750 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2751 return -ENOMEM;
2752 }
2753
2754 if (vcc->qos.aal == ATM_AAL5)
2755 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2756 else {
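		/* raw cell: the aal0 sdu is the 4-byte cell header (no
		 * hec) plus the 48-byte payload.  byte 3 of the header
		 * holds the pti and clp bits, which must be echoed into
		 * the tpd status; the header itself is then pulled off,
		 * since the adapter rebuilds it from the connection state */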
2757 char *pti_clp = (void *) (skb->data + 3);
2758 int clp, pti;
2759
2760 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2761 clp = (*pti_clp & ATM_HDR_CLP);
2762 tpd->status |= TPD_CELLTYPE(pti);
2763 if (clp)
2764 tpd->status |= TPD_CLP;
2765
2766 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2767 }
2768
2769#ifdef USE_SCATTERGATHER
2770 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2771 skb->len - skb->data_len, PCI_DMA_TODEVICE);
2772 tpd->iovec[slot].len = skb->len - skb->data_len;
2773 ++slot;
2774
2775 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2776 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2777
2778 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2779 tpd->vcc = vcc;
2780 tpd->skb = NULL; /* not the last fragment
2781					so don't ->push() yet */
2782 wmb();
2783
2784 __enqueue_tpd(he_dev, tpd, cid);
2785 tpd = __alloc_tpd(he_dev);
2786 if (tpd == NULL) {
2787 if (vcc->pop)
2788 vcc->pop(vcc, skb);
2789 else
2790 dev_kfree_skb_any(skb);
2791 atomic_inc(&vcc->stats->tx_err);
2792 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2793 return -ENOMEM;
2794 }
2795 tpd->status |= TPD_USERCELL;
2796 slot = 0;
2797 }
2798
2799 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2800 (void *) page_address(frag->page) + frag->page_offset,
2801 frag->size, PCI_DMA_TODEVICE);
2802 tpd->iovec[slot].len = frag->size;
2803 ++slot;
2804
2805 }
2806
2807 tpd->iovec[slot - 1].len |= TPD_LST;
2808#else
2809 tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2810 tpd->length0 = skb->len | TPD_LST;
2811#endif
2812 tpd->status |= TPD_INT;
2813
2814 tpd->vcc = vcc;
2815 tpd->skb = skb;
2816 wmb();
2817 ATM_SKB(skb)->vcc = vcc;
2818
2819 __enqueue_tpd(he_dev, tpd, cid);
2820 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2821
2822 atomic_inc(&vcc->stats->tx);
2823
2824 return 0;
2825}
2826
2827static int
2828he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2829{
2830 unsigned long flags;
2831 struct he_dev *he_dev = HE_DEV(atm_dev);
2832 struct he_ioctl_reg reg;
2833 int err = 0;
2834
2835 switch (cmd) {
2836 case HE_GET_REG:
2837 if (!capable(CAP_NET_ADMIN))
2838 return -EPERM;
2839
2840 if (copy_from_user(&reg, arg,
2841 sizeof(struct he_ioctl_reg)))
2842 return -EFAULT;
2843
2844 spin_lock_irqsave(&he_dev->global_lock, flags);
2845 switch (reg.type) {
2846 case HE_REGTYPE_PCI:
2847 reg.val = he_readl(he_dev, reg.addr);
2848 break;
2849 case HE_REGTYPE_RCM:
2850 reg.val =
2851 he_readl_rcm(he_dev, reg.addr);
2852 break;
2853 case HE_REGTYPE_TCM:
2854 reg.val =
2855 he_readl_tcm(he_dev, reg.addr);
2856 break;
2857 case HE_REGTYPE_MBOX:
2858 reg.val =
2859 he_readl_mbox(he_dev, reg.addr);
2860 break;
2861 default:
2862 err = -EINVAL;
2863 break;
2864 }
2865 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2866 if (err == 0)
2867 if (copy_to_user(arg, &reg,
2868 sizeof(struct he_ioctl_reg)))
2869 return -EFAULT;
2870 break;
2871 default:
2872#ifdef CONFIG_ATM_HE_USE_SUNI
2873 if (atm_dev->phy && atm_dev->phy->ioctl)
2874 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2875#else /* CONFIG_ATM_HE_USE_SUNI */
2876 err = -EINVAL;
2877#endif /* CONFIG_ATM_HE_USE_SUNI */
2878 break;
2879 }
2880
2881 return err;
2882}
2883
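/* the SUNI phy registers are memory mapped through the FRAMER window,
 * one 32-bit word per 8-bit register -- hence the addr*4 scaling in
 * the two accessors below. */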
2884static void
2885he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2886{
2887 unsigned long flags;
2888 struct he_dev *he_dev = HE_DEV(atm_dev);
2889
2890 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2891
2892 spin_lock_irqsave(&he_dev->global_lock, flags);
2893 he_writel(he_dev, val, FRAMER + (addr*4));
2894 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2895 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2896}
2897
2898
2899static unsigned char
2900he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2901{
2902 unsigned long flags;
2903 struct he_dev *he_dev = HE_DEV(atm_dev);
2904 unsigned reg;
2905
2906 spin_lock_irqsave(&he_dev->global_lock, flags);
2907 reg = he_readl(he_dev, FRAMER + (addr*4));
2908 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2909
2910 HPRINTK("phy_get(addr 0x%lx) =0x%x\n", addr, reg);
2911 return reg;
2912}
2913
2914static int
2915he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2916{
2917 unsigned long flags;
2918 struct he_dev *he_dev = HE_DEV(dev);
2919 int left, i;
2920#ifdef notdef
2921 struct he_rbrq *rbrq_tail;
2922 struct he_tpdrq *tpdrq_head;
2923	int rbpl_head, rbpl_tail, inuse;
2924#endif
2925 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2926
2927
2928 left = *pos;
2929 if (!left--)
2930		return sprintf(page, "ATM he driver\n");
2931
2932 if (!left--)
2933 return sprintf(page, "%s%s\n\n",
2934 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2935
2936 if (!left--)
2937 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2938
2939 spin_lock_irqsave(&he_dev->global_lock, flags);
2940 mcc += he_readl(he_dev, MCC);
2941 oec += he_readl(he_dev, OEC);
2942 dcc += he_readl(he_dev, DCC);
2943 cec += he_readl(he_dev, CEC);
2944 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2945
2946 if (!left--)
2947 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2948 mcc, oec, dcc, cec);
2949
2950 if (!left--)
2951 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2952 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2953
2954 if (!left--)
2955 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2956 CONFIG_TPDRQ_SIZE);
2957
2958 if (!left--)
2959 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2960 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2961
2962 if (!left--)
2963 return sprintf(page, "tbrq_size = %d peak = %d\n",
2964 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2965
2966
2967#ifdef notdef
2968 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2969 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2970
2971 inuse = rbpl_head - rbpl_tail;
2972 if (inuse < 0)
2973 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2974 inuse /= sizeof(struct he_rbp);
2975
2976 if (!left--)
2977 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2978 CONFIG_RBPL_SIZE, inuse);
2979#endif
2980
2981 if (!left--)
2982 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2983
2984 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2985 if (!left--)
2986 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2987 he_dev->cs_stper[i].pcr,
2988 he_dev->cs_stper[i].inuse);
2989
2990 if (!left--)
2991 return sprintf(page, "total bw (cbr): %d (limit %d)\n",
2992			he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10);
2993
2994 return 0;
2995}
2996
2997/* eeprom routines -- see 4.7 */
2998
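/* the serial eeprom hangs off bits in HOST_CNTL and is read by bit
 * banging: readtab[] holds the waveform for the READ opcode and
 * clocktab[] the clock transitions, the byte address is shifted out
 * msb first on bit 9 of HOST_CNTL, and the result is sampled msb
 * first from ID_DOUT with an EEPROM_DELAY between edges. */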
2999u8
3000read_prom_byte(struct he_dev *he_dev, int addr)
3001{
3002 u32 val = 0, tmp_read = 0;
3003 int i, j = 0;
3004 u8 byte_read = 0;
3005
3006 val = readl(he_dev->membase + HOST_CNTL);
3007 val &= 0xFFFFE0FF;
3008
3009 /* Turn on write enable */
3010 val |= 0x800;
3011 he_writel(he_dev, val, HOST_CNTL);
3012
3013 /* Send READ instruction */
3014	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
3015		he_writel(he_dev, val | readtab[i], HOST_CNTL);
3016 udelay(EEPROM_DELAY);
3017 }
3018
3019 /* Next, we need to send the byte address to read from */
3020 for (i = 7; i >= 0; i--) {
3021 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3022 udelay(EEPROM_DELAY);
3023 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3024 udelay(EEPROM_DELAY);
3025 }
3026
3027 j = 0;
3028
3029 val &= 0xFFFFF7FF; /* Turn off write enable */
3030 he_writel(he_dev, val, HOST_CNTL);
3031
3032 /* Now, we can read data from the EEPROM by clocking it in */
3033 for (i = 7; i >= 0; i--) {
3034 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3035 udelay(EEPROM_DELAY);
3036 tmp_read = he_readl(he_dev, HOST_CNTL);
3037 byte_read |= (unsigned char)
3038 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
3039 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3040 udelay(EEPROM_DELAY);
3041 }
3042
3043 he_writel(he_dev, val | ID_CS, HOST_CNTL);
3044 udelay(EEPROM_DELAY);
3045
3046 return byte_read;
3047}
3048
3049MODULE_LICENSE("GPL");
3050MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
3051MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
3052module_param(disable64, bool, 0);
3053MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
3054module_param(nvpibits, short, 0);
3055MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
3056module_param(nvcibits, short, 0);
3057MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
3058module_param(rx_skb_reserve, short, 0);
3059MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
3060module_param(irq_coalesce, bool, 0);
3061MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
3062module_param(sdh, bool, 0);
3063MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
3064
3065static struct pci_device_id he_pci_tbl[] = {
3066 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
3067 0, 0, 0 },
3068 { 0, }
3069};
3070
3071MODULE_DEVICE_TABLE(pci, he_pci_tbl);
3072
3073static struct pci_driver he_driver = {
3074 .name = "he",
3075 .probe = he_init_one,
3076 .remove = __devexit_p(he_remove_one),
3077 .id_table = he_pci_tbl,
3078};
3079
3080static int __init he_init(void)
3081{
3082 return pci_register_driver(&he_driver);
3083}
3084
3085static void __exit he_cleanup(void)
3086{
3087 pci_unregister_driver(&he_driver);
3088}
3089
3090module_init(he_init);
3091module_exit(he_cleanup);