1 /* $Id$
2 *
3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details.
6 *
7 * Copyright (c) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
8 */
9
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/interrupt.h>
15 #include <asm/sn/sgi.h>
16 #include <asm/delay.h>
17 #include <asm/sn/intr.h>
18 #include <asm/sn/sn2/sn_private.h>
19 #include <asm/sn/sn2/shubio.h>
20 #include <asm/sn/iograph.h>
21 #include <asm/sn/invent.h>
22 #include <asm/sn/hcl.h>
23 #include <asm/sn/labelcl.h>
24 #include <asm/sn/pci/bridge.h>
25 #include <asm/sn/xtalk/xtalk_private.h>
26 #include <asm/sn/simulator.h>
27
28 /* #define DEBUG 1 */
29 /* #define XBOW_DEBUG 1 */
30 /* #define DEBUG_ERROR 1 */
31
32 #define kdebug 0
33
34
35 /*
36 * Files needed to get the device driver entry points
37 */
38
39 #include <asm/sn/xtalk/xbow.h>
40 #include <asm/sn/xtalk/xtalk.h>
41 #include <asm/sn/xtalk/xswitch.h>
42 #include <asm/sn/xtalk/xwidget.h>
43
44 #include <asm/sn/prio.h>
45 #include <asm/sn/hcl_util.h>
46
47
48 #define NEW(ptr) (ptr = kmalloc(sizeof (*(ptr)), GFP_KERNEL))
49 #define DEL(ptr) (kfree(ptr))
50
/*
 * This file supports the Xbow chip.  Main functions: initialization,
 * error handling, and GBR.
 */
55
56 /*
57 * each vertex corresponding to an xbow chip
58 * has a "fastinfo" pointer pointing at one
59 * of these things.
60 */
typedef struct xbow_soft_s *xbow_soft_t;

/*
 * Per-crossbow driver-private ("soft") state.  One instance is
 * allocated in xbow_attach() and hung off the xbow's hwgraph vertex
 * via the fastinfo pointer (see xbow_soft_set/xbow_soft_get below).
 */
struct xbow_soft_s {
    vertex_hdl_t conn;          /* our connection point */
    vertex_hdl_t vhdl;          /* xbow's private vertex */
    vertex_hdl_t busv;          /* the xswitch vertex */
    xbow_t *base;               /* PIO pointer to crossbow chip */
    char *name;                 /* hwgraph name */

    /* performance-counter and link-status bookkeeping, guarded by
     * xbow_perf_lock */
    xbow_perf_t xbow_perfcnt[XBOW_PERF_COUNTERS];
    xbow_perf_link_t xbow_perflink[MAX_XBOW_PORTS];
    xbow_link_status_t xbow_link_status[MAX_XBOW_PORTS];
    spinlock_t xbow_perf_lock;
    int link_monitor;
    widget_cfg_t *wpio[MAX_XBOW_PORTS]; /* cached PIO pointer */

    /* Bandwidth allocation state. Bandwidth values are for the
     * destination port since contention happens there.
     * Implicit mapping from xbow ports (8..f) -> (0..7) array indices.
     */
    spinlock_t xbow_bw_alloc_lock;              /* bw allocation lock */
    unsigned long long bw_hiwm[MAX_XBOW_PORTS]; /* hiwater mark values */
    unsigned long long bw_cur_used[MAX_XBOW_PORTS]; /* bw used currently */
};
85
86 #define xbow_soft_set(v,i) hwgraph_fastinfo_set((v), (arbitrary_info_t)(i))
87 #define xbow_soft_get(v) ((xbow_soft_t)hwgraph_fastinfo_get((v)))
88
89 /*
90 * Function Table of Contents
91 */
92
93 void xbow_mlreset(xbow_t *);
94 int xbow_attach(vertex_hdl_t);
95
96 int xbow_widget_present(xbow_t *, int);
97 static int xbow_link_alive(xbow_t *, int);
98 vertex_hdl_t xbow_widget_lookup(vertex_hdl_t, int);
99
100 void xbow_intr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
101
102
103
104 void xbow_update_perf_counters(vertex_hdl_t);
105 xbow_perf_link_t *xbow_get_perf_counters(vertex_hdl_t);
106 int xbow_enable_perf_counter(vertex_hdl_t, int, int, int);
107 xbow_link_status_t *xbow_get_llp_status(vertex_hdl_t);
108 void xbow_update_llp_status(vertex_hdl_t);
109
110 int xbow_disable_llp_monitor(vertex_hdl_t);
111 int xbow_enable_llp_monitor(vertex_hdl_t);
112 int xbow_prio_bw_alloc(vertex_hdl_t, xwidgetnum_t, xwidgetnum_t,
113 unsigned long long, unsigned long long);
114 static void xbow_setwidint(xtalk_intr_t);
115
116 xswitch_reset_link_f xbow_reset_link;
117
/* Switch-provider dispatch table handed to the xswitch layer in
 * xbow_attach(); currently only the link-reset entry point. */
xswitch_provider_t xbow_provider =
{
    xbow_reset_link,
};
122
123
124 static int
xbow_mmap(struct file * file,struct vm_area_struct * vma)125 xbow_mmap(struct file * file, struct vm_area_struct * vma)
126 {
127 unsigned long phys_addr;
128 int error = 0;
129
130 phys_addr = (unsigned long)file->private_data & ~0xc000000000000000; /* Mask out the Uncache bits */
131 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
132 vma->vm_flags |= VM_NONCACHED | VM_RESERVED | VM_IO;
133 error = io_remap_page_range(vma->vm_start, phys_addr,
134 vma->vm_end-vma->vm_start,
135 vma->vm_page_prot);
136 return(error);
137 }
138
/*
 * File operation table for the xbow driver.
 * As each of the functions is implemented, put the
 * appropriate function name below.
 */
/* Character-device operations registered for the xbow vertex;
 * only mmap of the register space is supported. */
struct file_operations xbow_fops = {
    .owner = THIS_MODULE,
    .mmap = xbow_mmap,
};
148
149 /*
150 * xbow_mlreset: called at mlreset time if the
151 * platform specific code determines that there is
152 * a crossbow in a critical path that must be
153 * functional before the driver would normally get
154 * the device properly set up.
155 *
156 * what do we need to do, that the boot prom can
157 * not be counted on to have already done, that is
158 * generic across all platforms using crossbows?
159 */
160 /*ARGSUSED */
void
xbow_mlreset(xbow_t * xbow)
{
    /* Intentionally empty: nothing generic across all crossbow
     * platforms needs doing here beyond what the boot PROM has
     * already set up (see the comment above). */
}
165
166 #ifdef XBRIDGE_REGS_SIM
167 /* xbow_set_simulated_regs: sets xbow regs as needed
168 * for powering through the boot
169 */
170 void
xbow_set_simulated_regs(xbow_t * xbow,int port)171 xbow_set_simulated_regs(xbow_t *xbow, int port)
172 {
173 /*
174 * turn on link
175 */
176 xbow->xb_link(port).link_status = (1<<31);
177 /*
178 * and give it a live widget too
179 */
180 xbow->xb_link(port).link_aux_status = XB_AUX_STAT_PRESENT;
181 /*
182 * zero the link control reg
183 */
184 xbow->xb_link(port).link_control = 0x0;
185 }
186 #endif /* XBRIDGE_REGS_SIM */
187
188 /*
189 * xbow_attach: the crosstalk provider has
190 * determined that there is a crossbow widget
191 * present, and has handed us the connection
192 * point for that vertex.
193 *
194 * We not only add our own vertex, but add
195 * some "xtalk switch" data to the switch
196 * vertex (at the connect point's parent) if
197 * it does not have any.
198 */
199
200 /*ARGSUSED */
201 int
xbow_attach(vertex_hdl_t conn)202 xbow_attach(vertex_hdl_t conn)
203 {
204 /*REFERENCED */
205 vertex_hdl_t vhdl;
206 vertex_hdl_t busv;
207 xbow_t *xbow;
208 xbow_soft_t soft;
209 int port;
210 xswitch_info_t info;
211 xtalk_intr_t intr_hdl;
212 char devnm[MAXDEVNAME], *s;
213 xbowreg_t id;
214 int rev;
215 int i;
216 int xbow_num;
217 static void xbow_errintr_handler(int, void *, struct pt_regs *);
218
219
220 #if DEBUG && ATTACH_DEBUG
221 #if defined(SUPPORT_PRINTING_V_FORMAT)
222 printk("%v: xbow_attach\n", conn);
223 #else
224 printk("0x%x: xbow_attach\n", conn);
225 #endif
226 #endif
227
228 /*
229 * Get a PIO pointer to the base of the crossbow
230 * chip.
231 */
232 #ifdef XBRIDGE_REGS_SIM
233 printk("xbow_attach: XBRIDGE_REGS_SIM FIXME: allocating %ld bytes for xbow_s\n", sizeof(xbow_t));
234 xbow = (xbow_t *) kmalloc(sizeof(xbow_t), GFP_KERNEL);
235 /*
236 * turn on ports e and f like in a real live ibrick
237 */
238 xbow_set_simulated_regs(xbow, 0xe);
239 xbow_set_simulated_regs(xbow, 0xf);
240 #else
241 xbow = (xbow_t *) xtalk_piotrans_addr(conn, 0, 0, sizeof(xbow_t), 0);
242 #endif /* XBRIDGE_REGS_SIM */
243
244 /*
245 * Locate the "switch" vertex: it is the parent
246 * of our connection point.
247 */
248 busv = hwgraph_connectpt_get(conn);
249 #if DEBUG && ATTACH_DEBUG
250 printk("xbow_attach: Bus Vertex 0x%p, conn 0x%p, xbow register 0x%p wid= 0x%x\n", busv, conn, xbow, *(volatile u32 *)xbow);
251 #endif
252
253 ASSERT(busv != GRAPH_VERTEX_NONE);
254
255 /*
256 * Create our private vertex, and connect our
257 * driver information to it. This makes it possible
258 * for diagnostic drivers to open the crossbow
259 * vertex for access to registers.
260 */
261
262 /*
263 * Register a xbow driver with devfs.
264 * file ops.
265 */
266 vhdl = NULL;
267 vhdl = hwgraph_register(conn, EDGE_LBL_XBOW, 0,
268 DEVFS_FL_AUTO_DEVNUM, 0, 0,
269 S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
270 (struct file_operations *)&xbow_fops, (void *)xbow);
271 if (!vhdl) {
272 printk(KERN_WARNING "xbow_attach: Unable to create char device for xbow conn %p\n",
273 (void *)conn);
274 }
275
276 /*
277 * Allocate the soft state structure and attach
278 * it to the xbow's vertex
279 */
280 NEW(soft);
281 soft->conn = conn;
282 soft->vhdl = vhdl;
283 soft->busv = busv;
284 soft->base = xbow;
285 /* does the universe really need another macro? */
286 /* xbow_soft_set(vhdl, (arbitrary_info_t) soft); */
287 /* hwgraph_fastinfo_set(vhdl, (arbitrary_info_t) soft); */
288
289 #define XBOW_NUM_SUFFIX_FORMAT "[xbow# %d]"
290
291 /* Add xbow number as a suffix to the hwgraph name of the xbow.
292 * This is helpful while looking at the error/warning messages.
293 */
294 xbow_num = 0;
295
296 /*
297 * get the name of this xbow vertex and keep the info.
298 * This is needed during errors and interupts, but as
299 * long as we have it, we can use it elsewhere.
300 */
301 s = dev_to_name(vhdl, devnm, MAXDEVNAME);
302 soft->name = kmalloc(strlen(s) + strlen(XBOW_NUM_SUFFIX_FORMAT) + 1,
303 GFP_KERNEL);
304 sprintf(soft->name,"%s"XBOW_NUM_SUFFIX_FORMAT, s,xbow_num);
305
306 #ifdef XBRIDGE_REGS_SIM
307 /* my o200/ibrick has id=0x2d002049, but XXBOW_WIDGET_PART_NUM is defined
308 * as 0xd000, so I'm using that for the partnum bitfield.
309 */
310 printk("xbow_attach: XBRIDGE_REGS_SIM FIXME: need xb_wid_id value!!\n");
311 id = 0x2d000049;
312 #else
313 id = xbow->xb_wid_id;
314 #endif /* XBRIDGE_REGS_SIM */
315 rev = XWIDGET_PART_REV_NUM(id);
316
317 spin_lock_init(&soft->xbow_perf_lock);
318 soft->xbow_perfcnt[0].xp_perf_reg = &xbow->xb_perf_ctr_a;
319 soft->xbow_perfcnt[1].xp_perf_reg = &xbow->xb_perf_ctr_b;
320
321 /* Initialization for GBR bw allocation */
322 spin_lock_init(&soft->xbow_bw_alloc_lock);
323
324 #define XBOW_8_BIT_PORT_BW_MAX (400 * 1000 * 1000) /* 400 MB/s */
325 #define XBOW_16_BIT_PORT_BW_MAX (800 * 1000 * 1000) /* 800 MB/s */
326
327 /* Set bandwidth hiwatermark and current values */
328 for (i = 0; i < MAX_XBOW_PORTS; i++) {
329 soft->bw_hiwm[i] = XBOW_16_BIT_PORT_BW_MAX; /* for now */
330 soft->bw_cur_used[i] = 0;
331 }
332
333 /*
334 * attach the crossbow error interrupt.
335 */
336 intr_hdl = xtalk_intr_alloc(conn, (device_desc_t)0, vhdl);
337 ASSERT(intr_hdl != NULL);
338
339 {
340 int irq = ((hub_intr_t)intr_hdl)->i_bit;
341 int cpu = ((hub_intr_t)intr_hdl)->i_cpuid;
342
343 intr_unreserve_level(cpu, irq);
344 ((hub_intr_t)intr_hdl)->i_bit = SGI_XBOW_ERROR;
345 }
346
347 xtalk_intr_connect(intr_hdl,
348 (intr_func_t) xbow_errintr_handler,
349 (intr_arg_t) soft,
350 (xtalk_intr_setfunc_t) xbow_setwidint,
351 (void *) xbow);
352
353 request_irq(SGI_XBOW_ERROR, (void *)xbow_errintr_handler, SA_SHIRQ, "XBOW error",
354 (intr_arg_t) soft);
355
356
357 /*
358 * Enable xbow error interrupts
359 */
360 xbow->xb_wid_control = (XB_WID_CTRL_REG_ACC_IE | XB_WID_CTRL_XTALK_IE);
361
362 /*
363 * take a census of the widgets present,
364 * leaving notes at the switch vertex.
365 */
366 info = xswitch_info_new(busv);
367
368 for (port = MAX_PORT_NUM - MAX_XBOW_PORTS;
369 port < MAX_PORT_NUM; ++port) {
370 if (!xbow_link_alive(xbow, port)) {
371 #if DEBUG && XBOW_DEBUG
372 printk(KERN_INFO "0x%p link %d is not alive\n",
373 (void *)busv, port);
374 #endif
375 continue;
376 }
377 if (!xbow_widget_present(xbow, port)) {
378 #if DEBUG && XBOW_DEBUG
379 printk(KERN_INFO "0x%p link %d is alive but no widget is present\n", (void *)busv, port);
380 #endif
381 continue;
382 }
383 #if DEBUG && XBOW_DEBUG
384 printk(KERN_INFO "0x%p link %d has a widget\n",
385 (void *)busv, port);
386 #endif
387
388 xswitch_info_link_is_ok(info, port);
389 /*
390 * Turn some error interrupts on
391 * and turn others off. The PROM has
392 * some things turned on we don't
393 * want to see (bandwidth allocation
394 * errors for instance); so if it
395 * is not listed here, it is not on.
396 */
397 xbow->xb_link(port).link_control =
398 ( (xbow->xb_link(port).link_control
399 /*
400 * Turn off these bits; they are non-fatal,
401 * but we might want to save some statistics
402 * on the frequency of these errors.
403 * XXX FIXME XXX
404 */
405 & ~XB_CTRL_RCV_CNT_OFLOW_IE
406 & ~XB_CTRL_XMT_CNT_OFLOW_IE
407 & ~XB_CTRL_BNDWDTH_ALLOC_IE
408 & ~XB_CTRL_RCV_IE)
409 /*
410 * These are the ones we want to turn on.
411 */
412 | (XB_CTRL_ILLEGAL_DST_IE
413 | XB_CTRL_OALLOC_IBUF_IE
414 | XB_CTRL_XMT_MAX_RTRY_IE
415 | XB_CTRL_MAXREQ_TOUT_IE
416 | XB_CTRL_XMT_RTRY_IE
417 | XB_CTRL_SRC_TOUT_IE) );
418 }
419
420 xswitch_provider_register(busv, &xbow_provider);
421
422 return 0; /* attach successful */
423 }
424
425 /*
426 * xbow_widget_present: See if a device is present
427 * on the specified port of this crossbow.
428 */
429 int
xbow_widget_present(xbow_t * xbow,int port)430 xbow_widget_present(xbow_t *xbow, int port)
431 {
432 if ( IS_RUNNING_ON_SIMULATOR() ) {
433 if ( (port == 14) || (port == 15) ) {
434 return 1;
435 }
436 else {
437 return 0;
438 }
439 }
440 else {
441 /* WAR: port 0xf on PIC is missing present bit */
442 if (XBOW_WAR_ENABLED(PV854827, xbow->xb_wid_id) &&
443 IS_PIC_XBOW(xbow->xb_wid_id) && port==0xf) {
444 return 1;
445 }
446 else if ( IS_PIC_XBOW(xbow->xb_wid_id) && port==0xb ) {
447 /* for opus the present bit doesn't work on port 0xb */
448 return 1;
449 }
450 return xbow->xb_link(port).link_aux_status & XB_AUX_STAT_PRESENT;
451 }
452 }
453
454 static int
xbow_link_alive(xbow_t * xbow,int port)455 xbow_link_alive(xbow_t * xbow, int port)
456 {
457 xbwX_stat_t xbow_linkstat;
458
459 xbow_linkstat.linkstatus = xbow->xb_link(port).link_status;
460 return (xbow_linkstat.link_alive);
461 }
462
463 /*
464 * xbow_widget_lookup
465 * Lookup the edges connected to the xbow specified, and
466 * retrieve the handle corresponding to the widgetnum
467 * specified.
468 * If not found, return 0.
469 */
470 vertex_hdl_t
xbow_widget_lookup(vertex_hdl_t vhdl,int widgetnum)471 xbow_widget_lookup(vertex_hdl_t vhdl,
472 int widgetnum)
473 {
474 xswitch_info_t xswitch_info;
475 vertex_hdl_t conn;
476
477 xswitch_info = xswitch_info_get(vhdl);
478 conn = xswitch_info_vhdl_get(xswitch_info, widgetnum);
479 return conn;
480 }
481
482 /*
483 * xbow_setwidint: called when xtalk
484 * is establishing or migrating our
485 * interrupt service.
486 */
487 static void
xbow_setwidint(xtalk_intr_t intr)488 xbow_setwidint(xtalk_intr_t intr)
489 {
490 xwidgetnum_t targ = xtalk_intr_target_get(intr);
491 iopaddr_t addr = xtalk_intr_addr_get(intr);
492 xtalk_intr_vector_t vect = xtalk_intr_vector_get(intr);
493 xbow_t *xbow = (xbow_t *) xtalk_intr_sfarg_get(intr);
494
495 xbow_intr_preset((void *) xbow, 0, targ, addr, vect);
496 }
497
498 /*
499 * xbow_intr_preset: called during mlreset time
500 * if the platform specific code needs to route
501 * an xbow interrupt before the xtalk infrastructure
502 * is available for use.
503 *
504 * Also called from xbow_setwidint, so we don't
505 * replicate the guts of the routine.
506 *
507 * XXX- probably should be renamed xbow_wid_intr_set or
508 * something to reduce confusion.
509 */
510 /*ARGSUSED3 */
511 void
xbow_intr_preset(void * which_widget,int which_widget_intr,xwidgetnum_t targ,iopaddr_t addr,xtalk_intr_vector_t vect)512 xbow_intr_preset(void *which_widget,
513 int which_widget_intr,
514 xwidgetnum_t targ,
515 iopaddr_t addr,
516 xtalk_intr_vector_t vect)
517 {
518 xbow_t *xbow = (xbow_t *) which_widget;
519
520 xbow->xb_wid_int_upper = ((0xFF000000 & (vect << 24)) |
521 (0x000F0000 & (targ << 16)) |
522 XTALK_ADDR_TO_UPPER(addr));
523 xbow->xb_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
524
525 }
526
527 #define XEM_ADD_STR(s) printk("%s", (s))
528 #define XEM_ADD_NVAR(n,v) printk("\t%20s: 0x%llx\n", (n), ((unsigned long long)v))
529 #define XEM_ADD_VAR(v) XEM_ADD_NVAR(#v,(v))
530 #define XEM_ADD_IOEF(p,n) if (IOERROR_FIELDVALID(ioe,n)) { \
531 IOERROR_GETVALUE(p,ioe,n); \
532 XEM_ADD_NVAR("ioe." #n, p); \
533 }
534
/* Running count of LLP Transmit Retry errors we have recovered from. */
int xbow_xmit_retry_errors;

/*
 * xbow_xmit_retry_error: workaround for the Xbow/Bridge "LLP
 * Transmitter Retry" problem on a given port.
 *
 * Returns nonzero if the caller should keep the XMT_RTRY interrupt
 * enabled for this port (we applied the WAR, or cannot yet tell what
 * the widget is); returns 0 if the widget is neither a Bridge nor an
 * XBridge, so the WAR does not apply.
 */
int
xbow_xmit_retry_error(xbow_soft_t soft,
                      int port)
{
    xswitch_info_t info;
    vertex_hdl_t vhdl;
    widget_cfg_t *wid;
    widgetreg_t id;
    int part;
    int mfgr;

    wid = soft->wpio[port - BASE_XBOW_PORT];
    if (wid == NULL) {
        /* If we can't track down a PIO
         * pointer to our widget yet,
         * leave our caller knowing that
         * we are interested in this
         * interrupt if it occurs in
         * the future.
         */
        info = xswitch_info_get(soft->busv);
        if (!info)
            return 1;
        vhdl = xswitch_info_vhdl_get(info, port);
        if (vhdl == GRAPH_VERTEX_NONE)
            return 1;
        wid = (widget_cfg_t *) xtalk_piotrans_addr
            (vhdl, 0, 0, sizeof *wid, 0);
        if (!wid)
            return 1;
        /* cache the PIO pointer for subsequent interrupts */
        soft->wpio[port - BASE_XBOW_PORT] = wid;
    }
    id = wid->w_id;
    part = XWIDGET_PART_NUM(id);
    mfgr = XWIDGET_MFG_NUM(id);

    /* If this thing is not a Bridge,
     * do not activate the WAR, and
     * tell our caller we do not need
     * to be called again.
     */
    if ((part != BRIDGE_WIDGET_PART_NUM) ||
        (mfgr != BRIDGE_WIDGET_MFGR_NUM)) {
        /* FIXME: add Xbridge to the WAR.
         * Shouldn't hurt anything. Later need to
         * check if we can remove this.
         */
        if ((part != XBRIDGE_WIDGET_PART_NUM) ||
            (mfgr != XBRIDGE_WIDGET_MFGR_NUM))
            return 0;
    }

    /* count how many times we
     * have picked up after
     * LLP Transmit problems.
     */
    xbow_xmit_retry_errors++;

    /* rewrite the control register
     * to fix things up.
     * NOTE(review): the self-assignment rewrites the register with
     * its current value, and the bare read that follows presumably
     * stalls until the PIO write has posted -- confirm against the
     * Bridge WAR documentation before changing either line.
     */
    wid->w_control = wid->w_control;
    wid->w_control;

    return 1;
}
603
604 /*
605 * xbow_errintr_handler will be called if the xbow
606 * sends an interrupt request to report an error.
607 */
/*
 * xbow_errintr_handler will be called if the xbow
 * sends an interrupt request to report an error.
 *
 * Captures the widget error state, walks every link port with a
 * pending interrupt, applies the Bridge LLP-retry WAR where it fits,
 * logs everything else, and panics if any error is left unhandled.
 */
static void
xbow_errintr_handler(int irq, void *arg, struct pt_regs *ep)
{
    ioerror_t ioe[1];
    xbow_soft_t soft = (xbow_soft_t) arg;
    xbow_t *xbow = soft->base;
    xbowreg_t wid_control;
    xbowreg_t wid_stat;
    xbowreg_t wid_err_cmdword;
    xbowreg_t wid_err_upper;
    xbowreg_t wid_err_lower;
    w_err_cmd_word_u wid_err;
    unsigned long long wid_err_addr;

    int fatal = 0;
    int dump_ioe = 0;
    static int xbow_error_handler(void *, int, ioerror_mode_t, ioerror_t *);

    /* Snapshot the widget error state.  NOTE(review): reading the
     * "_clr" status register presumably clears it -- keep these reads
     * in this exact order. */
    wid_control = xbow->xb_wid_control;
    wid_stat = xbow->xb_wid_stat_clr;
    wid_err_cmdword = xbow->xb_wid_err_cmdword;
    wid_err_upper = xbow->xb_wid_err_upper;
    wid_err_lower = xbow->xb_wid_err_lower;
    xbow->xb_wid_err_cmdword = 0;

    /* reassemble the 64-bit faulting address from the two halves */
    wid_err_addr = wid_err_lower | (((iopaddr_t) wid_err_upper & WIDGET_ERR_UPPER_ADDR_ONLY) << 32);

    if (wid_stat & XB_WID_STAT_LINK_INTR_MASK) {
        int port;

        wid_err.r = wid_err_cmdword;

        /* walk every link port that has an interrupt pending */
        for (port = MAX_PORT_NUM - MAX_XBOW_PORTS;
             port < MAX_PORT_NUM; port++) {
            if (wid_stat & XB_WID_STAT_LINK_INTR(port)) {
                xb_linkregs_t *link = &(xbow->xb_link(port));
                xbowreg_t link_control = link->link_control;
                xbowreg_t link_status = link->link_status_clr;
                xbowreg_t link_aux_status = link->link_aux_status;
                xbowreg_t link_pend;

                /* only the error conditions that are both flagged
                 * in status and enabled in control */
                link_pend = link_status & link_control &
                    (XB_STAT_ILLEGAL_DST_ERR
                     | XB_STAT_OALLOC_IBUF_ERR
                     | XB_STAT_RCV_CNT_OFLOW_ERR
                     | XB_STAT_XMT_CNT_OFLOW_ERR
                     | XB_STAT_XMT_MAX_RTRY_ERR
                     | XB_STAT_RCV_ERR
                     | XB_STAT_XMT_RTRY_ERR
                     | XB_STAT_MAXREQ_TOUT_ERR
                     | XB_STAT_SRC_TOUT_ERR
                    );

                if (link_pend & XB_STAT_ILLEGAL_DST_ERR) {
                    /* try to hand illegal-destination DMA errors to
                     * the error-dispatch path if the source widget
                     * recorded in the error command word matches */
                    if (wid_err.f.sidn == port) {
                        IOERROR_INIT(ioe);
                        IOERROR_SETVALUE(ioe, widgetnum, port);
                        IOERROR_SETVALUE(ioe, xtalkaddr, wid_err_addr);
                        if (IOERROR_HANDLED ==
                            xbow_error_handler(soft,
                                               IOECODE_DMA,
                                               MODE_DEVERROR,
                                               ioe)) {
                            link_pend &= ~XB_STAT_ILLEGAL_DST_ERR;
                        } else {
                            dump_ioe++;
                        }
                    }
                }
                /* Xbow/Bridge WAR:
                 * if the bridge signals an LLP Transmitter Retry,
                 * rewrite its control register.
                 * If someone else triggers this interrupt,
                 * ignore (and disable) the interrupt.
                 */
                if (link_pend & XB_STAT_XMT_RTRY_ERR) {
                    if (!xbow_xmit_retry_error(soft, port)) {
                        link_control &= ~XB_CTRL_XMT_RTRY_IE;
                        link->link_control = link_control;
                        link->link_control; /* stall until written */
                    }
                    link_pend &= ~XB_STAT_XMT_RTRY_ERR;
                }
                if (link_pend) {
                    vertex_hdl_t xwidget_vhdl;
                    char *xwidget_name;

                    /* Get the widget name corresponding to the current
                     * xbow link.
                     */
                    xwidget_vhdl = xbow_widget_lookup(soft->busv, port);
                    xwidget_name = xwidget_name_get(xwidget_vhdl);

                    printk("%s port %X[%s] XIO Bus Error",
                           soft->name, port, xwidget_name);
                    if (link_status & XB_STAT_MULTI_ERR)
                        XEM_ADD_STR("\tMultiple Errors\n");
                    if (link_status & XB_STAT_ILLEGAL_DST_ERR)
                        XEM_ADD_STR("\tInvalid Packet Destination\n");
                    if (link_status & XB_STAT_OALLOC_IBUF_ERR)
                        XEM_ADD_STR("\tInput Overallocation Error\n");
                    if (link_status & XB_STAT_RCV_CNT_OFLOW_ERR)
                        XEM_ADD_STR("\tLLP receive error counter overflow\n");
                    if (link_status & XB_STAT_XMT_CNT_OFLOW_ERR)
                        XEM_ADD_STR("\tLLP transmit retry counter overflow\n");
                    if (link_status & XB_STAT_XMT_MAX_RTRY_ERR)
                        XEM_ADD_STR("\tLLP Max Transmitter Retry\n");
                    if (link_status & XB_STAT_RCV_ERR)
                        XEM_ADD_STR("\tLLP Receiver error\n");
                    if (link_status & XB_STAT_XMT_RTRY_ERR)
                        XEM_ADD_STR("\tLLP Transmitter Retry\n");
                    if (link_status & XB_STAT_MAXREQ_TOUT_ERR)
                        XEM_ADD_STR("\tMaximum Request Timeout\n");
                    if (link_status & XB_STAT_SRC_TOUT_ERR)
                        XEM_ADD_STR("\tSource Timeout Error\n");

                    {
                        int other_port;

                        /* report any other ports flagged in the
                         * auxiliary status (bits 8..15) */
                        for (other_port = 8; other_port < 16; ++other_port) {
                            if (link_aux_status & (1 << other_port)) {
                                /* XXX- need to go to "other_port"
                                 * and clean up after the timeout?
                                 */
                                XEM_ADD_VAR(other_port);
                            }
                        }
                    }

#if !DEBUG
                    if (kdebug) {
#endif
                        XEM_ADD_VAR(link_control);
                        XEM_ADD_VAR(link_status);
                        XEM_ADD_VAR(link_aux_status);

#if !DEBUG
                    }
#endif
                    fatal++;
                }
            }
        }
    }
    if (wid_stat & wid_control & XB_WID_STAT_WIDGET0_INTR) {
        /* we have a "widget zero" problem */

        if (wid_stat & (XB_WID_STAT_MULTI_ERR
                        | XB_WID_STAT_XTALK_ERR
                        | XB_WID_STAT_REG_ACC_ERR)) {

            printk("%s Port 0 XIO Bus Error",
                   soft->name);
            if (wid_stat & XB_WID_STAT_MULTI_ERR)
                XEM_ADD_STR("\tMultiple Error\n");
            if (wid_stat & XB_WID_STAT_XTALK_ERR)
                XEM_ADD_STR("\tXIO Error\n");
            if (wid_stat & XB_WID_STAT_REG_ACC_ERR)
                XEM_ADD_STR("\tRegister Access Error\n");

            fatal++;
        }
    }
    if (fatal) {
        /* dump the captured state and give up */
        XEM_ADD_VAR(wid_stat);
        XEM_ADD_VAR(wid_control);
        XEM_ADD_VAR(wid_err_cmdword);
        XEM_ADD_VAR(wid_err_upper);
        XEM_ADD_VAR(wid_err_lower);
        XEM_ADD_VAR(wid_err_addr);
        panic("XIO Bus Error");
    }
}
781
782 /*
783 * XBOW ERROR Handling routines.
784 * These get invoked as part of walking down the error handling path
785 * from hub/heart towards the I/O device that caused the error.
786 */
787
/*
 * xbow_error_handler
 *      XBow error handling dispatch routine.
 *      This is the primary interface used by the external world to
 *      report an error related to a xbow.
 *      The only functionality in this layer is to identify the widget
 *      handle given the widgetnum.  Otherwise, the xbow does not
 *      gather any error data.
 */
/*
 * xbow_error_handler: dispatch an I/O error blamed on this crossbow.
 *
 * einfo       - our xbow_soft_t, as registered with the error path.
 * error_code  - IOECODE_DMA / IOECODE_PIO bits.
 * mode        - MODE_DEVPROBE errors are always reported as handled;
 *               MODE_DEVERROR errors are logged and passed along.
 * ioerror     - error record; widgetnum selects the port.
 *
 * Returns IOERROR_HANDLED when the error is absorbed (probe mode or a
 * downstream handler took it), otherwise IOERROR_WIDGETLEVEL or
 * IOERROR_PANIC for the caller to act on.
 */
static int
xbow_error_handler(
    void *einfo,
    int error_code,
    ioerror_mode_t mode,
    ioerror_t *ioerror)
{
    int retval = IOERROR_WIDGETLEVEL;

    xbow_soft_t soft = (xbow_soft_t) einfo;
    int port;
    vertex_hdl_t conn;
    vertex_hdl_t busv;

    xbow_t *xbow = soft->base;
    xbowreg_t wid_stat;
    xbowreg_t wid_err_cmdword;
    xbowreg_t wid_err_upper;
    xbowreg_t wid_err_lower;
    unsigned long long wid_err_addr;

    xb_linkregs_t *link;
    xbowreg_t link_control;
    xbowreg_t link_status;
    xbowreg_t link_aux_status;

    ASSERT(soft != 0);
    busv = soft->busv;

#if DEBUG && ERROR_DEBUG
    printk("%s: xbow_error_handler\n", soft->name, busv);
#endif

    IOERROR_GETVALUE(port, ioerror, widgetnum);

    if (port == 0) {
        /* error during access to xbow:
         * do NOT attempt to access xbow regs.
         */
        if (mode == MODE_DEVPROBE)
            return IOERROR_HANDLED;

        if (error_code & IOECODE_DMA) {
            printk(KERN_ALERT
                   "DMA error blamed on Crossbow at %s\n"
                   "\tbut Crosbow never initiates DMA!",
                   soft->name);
        }
        if (error_code & IOECODE_PIO) {
            iopaddr_t tmp;
            IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
            printk(KERN_ALERT "PIO Error on XIO Bus %s\n"
                   "\tattempting to access XIO controller\n"
                   "\twith offset 0x%lx",
                   soft->name, tmp);
        }
        /* caller will dump contents of ioerror
         * in DEBUG and kdebug kernels.
         */

        return retval;
    }
    /*
     * error not on port zero:
     * safe to read xbow registers.
     */
    wid_stat = xbow->xb_wid_stat;
    wid_err_cmdword = xbow->xb_wid_err_cmdword;
    wid_err_upper = xbow->xb_wid_err_upper;
    wid_err_lower = xbow->xb_wid_err_lower;

    /* reassemble the 64-bit faulting address */
    wid_err_addr =
        wid_err_lower
        | (((iopaddr_t) wid_err_upper
            & WIDGET_ERR_UPPER_ADDR_ONLY)
           << 32);

    if ((port < BASE_XBOW_PORT) ||
        (port >= MAX_PORT_NUM)) {

        /* blamed port is out of the crossbow's range */
        if (mode == MODE_DEVPROBE)
            return IOERROR_HANDLED;

        if (error_code & IOECODE_DMA) {
            printk(KERN_ALERT
                   "DMA error blamed on XIO port at %s/%d\n"
                   "\tbut Crossbow does not support that port",
                   soft->name, port);
        }
        if (error_code & IOECODE_PIO) {
            iopaddr_t tmp;
            IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
            printk(KERN_ALERT
                   "PIO Error on XIO Bus %s\n"
                   "\tattempting to access XIO port %d\n"
                   "\t(which Crossbow does not support)"
                   "\twith offset 0x%lx",
                   soft->name, port, tmp);
        }
#if !DEBUG
        if (kdebug) {
#endif
            XEM_ADD_STR("Raw status values for Crossbow:\n");
            XEM_ADD_VAR(wid_stat);
            XEM_ADD_VAR(wid_err_cmdword);
            XEM_ADD_VAR(wid_err_upper);
            XEM_ADD_VAR(wid_err_lower);
            XEM_ADD_VAR(wid_err_addr);
#if !DEBUG
        }
#endif

        /* caller will dump contents of ioerror
         * in DEBUG and kdebug kernels.
         */

        return retval;
    }
    /* access to valid port:
     * ok to check port status.
     */

    link = &(xbow->xb_link(port));
    link_control = link->link_control;
    link_status = link->link_status;
    link_aux_status = link->link_aux_status;

    /* Check that there is something present
     * in that XIO port.
     */
    /* WAR: PIC widget 0xf is missing its presence bit */
    if (XBOW_WAR_ENABLED(PV854827, xbow->xb_wid_id) &&
        IS_PIC_XBOW(xbow->xb_wid_id) && (port == 0xf))
        ;
    else if (IS_PIC_XBOW(xbow->xb_wid_id) && (port == 0xb))
        ;                       /* WAR for opus this is missing on 0xb */
    else if (!(link_aux_status & XB_AUX_STAT_PRESENT)) {
        /* nobody connected. */
        if (mode == MODE_DEVPROBE)
            return IOERROR_HANDLED;

        if (error_code & IOECODE_DMA) {
            printk(KERN_ALERT
                   "DMA error blamed on XIO port at %s/%d\n"
                   "\tbut there is no device connected there.",
                   soft->name, port);
        }
        if (error_code & IOECODE_PIO) {
            iopaddr_t tmp;
            IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
            printk(KERN_ALERT
                   "PIO Error on XIO Bus %s\n"
                   "\tattempting to access XIO port %d\n"
                   "\t(which has no device connected)"
                   "\twith offset 0x%lx",
                   soft->name, port, tmp);
        }
#if !DEBUG
        if (kdebug) {
#endif
            XEM_ADD_STR("Raw status values for Crossbow:\n");
            XEM_ADD_VAR(wid_stat);
            XEM_ADD_VAR(wid_err_cmdword);
            XEM_ADD_VAR(wid_err_upper);
            XEM_ADD_VAR(wid_err_lower);
            XEM_ADD_VAR(wid_err_addr);
            XEM_ADD_VAR(port);
            XEM_ADD_VAR(link_control);
            XEM_ADD_VAR(link_status);
            XEM_ADD_VAR(link_aux_status);
#if !DEBUG
        }
#endif
        return retval;

    }
    /* Check that the link is alive.
     */
    if (!(link_status & XB_STAT_LINKALIVE)) {
        iopaddr_t tmp;
        /* nobody connected. */
        if (mode == MODE_DEVPROBE)
            return IOERROR_HANDLED;

        printk(KERN_ALERT
               "%s%sError on XIO Bus %s port %d",
               (error_code & IOECODE_DMA) ? "DMA " : "",
               (error_code & IOECODE_PIO) ? "PIO " : "",
               soft->name, port);

        IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
        if ((error_code & IOECODE_PIO) &&
            (IOERROR_FIELDVALID(ioerror, xtalkaddr))) {
            printk("\tAccess attempted to offset 0x%lx\n", tmp);
        }
        if (link_aux_status & XB_AUX_LINKFAIL_RST_BAD)
            XEM_ADD_STR("\tLink never came out of reset\n");
        else
            XEM_ADD_STR("\tLink failed while transferring data\n");

    }
    /* get the connection point for the widget
     * involved in this error; if it exists and
     * is not our connectpoint, cycle back through
     * xtalk_error_handler to deliver control to
     * the proper handler (or to report a generic
     * crosstalk error).
     *
     * If the downstream handler won't handle
     * the problem, we let our upstream caller
     * deal with it, after (in DEBUG and kdebug
     * kernels) dumping the xbow state for this
     * port.
     */
    conn = xbow_widget_lookup(busv, port);
    if ((conn != GRAPH_VERTEX_NONE) &&
        (conn != soft->conn)) {
        retval = xtalk_error_handler(conn, error_code, mode, ioerror);
        if (retval == IOERROR_HANDLED)
            return IOERROR_HANDLED;
    }
    if (mode == MODE_DEVPROBE)
        return IOERROR_HANDLED;

    if (retval == IOERROR_UNHANDLED) {
        /* nobody downstream claimed it; escalate to a panic return */
        iopaddr_t tmp;
        retval = IOERROR_PANIC;

        printk(KERN_ALERT
               "%s%sError on XIO Bus %s port %d",
               (error_code & IOECODE_DMA) ? "DMA " : "",
               (error_code & IOECODE_PIO) ? "PIO " : "",
               soft->name, port);

        IOERROR_GETVALUE(tmp, ioerror, xtalkaddr);
        if ((error_code & IOECODE_PIO) &&
            (IOERROR_FIELDVALID(ioerror, xtalkaddr))) {
            printk("\tAccess attempted to offset 0x%lx\n", tmp);
        }
    }

#if !DEBUG
    if (kdebug) {
#endif
        XEM_ADD_STR("Raw status values for Crossbow:\n");
        XEM_ADD_VAR(wid_stat);
        XEM_ADD_VAR(wid_err_cmdword);
        XEM_ADD_VAR(wid_err_upper);
        XEM_ADD_VAR(wid_err_lower);
        XEM_ADD_VAR(wid_err_addr);
        XEM_ADD_VAR(port);
        XEM_ADD_VAR(link_control);
        XEM_ADD_VAR(link_status);
        XEM_ADD_VAR(link_aux_status);
#if !DEBUG
    }
#endif
    /* caller will dump raw ioerror data
     * in DEBUG and kdebug kernels.
     */

    return retval;
}
1060
1061 void
xbow_update_perf_counters(vertex_hdl_t vhdl)1062 xbow_update_perf_counters(vertex_hdl_t vhdl)
1063 {
1064 xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
1065 xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
1066 xbow_perf_link_t *xbow_plink = xbow_soft->xbow_perflink;
1067 xbow_perfcount_t perf_reg;
1068 int link, i;
1069
1070 for (i = 0; i < XBOW_PERF_COUNTERS; i++, xbow_perf++) {
1071 if (xbow_perf->xp_mode == XBOW_MONITOR_NONE)
1072 continue;
1073
1074 spin_lock(&xbow_soft->xbow_perf_lock);
1075
1076 perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
1077
1078 link = perf_reg.xb_perf.link_select;
1079
1080 (xbow_plink + link)->xlp_cumulative[xbow_perf->xp_curmode] +=
1081 ((perf_reg.xb_perf.count - xbow_perf->xp_current) & XBOW_COUNTER_MASK);
1082 xbow_perf->xp_current = perf_reg.xb_perf.count;
1083
1084 spin_unlock(&xbow_soft->xbow_perf_lock);
1085 }
1086 }
1087
1088 xbow_perf_link_t *
xbow_get_perf_counters(vertex_hdl_t vhdl)1089 xbow_get_perf_counters(vertex_hdl_t vhdl)
1090 {
1091 xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
1092 xbow_perf_link_t *xbow_perf_link = xbow_soft->xbow_perflink;
1093
1094 return xbow_perf_link;
1095 }
1096
/*
 * Arm one of the xbow's performance counters to monitor a link.
 *
 * link:    xtalk widget number of the port to monitor (BASE_XBOW_PORT-based;
 *          converted to a 0-based port index below).
 * mode:    one of the XBOW_MONITOR_* modes; XBOW_MONITOR_NONE (the low
 *          bound of the range check) reads as "disable".
 * counter: which of the XBOW_PERF_COUNTERS hardware counters to use.
 *
 * Returns 0 on success, -1 on bad arguments or if the counter/link is
 * already in use. Serialized against xbow_update_perf_counters() via
 * xbow_perf_lock.
 */
int
xbow_enable_perf_counter(vertex_hdl_t vhdl, int link, int mode, int counter)
{
    xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
    xbow_perf_t *xbow_perf = xbow_soft->xbow_perfcnt;
    xbow_linkctrl_t xbow_link_ctrl;
    xbow_t *xbow = xbow_soft->base;
    xbow_perfcount_t perf_reg;
    int i;

    /* Convert the widget number to a 0-based port index and validate
     * all three arguments before touching any state.
     */
    link -= BASE_XBOW_PORT;
    if ((link < 0) || (link >= MAX_XBOW_PORTS))
	return -1;

    if ((mode < XBOW_MONITOR_NONE) || (mode > XBOW_MONITOR_DEST_LINK))
	return -1;

    if ((counter < 0) || (counter >= XBOW_PERF_COUNTERS))
	return -1;

    spin_lock(&xbow_soft->xbow_perf_lock);

    /* Refuse to re-arm a counter that is already active (a zero mode,
     * i.e. XBOW_MONITOR_NONE, is allowed through to disable it).
     */
    if ((xbow_perf + counter)->xp_mode && mode) {
	spin_unlock(&xbow_soft->xbow_perf_lock);
	return -1;
    }
    /* Only one counter may monitor a given link at a time. */
    for (i = 0; i < XBOW_PERF_COUNTERS; i++) {
	if (i == counter)
	    continue;
	if (((xbow_perf + i)->xp_link == link) &&
	    ((xbow_perf + i)->xp_mode)) {
	    spin_unlock(&xbow_soft->xbow_perf_lock);
	    return -1;
	}
    }
    xbow_perf += counter;

    /* Record the new assignment in the software bookkeeping. */
    xbow_perf->xp_curlink = xbow_perf->xp_link = link;
    xbow_perf->xp_curmode = xbow_perf->xp_mode = mode;

    /* Program the monitored link's control register with the perf mode
     * (read-modify-write of the hardware control word).
     */
    xbow_link_ctrl.xbl_ctrlword = xbow->xb_link_raw[link].link_control;
    xbow_link_ctrl.xb_linkcontrol.perf_mode = mode;
    xbow->xb_link_raw[link].link_control = xbow_link_ctrl.xbl_ctrlword;

    /* Point the perf counter register at the selected link and remember
     * the current count as the baseline for future deltas.
     */
    perf_reg.xb_counter_val = *(xbowreg_t *) xbow_perf->xp_perf_reg;
    perf_reg.xb_perf.link_select = link;
    *(xbowreg_t *) xbow_perf->xp_perf_reg = perf_reg.xb_counter_val;
    xbow_perf->xp_current = perf_reg.xb_perf.count;

    spin_unlock(&xbow_soft->xbow_perf_lock);
    return 0;
}
1149
1150 xbow_link_status_t *
xbow_get_llp_status(vertex_hdl_t vhdl)1151 xbow_get_llp_status(vertex_hdl_t vhdl)
1152 {
1153 xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
1154 xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
1155
1156 return xbow_llp_status;
1157 }
1158
/*
 * Sample the LLP (link-level protocol) status of every xbow port and
 * accumulate receive-error and transmit-retry counts into the software
 * xbow_link_status table. Ports whose link is not alive are skipped.
 */
void
xbow_update_llp_status(vertex_hdl_t vhdl)
{
    xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
    xbow_link_status_t *xbow_llp_status = xbow_soft->xbow_link_status;
    xbow_t *xbow;
    xbwX_stat_t lnk_sts;
    xbow_aux_link_status_t aux_sts;
    int link;
    vertex_hdl_t xwidget_vhdl;
    char *xwidget_name;	/* only consumed by the LATER-disabled printk below */

    xbow = (xbow_t *) xbow_soft->base;
    for (link = 0; link < MAX_XBOW_PORTS; link++, xbow_llp_status++) {
	/* Get the widget name corresponding the current link.
	 * Note : 0 <= link < MAX_XBOW_PORTS(8).
	 * BASE_XBOW_PORT(0x8) <= xwidget number < MAX_PORT_NUM (0x10)
	 */
	xwidget_vhdl = xbow_widget_lookup(xbow_soft->busv,link+BASE_XBOW_PORT);
	xwidget_name = xwidget_name_get(xwidget_vhdl);
	/* NOTE(review): link_status_clr presumably clears the status on
	 * read (hence the accumulation below) -- confirm against the
	 * shubio register description.
	 */
	aux_sts.aux_linkstatus
	    = xbow->xb_link_raw[link].link_aux_status;
	lnk_sts.linkstatus = xbow->xb_link_raw[link].link_status_clr;

	if (lnk_sts.link_alive == 0)
	    continue;

	xbow_llp_status->rx_err_count +=
	    aux_sts.xb_aux_linkstatus.rx_err_cnt;

	xbow_llp_status->tx_retry_count +=
	    aux_sts.xb_aux_linkstatus.tx_retry_cnt;

	/* Any status bit other than alive/rx-err/tx-retry is unexpected;
	 * the warning is compiled out pending the LATER cleanup. */
	if (lnk_sts.linkstatus & ~(XB_STAT_RCV_ERR | XB_STAT_XMT_RTRY_ERR | XB_STAT_LINKALIVE)) {
#ifdef LATER
	    printk(KERN_WARNING "link %d[%s]: bad status 0x%x\n",
		    link, xwidget_name, lnk_sts.linkstatus);
#endif
	}
    }
}
1200
1201 int
xbow_disable_llp_monitor(vertex_hdl_t vhdl)1202 xbow_disable_llp_monitor(vertex_hdl_t vhdl)
1203 {
1204 xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
1205 int port;
1206
1207 for (port = 0; port < MAX_XBOW_PORTS; port++) {
1208 xbow_soft->xbow_link_status[port].rx_err_count = 0;
1209 xbow_soft->xbow_link_status[port].tx_retry_count = 0;
1210 }
1211
1212 xbow_soft->link_monitor = 0;
1213 return 0;
1214 }
1215
1216 int
xbow_enable_llp_monitor(vertex_hdl_t vhdl)1217 xbow_enable_llp_monitor(vertex_hdl_t vhdl)
1218 {
1219 xbow_soft_t xbow_soft = xbow_soft_get(vhdl);
1220
1221 xbow_soft->link_monitor = 1;
1222 return 0;
1223 }
1224
1225
1226 int
xbow_reset_link(vertex_hdl_t xconn_vhdl)1227 xbow_reset_link(vertex_hdl_t xconn_vhdl)
1228 {
1229 xwidget_info_t widget_info;
1230 xwidgetnum_t port;
1231 xbow_t *xbow;
1232 xbowreg_t ctrl;
1233 xbwX_stat_t stat;
1234 unsigned long itick;
1235 unsigned dtick;
1236 static long ticks_to_wait = HZ / 1000;
1237
1238 widget_info = xwidget_info_get(xconn_vhdl);
1239 port = xwidget_info_id_get(widget_info);
1240
1241 #ifdef XBOW_K1PTR /* defined if we only have one xbow ... */
1242 xbow = XBOW_K1PTR;
1243 #else
1244 {
1245 vertex_hdl_t xbow_vhdl;
1246 xbow_soft_t xbow_soft;
1247
1248 hwgraph_traverse(xconn_vhdl, ".master/xtalk/0/xbow", &xbow_vhdl);
1249 xbow_soft = xbow_soft_get(xbow_vhdl);
1250 xbow = xbow_soft->base;
1251 }
1252 #endif
1253
1254 /*
1255 * This requires three PIOs (reset the link, check for the
1256 * reset, restore the control register for the link) plus
1257 * 10us to wait for the reset. We allow up to 1ms for the
1258 * widget to come out of reset before giving up and
1259 * returning a failure.
1260 */
1261 ctrl = xbow->xb_link(port).link_control;
1262 xbow->xb_link(port).link_reset = 0;
1263 itick = jiffies;
1264 while (1) {
1265 stat.linkstatus = xbow->xb_link(port).link_status;
1266 if (stat.link_alive)
1267 break;
1268 dtick = jiffies - itick;
1269 if (dtick > ticks_to_wait) {
1270 return -1; /* never came out of reset */
1271 }
1272 udelay(2); /* don't beat on link_status */
1273 }
1274 xbow->xb_link(port).link_control = ctrl;
1275 return 0;
1276 }
1277
#define XBOW_ARB_RELOAD_TICKS		25
				/* granularity: 4 MB/s, max: 124 MB/s */
#define GRANULARITY			((100 * 1000000) / XBOW_ARB_RELOAD_TICKS)

#define XBOW_BYTES_TO_GBR(BYTES_per_s)	(int) (BYTES_per_s / GRANULARITY)

#define XBOW_GBR_TO_BYTES(cnt)		(bandwidth_t) ((cnt) * GRANULARITY)

#define CEILING_BYTES_TO_GBR(gbr, bytes_per_sec)	\
			((XBOW_GBR_TO_BYTES(gbr) < bytes_per_sec) ? gbr+1 : gbr)

#define XBOW_ARB_GBR_MAX 31

/* absolute value; the argument is fully parenthesized so expression
 * arguments such as ABS(a - b) group correctly (the old -1 * x form
 * expanded ABS(a - b) to -1 * a - b) */
#define ABS(x) (((x) > 0) ? (x) : (-(x)))
1293
1294 int
xbow_bytes_to_gbr(bandwidth_t old_bytes_per_sec,bandwidth_t bytes_per_sec)1295 xbow_bytes_to_gbr(bandwidth_t old_bytes_per_sec, bandwidth_t bytes_per_sec)
1296 {
1297 int gbr_granted;
1298 int new_total_gbr;
1299 int change_gbr;
1300 bandwidth_t new_total_bw;
1301
1302 #ifdef GRIO_DEBUG
1303 printk("xbow_bytes_to_gbr: old_bytes_per_sec %lld bytes_per_sec %lld\n",
1304 old_bytes_per_sec, bytes_per_sec);
1305 #endif /* GRIO_DEBUG */
1306
1307 gbr_granted = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(old_bytes_per_sec)),
1308 old_bytes_per_sec);
1309 new_total_bw = old_bytes_per_sec + bytes_per_sec;
1310 new_total_gbr = CEILING_BYTES_TO_GBR((XBOW_BYTES_TO_GBR(new_total_bw)),
1311 new_total_bw);
1312
1313 change_gbr = new_total_gbr - gbr_granted;
1314
1315 #ifdef GRIO_DEBUG
1316 printk("xbow_bytes_to_gbr: gbr_granted %d new_total_gbr %d change_gbr %d\n",
1317 gbr_granted, new_total_gbr, change_gbr);
1318 #endif /* GRIO_DEBUG */
1319
1320 return (change_gbr);
1321 }
1322
1323 /* Conversion from GBR to bytes */
1324 bandwidth_t
xbow_gbr_to_bytes(int gbr)1325 xbow_gbr_to_bytes(int gbr)
1326 {
1327 return (XBOW_GBR_TO_BYTES(gbr));
1328 }
1329
/* Given the vhdl for the desired xbow, the src and dest. widget ids
 * and the req_bw value, this xbow driver entry point accesses the
 * xbow registers and allocates the desired bandwidth if available.
 *
 * If bandwidth allocation is successful, return success else return failure.
 */
int
xbow_prio_bw_alloc(vertex_hdl_t vhdl,
		xwidgetnum_t src_wid,
		xwidgetnum_t dest_wid,
		unsigned long long old_alloc_bw,
		unsigned long long req_bw)
{
    xbow_soft_t soft = xbow_soft_get(vhdl);
    volatile xbowreg_t *xreg;	/* hardware arbitration register for the pair */
    xbowreg_t mask;		/* isolates this source's GBR count field */
    int error = 0;		/* 0 = allocated, 1 = refused */
    bandwidth_t old_bw_BYTES, req_bw_BYTES;
    xbowreg_t old_xreg;
    int old_bw_GBR, req_bw_GBR, new_bw_GBR;

#ifdef GRIO_DEBUG
    printk("xbow_prio_bw_alloc: vhdl %d src_wid %d dest_wid %d req_bw %lld\n",
		(int) vhdl, (int) src_wid, (int) dest_wid, req_bw);
#endif

    ASSERT(XBOW_WIDGET_IS_VALID(src_wid));
    ASSERT(XBOW_WIDGET_IS_VALID(dest_wid));

    spin_lock(&soft->xbow_bw_alloc_lock);

    /* Get pointer to the correct register */
    xreg = XBOW_PRIO_ARBREG_PTR(soft->base, dest_wid, src_wid);

    /* Get mask for GBR count value */
    mask = XB_ARB_GBR_MSK << XB_ARB_GBR_SHFT(src_wid);

    /* Ticks needed on top of the existing allocation; may be negative
     * when the request shrinks the allocation.
     */
    req_bw_GBR = xbow_bytes_to_gbr(old_alloc_bw, req_bw);
    req_bw_BYTES = (req_bw_GBR < 0) ? (-1 * xbow_gbr_to_bytes(ABS(req_bw_GBR)))
		: xbow_gbr_to_bytes(req_bw_GBR);

#ifdef GRIO_DEBUG
    printk("req_bw %lld req_bw_BYTES %lld req_bw_GBR %d\n",
		req_bw, req_bw_BYTES, req_bw_GBR);
#endif /* GRIO_DEBUG */

    /* Current usage: software bookkeeping (bytes) and the hardware
     * register's current GBR count for this source. Per-destination
     * arrays are indexed from MAX_XBOW_PORTS.
     */
    old_bw_BYTES = soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS];
    old_xreg = *xreg;
    old_bw_GBR = (((*xreg) & mask) >> XB_ARB_GBR_SHFT(src_wid));

#ifdef GRIO_DEBUG
    ASSERT(XBOW_BYTES_TO_GBR(old_bw_BYTES) == old_bw_GBR);

    printk("old_bw_BYTES %lld old_bw_GBR %d\n", old_bw_BYTES, old_bw_GBR);

    printk("req_bw_BYTES %lld old_bw_BYTES %lld soft->bw_hiwm %lld\n",
		req_bw_BYTES, old_bw_BYTES,
		soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]);

#endif /* GRIO_DEBUG */

    /* Accept the request only if we don't exceed the destination
     * port HIWATER_MARK *AND* the max. link GBR arbitration count
     */
    if (((old_bw_BYTES + req_bw_BYTES) <=
		soft->bw_hiwm[(int) dest_wid - MAX_XBOW_PORTS]) &&
		(req_bw_GBR + old_bw_GBR <= XBOW_ARB_GBR_MAX)) {

	new_bw_GBR = (old_bw_GBR + req_bw_GBR);

	/* Set this in the xbow link register */
	*xreg = (old_xreg & ~mask) | \
		(new_bw_GBR << XB_ARB_GBR_SHFT(src_wid) & mask);

	soft->bw_cur_used[(int) dest_wid - MAX_XBOW_PORTS] =
				xbow_gbr_to_bytes(new_bw_GBR);
    } else {
	error = 1;
    }

    spin_unlock(&soft->xbow_bw_alloc_lock);

    return (error);
}
1414