/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
 */
#ifndef _ASM_IA64_SN_NODEPDA_H
#define _ASM_IA64_SN_NODEPDA_H

#include <linux/config.h>
#include <asm/sn/sgi.h>
#include <asm/irq.h>
#include <asm/sn/intr.h>
#include <asm/sn/router.h>
#include <asm/sn/pda.h>
#include <asm/sn/module.h>
#include <asm/sn/bte.h>

/*
 * NUMA Node-Specific Data structures are defined in this file.
 * In particular, this is the location of the node PDA.
 * A pointer to the right node PDA is saved in each CPU PDA.
 */

/*
 * Node-specific data structure.
 *
 * One of these structures is allocated on each node of a NUMA system.
 *
 * This structure provides a convenient way of keeping together
 * all per-node data structures.
 */
struct nodepda_s {
	cpuid_t		node_first_cpu;	/* Starting cpu number for node */
					/* WARNING: no guarantee that   */
					/* the second cpu on a node is  */
					/* node_first_cpu+1.            */

	vertex_hdl_t	xbow_vhdl;
	nasid_t		xbow_peer;	/* NASID of our peer hub on xbow */
	struct semaphore xbow_sema;	/* Sema for xbow synchronization */
	slotid_t	slotdesc;
	geoid_t		geoid;
	module_t	*module;	/* Pointer to containing module */
	xwidgetnum_t	basew_id;
	vertex_hdl_t	basew_xc;
	int		hubticks;
	int		num_routers;	/* XXX not setup! Total routers in the system */

	char		*hwg_node_name;	/* hwgraph node name */
	vertex_hdl_t	node_vertex;	/* Hwgraph vertex for this node */

	void		*pdinfo;	/* Platform-dependent per-node info */

	nodepda_router_info_t	*npda_rip_first;
	nodepda_router_info_t	**npda_rip_last;

	spinlock_t	bist_lock;

	/*
	 * The BTEs on this node are shared by the local cpus
	 */
	struct bteinfo_s	bte_if[BTES_PER_NODE];	/* Virtual Interface */
	struct timer_list	bte_recovery_timer;
	spinlock_t		bte_recovery_lock;

	/*
	 * Array of pointers to the nodepdas for each node.
	 */
	struct nodepda_s	*pernode_pdaindr[MAX_COMPACT_NODES];
};

typedef struct nodepda_s nodepda_t;

struct irqpda_s {
	int		num_irq_used;
	char		irq_flags[NR_IRQS];
	struct pci_dev	*device_dev[NR_IRQS];
	char		share_count[NR_IRQS];
	struct pci_dev	*current;
};

typedef struct irqpda_s irqpda_t;

/*
 * Access Functions for node PDA.
 * Since there is one nodepda for each node, we need a convenient mechanism
 * to access these nodepdas without cluttering code with #ifdefs.
 * The next set of definitions provides this.
 *
 * Routines are expected to use
 *
 *	nodepda			-> to access node PDA for the node on which code is running
 *	subnodepda		-> to access subnode PDA for the subnode on which code is running
 *
 *	NODEPDA(cnode)		-> to access node PDA for cnodeid
 *	SUBNODEPDA(cnode,sn)	-> to access subnode PDA for cnodeid/subnode
 */

#define nodepda		pda.p_nodepda		/* Ptr to this node's PDA */
#define NODEPDA(cnode)	(nodepda->pernode_pdaindr[cnode])

/*
 * Macros to access data structures inside nodepda
 */
#define NODE_MODULEID(cnode)	geo_module((NODEPDA(cnode)->geoid))
#define NODE_SLOTID(cnode)	(NODEPDA(cnode)->slotdesc)

/*
 * Quickly convert a compact node ID into a hwgraph vertex
 */
#define cnodeid_to_vertex(cnodeid)	(NODEPDA(cnodeid)->node_vertex)

/*
 * Check if given a compact node id the corresponding node has all the
 * cpus disabled.
 */
#define is_headless_node(cnode)		(!test_bit(cnode, &node_has_active_cpus))

/*
 * Check if given a node vertex handle the corresponding node has all the
 * cpus disabled.
 */
#define is_headless_node_vertex(_nodevhdl) \
		is_headless_node(nodevertex_to_cnodeid(_nodevhdl))

#endif /* _ASM_IA64_SN_NODEPDA_H */
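
/*
 * Illustrative usage sketch (not part of the original header): how platform
 * code might walk the per-node PDAs through the accessors defined above,
 * skipping headless nodes (all cpus disabled) via is_headless_node().  The
 * function name and the "num_cnodes" parameter are hypothetical stand-ins
 * for whatever compact-node count the caller already has.
 *
 *	static void example_print_nodes(int num_cnodes)
 *	{
 *		cnodeid_t cnode;
 *
 *		for (cnode = 0; cnode < num_cnodes; cnode++) {
 *			nodepda_t *npda = NODEPDA(cnode);
 *
 *			if (is_headless_node(cnode))
 *				continue;
 *
 *			printk("cnode %d: first cpu %d, module %d, slot %d\n",
 *			       (int)cnode, (int)npda->node_first_cpu,
 *			       (int)NODE_MODULEID(cnode), (int)NODE_SLOTID(cnode));
 *		}
 *	}
 */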