/*
 * SN Platform GRU Driver
 *
 * GRU HANDLE DEFINITION
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef __GRUHANDLES_H__
#define __GRUHANDLES_H__
#include "gru_instructions.h"

/*
 * Manifest constants for GRU Memory Map
 */
#define GRU_GSEG0_BASE 0
#define GRU_MCS_BASE (64 * 1024 * 1024)
#define GRU_SIZE (128UL * 1024 * 1024)

/* Handle & resource counts */
#define GRU_NUM_CB 128
#define GRU_NUM_DSR_BYTES (32 * 1024)
#define GRU_NUM_TFM 16
#define GRU_NUM_TGH 24
#define GRU_NUM_CBE 128
#define GRU_NUM_TFH 128
#define GRU_NUM_CCH 16

/* Maximum resource counts that can be reserved by user programs */
#define GRU_NUM_USER_CBR GRU_NUM_CBE
#define GRU_NUM_USER_DSR_BYTES GRU_NUM_DSR_BYTES

/*
 * Bytes per handle & handle stride. Code assumes all cb, tfh, cbe handles
 * are the same.
 */
#define GRU_HANDLE_BYTES 64
#define GRU_HANDLE_STRIDE 256

/* Base addresses of handles */
#define GRU_TFM_BASE (GRU_MCS_BASE + 0x00000)
#define GRU_TGH_BASE (GRU_MCS_BASE + 0x08000)
#define GRU_CBE_BASE (GRU_MCS_BASE + 0x10000)
#define GRU_TFH_BASE (GRU_MCS_BASE + 0x18000)
#define GRU_CCH_BASE (GRU_MCS_BASE + 0x20000)
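
/*
 * Example (illustrative only): with the values above, GRU_MCS_BASE is
 * 0x4000000, so the CBE handle for CB 5 sits at chiplet offset
 * GRU_CBE_BASE + 5 * GRU_HANDLE_STRIDE = 0x4010000 + 0x500 = 0x4010500.
 * Only the first GRU_HANDLE_BYTES of each GRU_HANDLE_STRIDE-sized slot
 * hold the handle itself.
 */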

/* User gseg constants */
#define GRU_GSEG_STRIDE (4 * 1024 * 1024)
#define GSEG_BASE(a) ((a) & ~(GRU_GSEG_PAGESIZE - 1))

/* Data segment constants */
#define GRU_DSR_AU_BYTES 1024
#define GRU_DSR_CL (GRU_NUM_DSR_BYTES / GRU_CACHE_LINE_BYTES)
#define GRU_DSR_AU_CL (GRU_DSR_AU_BYTES / GRU_CACHE_LINE_BYTES)
#define GRU_DSR_AU (GRU_NUM_DSR_BYTES / GRU_DSR_AU_BYTES)

/* Control block constants */
#define GRU_CBR_AU_SIZE 2
#define GRU_CBR_AU (GRU_NUM_CBE / GRU_CBR_AU_SIZE)

/* Convert resource counts to the number of AU */
#define GRU_DS_BYTES_TO_AU(n) DIV_ROUND_UP(n, GRU_DSR_AU_BYTES)
#define GRU_CB_COUNT_TO_AU(n) DIV_ROUND_UP(n, GRU_CBR_AU_SIZE)
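
/*
 * Worked example: a context asking for 3000 DSR bytes and 5 CBRs needs
 *	GRU_DS_BYTES_TO_AU(3000) == 3	(three 1024-byte DSR AUs)
 *	GRU_CB_COUNT_TO_AU(5)    == 3	(three AUs of GRU_CBR_AU_SIZE CBRs)
 */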

/* UV limits */
#define GRU_CHIPLETS_PER_HUB 2
#define GRU_HUBS_PER_BLADE 1
#define GRU_CHIPLETS_PER_BLADE (GRU_HUBS_PER_BLADE * GRU_CHIPLETS_PER_HUB)

/* User GRU Gseg offsets */
#define GRU_CB_BASE 0
#define GRU_CB_LIMIT (GRU_CB_BASE + GRU_HANDLE_STRIDE * GRU_NUM_CBE)
#define GRU_DS_BASE 0x20000
#define GRU_DS_LIMIT (GRU_DS_BASE + GRU_NUM_DSR_BYTES)

/* Convert a GRU physical address to the chiplet offset */
#define GSEGPOFF(h) ((h) & (GRU_SIZE - 1))

/* Convert an arbitrary handle address to the beginning of the GRU segment */
#define GRUBASE(h) ((void *)((unsigned long)(h) & ~(GRU_SIZE - 1)))

/* Test a valid handle address to determine the type */
#define TYPE_IS(hn, h) ((h) >= GRU_##hn##_BASE && (h) < \
		GRU_##hn##_BASE + GRU_NUM_##hn * GRU_HANDLE_STRIDE && \
		(((h) & (GRU_HANDLE_STRIDE - 1)) == 0))
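
/*
 * Example (illustrative): TYPE_IS(CBE, off) is true only when "off" lies in
 * [GRU_CBE_BASE, GRU_CBE_BASE + GRU_NUM_CBE * GRU_HANDLE_STRIDE) and is
 * aligned on a handle stride, e.g.
 *
 *	if (TYPE_IS(CBE, GSEGPOFF(paddr)))
 *		... the address refers to a CBE handle ...
 */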

/* General addressing macros. */
static inline void *get_gseg_base_address(void *base, int ctxnum)
{
	return (void *)(base + GRU_GSEG0_BASE + GRU_GSEG_STRIDE * ctxnum);
}

static inline void *get_gseg_base_address_cb(void *base, int ctxnum, int line)
{
	return (void *)(get_gseg_base_address(base, ctxnum) +
			GRU_CB_BASE + GRU_HANDLE_STRIDE * line);
}

static inline void *get_gseg_base_address_ds(void *base, int ctxnum, int line)
{
	return (void *)(get_gseg_base_address(base, ctxnum) + GRU_DS_BASE +
			GRU_CACHE_LINE_BYTES * line);
}

static inline struct gru_tlb_fault_map *get_tfm(void *base, int ctxnum)
{
	return (struct gru_tlb_fault_map *)(base + GRU_TFM_BASE +
			ctxnum * GRU_HANDLE_STRIDE);
}

static inline struct gru_tlb_global_handle *get_tgh(void *base, int ctxnum)
{
	return (struct gru_tlb_global_handle *)(base + GRU_TGH_BASE +
			ctxnum * GRU_HANDLE_STRIDE);
}

static inline struct gru_control_block_extended *get_cbe(void *base, int ctxnum)
{
	return (struct gru_control_block_extended *)(base + GRU_CBE_BASE +
			ctxnum * GRU_HANDLE_STRIDE);
}

static inline struct gru_tlb_fault_handle *get_tfh(void *base, int ctxnum)
{
	return (struct gru_tlb_fault_handle *)(base + GRU_TFH_BASE +
			ctxnum * GRU_HANDLE_STRIDE);
}

static inline struct gru_context_configuration_handle *get_cch(void *base,
					int ctxnum)
{
	return (struct gru_context_configuration_handle *)(base +
		GRU_CCH_BASE + ctxnum * GRU_HANDLE_STRIDE);
}

static inline unsigned long get_cb_number(void *cb)
{
	return (((unsigned long)cb - GRU_CB_BASE) % GRU_GSEG_PAGESIZE) /
					GRU_HANDLE_STRIDE;
}
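
/*
 * Example (illustrative, assumes the gseg base is GRU_GSEG_PAGESIZE
 * aligned): for a CB address built with
 * get_gseg_base_address_cb(base, ctxnum, 17), get_cb_number() returns 17,
 * since the CB sits at offset GRU_CB_BASE + 17 * GRU_HANDLE_STRIDE within
 * its gseg page.
 */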

/* Byte offset to a specific GRU chiplet. (p=pnode, c=chiplet (0 or 1)) */
static inline unsigned long gru_chiplet_paddr(unsigned long paddr, int pnode,
							int chiplet)
{
	return paddr + GRU_SIZE * (2 * pnode + chiplet);
}

static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
{
	return vaddr + GRU_SIZE * (2 * pnode + chiplet);
}
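
/*
 * Example: with GRU_CHIPLETS_PER_HUB == 2, chiplet 1 on pnode 2 starts
 * GRU_SIZE * (2 * 2 + 1) = 5 * 128MB beyond the base GRU address.
 */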

static inline struct gru_control_block_extended *gru_tfh_to_cbe(
					struct gru_tlb_fault_handle *tfh)
{
	unsigned long cbe;

	cbe = (unsigned long)tfh - GRU_TFH_BASE + GRU_CBE_BASE;
	return (struct gru_control_block_extended *)cbe;
}
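
/*
 * gru_tfh_to_cbe() relies on the TFH and CBE arrays sharing the same
 * GRU_HANDLE_STRIDE and per-CB ordering, so rebasing a TFH address from
 * GRU_TFH_BASE to GRU_CBE_BASE yields the CBE for the same CB.
 */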

/*
 * Global TLB Fault Map
 *	Bitmap of outstanding TLB misses needing interrupt/polling service.
 *
 */
struct gru_tlb_fault_map {
	unsigned long fault_bits[BITS_TO_LONGS(GRU_NUM_CBE)];
	unsigned long fill0[2];
	unsigned long done_bits[BITS_TO_LONGS(GRU_NUM_CBE)];
	unsigned long fill1[2];
};
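
/*
 * Sketch (illustrative; uses the generic kernel bitmap iterator, and
 * service_fault() is a hypothetical handler): scanning a TFM for CBs with
 * outstanding faults could look like
 *
 *	int cb;
 *
 *	for_each_set_bit(cb, tfm->fault_bits, GRU_NUM_CBE)
 *		service_fault(cb);
 */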

/*
 * TGH - TLB Global Handle
 *	Used for TLB flushing.
 *
 */
struct gru_tlb_global_handle {
	unsigned int cmd:1;		/* DW 0 */
	unsigned int delresp:1;
	unsigned int opc:1;
	unsigned int fill1:5;

	unsigned int fill2:8;

	unsigned int status:2;
	unsigned long fill3:2;
	unsigned int state:3;
	unsigned long fill4:1;

	unsigned int cause:3;
	unsigned long fill5:37;

	unsigned long vaddr:64;		/* DW 1 */

	unsigned int asid:24;		/* DW 2 */
	unsigned int fill6:8;

	unsigned int pagesize:5;
	unsigned int fill7:11;

	unsigned int global:1;
	unsigned int fill8:15;

	unsigned long vaddrmask:39;	/* DW 3 */
	unsigned int fill9:9;
	unsigned int n:10;
	unsigned int fill10:6;

	unsigned int ctxbitmap:16;	/* DW 4 */
	unsigned long fill11[3];
};

enum gru_tgh_cmd {
	TGHCMD_START
};

enum gru_tgh_opc {
	TGHOP_TLBNOP,
	TGHOP_TLBINV
};

enum gru_tgh_status {
	TGHSTATUS_IDLE,
	TGHSTATUS_EXCEPTION,
	TGHSTATUS_ACTIVE
};

enum gru_tgh_state {
	TGHSTATE_IDLE,
	TGHSTATE_PE_INVAL,
	TGHSTATE_INTERRUPT_INVAL,
	TGHSTATE_WAITDONE,
	TGHSTATE_RESTART_CTX,
};

enum gru_tgh_cause {
	TGHCAUSE_RR_ECC,
	TGHCAUSE_TLB_ECC,
	TGHCAUSE_LRU_ECC,
	TGHCAUSE_PS_ECC,
	TGHCAUSE_MUL_ERR,
	TGHCAUSE_DATA_ERR,
	TGHCAUSE_SW_FORCE
};

/*
 * TFH - TLB Fault Handle
 *	Used for TLB dropins into the GRU TLB.
 *
 */
struct gru_tlb_fault_handle {
	unsigned int cmd:1;		/* DW 0 - low 32 */
	unsigned int delresp:1;
	unsigned int fill0:2;
	unsigned int opc:3;
	unsigned int fill1:9;

	unsigned int status:2;
	unsigned int fill2:2;
	unsigned int state:3;
	unsigned int fill3:1;

	unsigned int cause:6;
	unsigned int cb_int:1;
	unsigned int fill4:1;

	unsigned int indexway:12;	/* DW 0 - high 32 */
	unsigned int fill5:4;

	unsigned int ctxnum:4;
	unsigned int fill6:12;

	unsigned long missvaddr:64;	/* DW 1 */

	unsigned int missasid:24;	/* DW 2 */
	unsigned int fill7:8;
	unsigned int fillasid:24;
	unsigned int dirty:1;
	unsigned int gaa:2;
	unsigned long fill8:5;

	unsigned long pfn:41;		/* DW 3 */
	unsigned int fill9:7;
	unsigned int pagesize:5;
	unsigned int fill10:11;

	unsigned long fillvaddr:64;	/* DW 4 */

	unsigned long fill11[3];
};

enum gru_tfh_opc {
	TFHOP_NOOP,
	TFHOP_RESTART,
	TFHOP_WRITE_ONLY,
	TFHOP_WRITE_RESTART,
	TFHOP_EXCEPTION,
	TFHOP_USER_POLLING_MODE = 7,
};

enum tfh_status {
	TFHSTATUS_IDLE,
	TFHSTATUS_EXCEPTION,
	TFHSTATUS_ACTIVE,
};

enum tfh_state {
	TFHSTATE_INACTIVE,
	TFHSTATE_IDLE,
	TFHSTATE_MISS_UPM,
	TFHSTATE_MISS_FMM,
	TFHSTATE_HW_ERR,
	TFHSTATE_WRITE_TLB,
	TFHSTATE_RESTART_CBR,
};

/* TFH cause bits */
enum tfh_cause {
	TFHCAUSE_NONE,
	TFHCAUSE_TLB_MISS,
	TFHCAUSE_TLB_MOD,
	TFHCAUSE_HW_ERROR_RR,
	TFHCAUSE_HW_ERROR_MAIN_ARRAY,
	TFHCAUSE_HW_ERROR_VALID,
	TFHCAUSE_HW_ERROR_PAGESIZE,
	TFHCAUSE_INSTRUCTION_EXCEPTION,
	TFHCAUSE_UNCORRECTIBLE_ERROR,
};

/* GAA values */
#define GAA_RAM 0x0
#define GAA_NCRAM 0x2
#define GAA_MMIO 0x1
#define GAA_REGISTER 0x3

/* GRU paddr shift for pfn. (NOTE: shift is NOT by actual pagesize) */
#define GRU_PADDR_SHIFT 12
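
/*
 * Example: the TFH pfn field is always the physical address shifted by
 * GRU_PADDR_SHIFT, regardless of the pagesize being dropped in, e.g.
 *	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
 * for 4k and 2m pages alike.
 */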

/*
 * CCH - Context Configuration Handle
 *	Used to allocate resources to a GSEG context.
 *
 */
struct gru_context_configuration_handle {
	unsigned int cmd:1;			/* DW0 */
	unsigned int delresp:1;
	unsigned int opc:3;
	unsigned int unmap_enable:1;
	unsigned int req_slice_set_enable:1;
	unsigned int req_slice:2;
	unsigned int cb_int_enable:1;
	unsigned int tlb_int_enable:1;
	unsigned int tfm_fault_bit_enable:1;
	unsigned int tlb_int_select:4;

	unsigned int status:2;
	unsigned int state:2;
	unsigned int reserved2:4;

	unsigned int cause:4;
	unsigned int tfm_done_bit_enable:1;
	unsigned int unused:3;

	unsigned int dsr_allocation_map;

	unsigned long cbr_allocation_map;	/* DW1 */

	unsigned int asid[8];			/* DW 2 - 5 */
	unsigned short sizeavail[8];		/* DW 6 - 7 */
} __attribute__ ((packed));

enum gru_cch_opc {
	CCHOP_START = 1,
	CCHOP_ALLOCATE,
	CCHOP_INTERRUPT,
	CCHOP_DEALLOCATE,
	CCHOP_INTERRUPT_SYNC,
};

enum gru_cch_status {
	CCHSTATUS_IDLE,
	CCHSTATUS_EXCEPTION,
	CCHSTATUS_ACTIVE,
};

enum gru_cch_state {
	CCHSTATE_INACTIVE,
	CCHSTATE_MAPPED,
	CCHSTATE_ACTIVE,
	CCHSTATE_INTERRUPTED,
};

/* CCH Exception cause */
enum gru_cch_cause {
	CCHCAUSE_REGION_REGISTER_WRITE_ERROR = 1,
	CCHCAUSE_ILLEGAL_OPCODE = 2,
	CCHCAUSE_INVALID_START_REQUEST = 3,
	CCHCAUSE_INVALID_ALLOCATION_REQUEST = 4,
	CCHCAUSE_INVALID_DEALLOCATION_REQUEST = 5,
	CCHCAUSE_INVALID_INTERRUPT_REQUEST = 6,
	CCHCAUSE_CCH_BUSY = 7,
	CCHCAUSE_NO_CBRS_TO_ALLOCATE = 8,
	CCHCAUSE_BAD_TFM_CONFIG = 9,
	CCHCAUSE_CBR_RESOURCES_OVERSUBSCRIPED = 10,
	CCHCAUSE_DSR_RESOURCES_OVERSUBSCRIPED = 11,
	CCHCAUSE_CBR_DEALLOCATION_ERROR = 12,
};

/*
 * CBE - Control Block Extended
 *	Maintains internal GRU state for active CBs.
 *
 */
struct gru_control_block_extended {
	unsigned int reserved0:1;	/* DW 0 - low */
	unsigned int imacpy:3;
	unsigned int reserved1:4;
	unsigned int xtypecpy:3;
	unsigned int iaa0cpy:2;
	unsigned int iaa1cpy:2;
	unsigned int reserved2:1;
	unsigned int opccpy:8;
	unsigned int exopccpy:8;

	unsigned int idef2cpy:22;	/* DW 0 - high */
	unsigned int reserved3:10;

	unsigned int idef4cpy:22;	/* DW 1 */
	unsigned int reserved4:10;
	unsigned int idef4upd:22;
	unsigned int reserved5:10;

	unsigned long idef1upd:64;	/* DW 2 */

	unsigned long idef5cpy:64;	/* DW 3 */

	unsigned long idef6cpy:64;	/* DW 4 */

	unsigned long idef3upd:64;	/* DW 5 */

	unsigned long idef5upd:64;	/* DW 6 */

	unsigned int idef2upd:22;	/* DW 7 */
	unsigned int reserved6:10;

	unsigned int ecause:20;
	unsigned int cbrstate:4;
	unsigned int cbrexecstatus:8;
};

/* CBE fields for active BCOPY instructions */
#define cbe_baddr0 idef1upd
#define cbe_baddr1 idef3upd
#define cbe_src_cl idef6cpy
#define cbe_nelemcur idef5upd

enum gru_cbr_state {
	CBRSTATE_INACTIVE,
	CBRSTATE_IDLE,
	CBRSTATE_PE_CHECK,
	CBRSTATE_QUEUED,
	CBRSTATE_WAIT_RESPONSE,
	CBRSTATE_INTERRUPTED,
	CBRSTATE_INTERRUPTED_MISS_FMM,
	CBRSTATE_BUSY_INTERRUPT_MISS_FMM,
	CBRSTATE_INTERRUPTED_MISS_UPM,
	CBRSTATE_BUSY_INTERRUPTED_MISS_UPM,
	CBRSTATE_REQUEST_ISSUE,
	CBRSTATE_BUSY_INTERRUPT,
};

/* CBE cbrexecstatus bits - defined in gru_instructions.h */
/* CBE ecause bits - defined in gru_instructions.h */

/*
 * Convert a processor pagesize into the strange encoded pagesize used by the
 * GRU. Processor pagesize is encoded as the log2 of bytes per page (i.e.,
 * PAGE_SHIFT).
 *	pagesize	log pagesize	grupagesize
 *	   4k		    12		    0
 *	  16k		    14		    1
 *	  64k		    16		    2
 *	 256k		    18		    3
 *	   1m		    20		    4
 *	   2m		    21		    5
 *	   4m		    22		    6
 *	  16m		    24		    7
 *	  64m		    26		    8
 *	  ...
 */
#define GRU_PAGESIZE(sh) ((((sh) > 20 ? (sh) + 2 : (sh)) >> 1) - 6)
#define GRU_SIZEAVAIL(sh) (1UL << GRU_PAGESIZE(sh))
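
/*
 * Worked examples of the encoding above:
 *	GRU_PAGESIZE(12) == 0		(4k pages)
 *	GRU_PAGESIZE(16) == 2		(64k pages)
 *	GRU_PAGESIZE(21) == 5		(2m pages; the "+ 2" handles odd shifts > 20)
 *	GRU_SIZEAVAIL(21) == 0x20	(bit 5 in a sizeavail mask)
 */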

/* minimum TLB purge count to ensure a full purge */
#define GRUMAXINVAL 1024UL

int cch_allocate(struct gru_context_configuration_handle *cch);
int cch_start(struct gru_context_configuration_handle *cch);
int cch_interrupt(struct gru_context_configuration_handle *cch);
int cch_deallocate(struct gru_context_configuration_handle *cch);
int cch_interrupt_sync(struct gru_context_configuration_handle *cch);
int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr,
	unsigned long vaddrmask, int asid, int pagesize, int global, int n,
	unsigned short ctxbitmap);
int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
	int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
	int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
void tfh_restart(struct gru_tlb_fault_handle *tfh);
void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh);
void tfh_exception(struct gru_tlb_fault_handle *tfh);
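
/*
 * Sketch of a TLB dropin using the functions above (illustrative only;
 * lookup_paddr() and the surrounding control flow are hypothetical, and the
 * real fault path lives in grufault.c):
 *
 *	struct gru_tlb_fault_handle *tfh = get_tfh(gru_base, ctxnum);
 *	unsigned long paddr = lookup_paddr(tfh->missvaddr);
 *
 *	tfh_write_restart(tfh, paddr, GAA_RAM, tfh->missvaddr,
 *			  tfh->missasid, dirty, pageshift);
 */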

#endif /* __GRUHANDLES_H__ */