/* entry.S: FR-V entry
 *
 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *
 * Entry to the kernel is "interesting":
 *  (1) There are no stack pointers, not even for the kernel
 *  (2) General Registers should not be clobbered
 *  (3) There are no kernel-only data registers
 *  (4) Since all addressing modes are wrt a General Register, no global
 *      variables can be reached
 *
 * We deal with this by declaring that we shall kill GR28 on entering the
 * kernel from userspace
 *
 * However, since break interrupts can interrupt the CPU even when PSR.ET==0,
 * they can't rely on GR28 to be anything useful, and so need to clobber a
 * separate register (GR31). Break interrupts are managed in break.S
 *
 * GR29 _is_ saved, and holds the current task pointer globally
 *
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
#include <asm/errno.h>
#include <asm/cache.h>
#include <asm/spr-regs.h>

#define nr_syscalls ((syscall_table_size)/4)

	.section	.text..entry
	.balign		4

# debug progress markers: when the bodies are uncommented, these write the
# given code out to a pair of board diagnostic (LED) registers; as shipped
# they expand to nothing
.macro LEDS val
#	sethi.p		%hi(0xe1200004),gr30
#	setlo		%lo(0xe1200004),gr30
#	setlos		#~\val,gr31
#	st		gr31,@(gr30,gr0)
#	sethi.p		%hi(0xffc00100),gr30
#	setlo		%lo(0xffc00100),gr30
#	sth		gr0,@(gr30,gr0)
#	membar
.endm

.macro LEDS32
#	not		gr31,gr31
#	sethi.p		%hi(0xe1200004),gr30
#	setlo		%lo(0xe1200004),gr30
#	st.p		gr31,@(gr30,gr0)
#	srli		gr31,#16,gr31
#	sethi.p		%hi(0xffc00100),gr30
#	setlo		%lo(0xffc00100),gr30
#	sth		gr31,@(gr30,gr0)
#	membar
.endm

###############################################################################
#
# entry point for External interrupts received whilst executing userspace code
#
###############################################################################
	.globl		__entry_uspace_external_interrupt
	.type		__entry_uspace_external_interrupt,@function
__entry_uspace_external_interrupt:
	LEDS		0x6200
	sethi.p		%hi(__kernel_frame0_ptr),gr28
	setlo		%lo(__kernel_frame0_ptr),gr28
	ldi		@(gr28,#0),gr28

	# handle h/w single-step through exceptions
	sti		gr0,@(gr28,#REG__STATUS)

	.globl		__entry_uspace_external_interrupt_reentry
__entry_uspace_external_interrupt_reentry:
	LEDS		0x6201

	setlos		#REG__END,gr30
	dcpl		gr28,gr30,#0

	# finish building the exception frame
	sti		sp,  @(gr28,#REG_SP)
	stdi		gr2, @(gr28,#REG_GR(2))
	stdi		gr4, @(gr28,#REG_GR(4))
	stdi		gr6, @(gr28,#REG_GR(6))
	stdi		gr8, @(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr0, @(gr28,#REG_GR(28))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi.p		gr30,@(gr28,#REG_GR(30))

	# set up the kernel stack pointer
	ori		gr28,0,sp

	movsg		tbr ,gr20
	movsg		psr ,gr22
	movsg		pcsr,gr21
	movsg		isr ,gr23
	movsg		ccr ,gr24
	movsg		cccr,gr25
	movsg		lr  ,gr26
	movsg		lcr ,gr27

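	# the PSR image saved in the frame is reconstructed below so that S
	# holds the pre-interrupt supervisor state taken from PS, with PS and
	# ET cleared; REG_SYSCALLNO is set to -1 to show that this frame does
	# not represent a system call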
	setlos.p	#-1,gr4
	andi		gr22,#PSR_PS,gr5	/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5
	or		gr6,gr5,gr5
	andi		gr5,#~PSR_ET,gr5

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi.p		gr4,@(gr28,#REG_GNER0)

	# interrupts start off fully disabled in the interrupt handler
	subcc		gr0,gr0,gr0,icc2	/* set Z and clear C */

	# set up kernel global registers
	sethi.p		%hi(__kernel_current_task),gr5
	setlo		%lo(__kernel_current_task),gr5
	sethi.p		%hi(_gp),gr16
	setlo		%lo(_gp),gr16
	ldi		@(gr5,#0),gr29
	ldi.p		@(gr29,#4),gr15		; __current_thread_info = current->thread_info

	# make sure we (the kernel) get div-zero and misalignment exceptions
	setlos		#ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
	movgs		gr5,isr

	# switch to the kernel trap table
	sethi.p		%hi(__entry_kerneltrap_table),gr6
	setlo		%lo(__entry_kerneltrap_table),gr6
	movgs		gr6,tbr

	# set the return address
	sethi.p		%hi(__entry_return_from_user_interrupt),gr4
	setlo		%lo(__entry_return_from_user_interrupt),gr4
	movgs		gr4,lr

	# raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
	movsg		psr,gr4

	ori		gr4,#PSR_PIL_14,gr4
	movgs		gr4,psr
	ori		gr4,#PSR_PIL_14|PSR_ET,gr4
	movgs		gr4,psr

	LEDS		0x6202
	bra		do_IRQ

	.size		__entry_uspace_external_interrupt,.-__entry_uspace_external_interrupt

###############################################################################
#
# entry point for External interrupts received whilst executing kernel code
#  - on arriving here, the following registers should already be set up:
#	GR15	- current thread_info struct pointer
#	GR16	- kernel GP-REL pointer
#	GR29	- current task struct pointer
#	TBR	- kernel trap vector table
#	ISR	- kernel's preferred integer controls
#
###############################################################################
	.globl		__entry_kernel_external_interrupt
	.type		__entry_kernel_external_interrupt,@function
__entry_kernel_external_interrupt:
	LEDS		0x6210
//	sub		sp,gr15,gr31
//	LEDS32

	# set up the stack pointer
	or.p		sp,gr0,gr30
	subi		sp,#REG__END,sp
	sti		gr30,@(sp,#REG_SP)

	# handle h/w single-step through exceptions
	sti		gr0,@(sp,#REG__STATUS)

	.globl		__entry_kernel_external_interrupt_reentry
__entry_kernel_external_interrupt_reentry:
	LEDS		0x6211

	# set up the exception frame
	setlos		#REG__END,gr30
	dcpl		sp,gr30,#0

	sti.p		gr28,@(sp,#REG_GR(28))
	ori		sp,0,gr28

	# finish building the exception frame
	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi.p		gr30,@(gr28,#REG_GR(30))

	# note virtual interrupts will be fully enabled upon return
	subicc		gr0,#1,gr0,icc2		/* clear Z, set C */
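	# (in the ICC2 convention used here, Z clear with C set means interrupts
	# are virtually enabled and nothing is deferred; Z set means virtually
	# disabled, and a cleared C flags that a real interrupt has been held
	# off in the meantime; see the virtual disable/reenable stubs below and
	# break.S)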

	movsg		tbr ,gr20
	movsg		psr ,gr22
	movsg		pcsr,gr21
	movsg		isr ,gr23
	movsg		ccr ,gr24
	movsg		cccr,gr25
	movsg		lr  ,gr26
	movsg		lcr ,gr27

	setlos.p	#-1,gr4
	andi		gr22,#PSR_PS,gr5	/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5
	or		gr6,gr5,gr5
	andi.p		gr5,#~PSR_ET,gr5

	# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
	#  - for an explanation of how it works, see: Documentation/frv/atomic-ops.txt
	andi		gr25,#~0xc0,gr25

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi.p		gr4,@(gr28,#REG_GNER0)

	# interrupts start off fully disabled in the interrupt handler
	subcc		gr0,gr0,gr0,icc2	/* set Z and clear C */

	# set the return address
	sethi.p		%hi(__entry_return_from_kernel_interrupt),gr4
	setlo		%lo(__entry_return_from_kernel_interrupt),gr4
	movgs		gr4,lr

	# clear power-saving mode flags
	movsg		hsr0,gr4
	andi		gr4,#~HSR0_PDM,gr4
	movgs		gr4,hsr0

	# raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_PIL_14,gr4
	movgs		gr4,psr
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	LEDS		0x6212
	bra		do_IRQ

	.size		__entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt

###############################################################################
#
# deal with interrupts that were actually virtually disabled
#  - we need to really disable them, flag the fact and return immediately
#  - if you change this, you must alter break.S also
#
###############################################################################
	.balign		L1_CACHE_BYTES
	.globl		__entry_kernel_external_interrupt_virtually_disabled
	.type		__entry_kernel_external_interrupt_virtually_disabled,@function
__entry_kernel_external_interrupt_virtually_disabled:
	movsg		psr,gr30
	andi		gr30,#~PSR_PIL,gr30
	ori		gr30,#PSR_PIL_14,gr30	; debugging interrupts only
	movgs		gr30,psr
	subcc		gr0,gr0,gr0,icc2	; leave Z set, clear C
	rett		#0

	.size		__entry_kernel_external_interrupt_virtually_disabled,.-__entry_kernel_external_interrupt_virtually_disabled

###############################################################################
#
# deal with re-enablement of interrupts that were pending when virtually re-enabled
#  - set ICC2.C, re-enable the real interrupts and return
#  - we can clear ICC2.Z because we shouldn't be here if it's not 0 [due to TIHI]
#  - if you change this, you must alter break.S also
#
###############################################################################
	.balign		L1_CACHE_BYTES
	.globl		__entry_kernel_external_interrupt_virtual_reenable
	.type		__entry_kernel_external_interrupt_virtual_reenable,@function
__entry_kernel_external_interrupt_virtual_reenable:
	movsg		psr,gr30
	andi		gr30,#~PSR_PIL,gr30	; re-enable interrupts
	movgs		gr30,psr
	subicc		gr0,#1,gr0,icc2		; clear Z, set C
	rett		#0

	.size		__entry_kernel_external_interrupt_virtual_reenable,.-__entry_kernel_external_interrupt_virtual_reenable

###############################################################################
#
# entry point for Software and Program interrupts generated whilst executing userspace code
#
###############################################################################
	.globl		__entry_uspace_softprog_interrupt
	.type		__entry_uspace_softprog_interrupt,@function
	.globl		__entry_uspace_handle_mmu_fault
__entry_uspace_softprog_interrupt:
	LEDS		0x6000
#ifdef CONFIG_MMU
	movsg		ear0,gr28
__entry_uspace_handle_mmu_fault:
	movgs		gr28,scr2
#endif
	sethi.p		%hi(__kernel_frame0_ptr),gr28
	setlo		%lo(__kernel_frame0_ptr),gr28
	ldi		@(gr28,#0),gr28

	# handle h/w single-step through exceptions
	sti		gr0,@(gr28,#REG__STATUS)

	.globl		__entry_uspace_softprog_interrupt_reentry
__entry_uspace_softprog_interrupt_reentry:
	LEDS		0x6001

	setlos		#REG__END,gr30
	dcpl		gr28,gr30,#0

	# set up the kernel stack pointer
	sti.p		sp,@(gr28,#REG_SP)
	ori		gr28,0,sp
	sti		gr0,@(gr28,#REG_GR(28))

	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))

	movsg		tbr,gr20
	movsg		pcsr,gr21
	movsg		psr,gr22

	sethi.p		%hi(__entry_return_from_user_exception),gr23
	setlo		%lo(__entry_return_from_user_exception),gr23

	bra		__entry_common

	.size		__entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt

	# single-stepping was disabled on entry to a TLB handler that then faulted
#ifdef CONFIG_MMU
	.globl		__entry_uspace_handle_mmu_fault_sstep
__entry_uspace_handle_mmu_fault_sstep:
	movgs		gr28,scr2
	sethi.p		%hi(__kernel_frame0_ptr),gr28
	setlo		%lo(__kernel_frame0_ptr),gr28
	ldi		@(gr28,#0),gr28

	# flag single-step re-enablement
	sti		gr0,@(gr28,#REG__STATUS)
	bra		__entry_uspace_softprog_interrupt_reentry
#endif


###############################################################################
#
# entry point for Software and Program interrupts generated whilst executing kernel code
#
###############################################################################
	.globl		__entry_kernel_softprog_interrupt
	.type		__entry_kernel_softprog_interrupt,@function
__entry_kernel_softprog_interrupt:
	LEDS		0x6004

#ifdef CONFIG_MMU
	movsg		ear0,gr30
	movgs		gr30,scr2
#endif

	.globl		__entry_kernel_handle_mmu_fault
__entry_kernel_handle_mmu_fault:
	# set up the stack pointer
	subi		sp,#REG__END,sp
	sti		sp,@(sp,#REG_SP)
	sti		sp,@(sp,#REG_SP-4)
	andi		sp,#~7,sp

	# handle h/w single-step through exceptions
	sti		gr0,@(sp,#REG__STATUS)

	.globl		__entry_kernel_softprog_interrupt_reentry
__entry_kernel_softprog_interrupt_reentry:
	LEDS		0x6005

	setlos		#REG__END,gr30
	dcpl		sp,gr30,#0

	# set up the exception frame
	sti.p		gr28,@(sp,#REG_GR(28))
	ori		sp,0,gr28

	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))

	ldi		@(sp,#REG_SP),gr22	/* reconstruct the old SP */
	addi		gr22,#REG__END,gr22
	sti		gr22,@(sp,#REG_SP)

	# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
	#  - for an explanation of how it works, see: Documentation/frv/atomic-ops.txt
	movsg		cccr,gr20
	andi		gr20,#~0xc0,gr20
	movgs		gr20,cccr

	movsg		tbr,gr20
	movsg		pcsr,gr21
	movsg		psr,gr22

	sethi.p		%hi(__entry_return_from_kernel_exception),gr23
	setlo		%lo(__entry_return_from_kernel_exception),gr23
	bra		__entry_common

	.size		__entry_kernel_softprog_interrupt,.-__entry_kernel_softprog_interrupt

	# single-stepping was disabled on entry to a TLB handler that then faulted
#ifdef CONFIG_MMU
	.globl		__entry_kernel_handle_mmu_fault_sstep
__entry_kernel_handle_mmu_fault_sstep:
	# set up the stack pointer
	subi		sp,#REG__END,sp
	sti		sp,@(sp,#REG_SP)
	sti		sp,@(sp,#REG_SP-4)
	andi		sp,#~7,sp

	# flag single-step re-enablement
	sethi		#REG__STATUS_STEP,gr30
	sti		gr30,@(sp,#REG__STATUS)
	bra		__entry_kernel_softprog_interrupt_reentry
#endif


###############################################################################
#
# the rest of the kernel entry point code
#  - on arriving here, the following registers should be set up:
#	GR1	- kernel stack pointer
#	GR7	- syscall number (trap 0 only)
#	GR8-13	- syscall args (trap 0 only)
#	GR20	- saved TBR
#	GR21	- saved PC
#	GR22	- saved PSR
#	GR23	- return handler address
#	GR28	- exception frame on stack
#	SCR2	- saved EAR0 where applicable (clobbered by ICI & ICEF insns on FR451)
#	PSR	- PSR.S 1, PSR.ET 0
#
###############################################################################
	.globl		__entry_common
	.type		__entry_common,@function
__entry_common:
	LEDS		0x6008

	# finish building the exception frame
	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi		gr30,@(gr28,#REG_GR(30))

	movsg		lcr ,gr27
	movsg		lr  ,gr26
	movgs		gr23,lr
	movsg		cccr,gr25
	movsg		ccr ,gr24
	movsg		isr ,gr23

	setlos.p	#-1,gr4
	andi		gr22,#PSR_PS,gr5	/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5
	or		gr6,gr5,gr5
	andi		gr5,#~PSR_ET,gr5

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi.p		gr4,@(gr28,#REG_GNER0)

	# set up virtual interrupt disablement
	subicc		gr0,#1,gr0,icc2		/* clear Z flag, set C flag */

	# set up kernel global registers
	sethi.p		%hi(__kernel_current_task),gr5
	setlo		%lo(__kernel_current_task),gr5
	sethi.p		%hi(_gp),gr16
	setlo		%lo(_gp),gr16
	ldi		@(gr5,#0),gr29
	ldi		@(gr29,#4),gr15		; __current_thread_info = current->thread_info

	# switch to the kernel trap table
	sethi.p		%hi(__entry_kerneltrap_table),gr6
	setlo		%lo(__entry_kerneltrap_table),gr6
	movgs		gr6,tbr

	# make sure we (the kernel) get div-zero and misalignment exceptions
	setlos		#ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
	movgs		gr5,isr

	# clear power-saving mode flags
	movsg		hsr0,gr4
	andi		gr4,#~HSR0_PDM,gr4
	movgs		gr4,hsr0

	# multiplex again using old TBR as a guide
	setlos.p	#TBR_TT,gr3
	sethi		%hi(__entry_vector_table),gr6
	and.p		gr20,gr3,gr5
	setlo		%lo(__entry_vector_table),gr6
	srli		gr5,#2,gr5
	ld		@(gr5,gr6),gr5

	LEDS		0x6009
	jmpl		@(gr5,gr0)


	.size		__entry_common,.-__entry_common
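
# Each of the exception handler stubs below reads the status/address SPRs it
# needs into the argument registers before setting PSR.ET, since taking a
# further exception would overwrite them, and then jumps to the C handler;
# the handler returns through the LR value installed by __entry_common.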

###############################################################################
#
# handle instruction MMU fault
#
###############################################################################
#ifdef CONFIG_MMU
	.globl		__entry_insn_mmu_fault
__entry_insn_mmu_fault:
	LEDS		0x6010
	setlos		#0,gr8
	movsg		esr0,gr9
	movsg		scr2,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	sethi.p		%hi(do_page_fault),gr5
	setlo		%lo(do_page_fault),gr5
	jmpl		@(gr5,gr0)	; call do_page_fault(0,esr0,ear0)
#endif


###############################################################################
#
# handle instruction access error
#
###############################################################################
	.globl		__entry_insn_access_error
__entry_insn_access_error:
	LEDS		0x6011
	sethi.p		%hi(insn_access_error),gr5
	setlo		%lo(insn_access_error),gr5
	movsg		esfr1,gr8
	movsg		epcr0,gr9
	movsg		esr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call insn_access_error(esfr1,epcr0,esr0)

###############################################################################
#
# handle various instructions of dubious legality
#
###############################################################################
	.globl		__entry_unsupported_trap
	.globl		__entry_illegal_instruction
	.globl		__entry_privileged_instruction
	.globl		__entry_debug_exception
__entry_unsupported_trap:
	subi		gr21,#4,gr21
	sti		gr21,@(gr28,#REG_PC)
__entry_illegal_instruction:
__entry_privileged_instruction:
__entry_debug_exception:
	LEDS		0x6012
	sethi.p		%hi(illegal_instruction),gr5
	setlo		%lo(illegal_instruction),gr5
	movsg		esfr1,gr8
	movsg		epcr0,gr9
	movsg		esr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call ill_insn(esfr1,epcr0,esr0)

###############################################################################
#
# handle atomic operation emulation for userspace
#
###############################################################################
	.globl		__entry_atomic_op
__entry_atomic_op:
	LEDS		0x6012
	sethi.p		%hi(atomic_operation),gr5
	setlo		%lo(atomic_operation),gr5
	movsg		esfr1,gr8
	movsg		epcr0,gr9
	movsg		esr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call atomic_operation(esfr1,epcr0,esr0)

###############################################################################
#
# handle media exception
#
###############################################################################
	.globl		__entry_media_exception
__entry_media_exception:
	LEDS		0x6013
	sethi.p		%hi(media_exception),gr5
	setlo		%lo(media_exception),gr5
	movsg		msr0,gr8
	movsg		msr1,gr9

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call media_excep(msr0,msr1)

###############################################################################
#
# handle data MMU fault
# handle data DAT fault (write-protect exception)
#
###############################################################################
#ifdef CONFIG_MMU
	.globl		__entry_data_mmu_fault
__entry_data_mmu_fault:
	.globl		__entry_data_dat_fault
__entry_data_dat_fault:
	LEDS		0x6014
	setlos		#1,gr8
	movsg		esr0,gr9
	movsg		scr2,gr10	; saved EAR0

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	sethi.p		%hi(do_page_fault),gr5
	setlo		%lo(do_page_fault),gr5
	jmpl		@(gr5,gr0)	; call do_page_fault(1,esr0,ear0)
#endif

###############################################################################
#
# handle data and instruction access exceptions
#
###############################################################################
	.globl		__entry_insn_access_exception
	.globl		__entry_data_access_exception
__entry_insn_access_exception:
__entry_data_access_exception:
	LEDS		0x6016
	sethi.p		%hi(memory_access_exception),gr5
	setlo		%lo(memory_access_exception),gr5
	movsg		esr0,gr8
	movsg		scr2,gr9	; saved EAR0
	movsg		epcr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call memory_access_exception(esr0,ear0,epcr0)

###############################################################################
#
# handle data access error
#
###############################################################################
	.globl		__entry_data_access_error
__entry_data_access_error:
	LEDS		0x6016
	sethi.p		%hi(data_access_error),gr5
	setlo		%lo(data_access_error),gr5
	movsg		esfr1,gr8
	movsg		esr15,gr9
	movsg		ear15,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call data_access_error(esfr1,esr15,ear15)

###############################################################################
#
# handle data store error
#
###############################################################################
	.globl		__entry_data_store_error
__entry_data_store_error:
	LEDS		0x6017
	sethi.p		%hi(data_store_error),gr5
	setlo		%lo(data_store_error),gr5
	movsg		esfr1,gr8
	movsg		esr14,gr9

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call data_store_error(esfr1,esr14)

###############################################################################
#
# handle division exception
#
###############################################################################
	.globl		__entry_division_exception
__entry_division_exception:
	LEDS		0x6018
	sethi.p		%hi(division_exception),gr5
	setlo		%lo(division_exception),gr5
	movsg		esfr1,gr8
	movsg		esr0,gr9
	movsg		isr,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call div_excep(esfr1,esr0,isr)

###############################################################################
#
# handle compound exception
#
###############################################################################
	.globl		__entry_compound_exception
__entry_compound_exception:
	LEDS		0x6019
	sethi.p		%hi(compound_exception),gr5
	setlo		%lo(compound_exception),gr5
	movsg		esfr1,gr8
	movsg		esr0,gr9
	movsg		esr14,gr10
	movsg		esr15,gr11
	movsg		msr0,gr12
	movsg		msr1,gr13

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call comp_excep(esfr1,esr0,esr14,esr15,msr0,msr1)

###############################################################################
#
# handle interrupts and NMIs
#
###############################################################################
	.globl		__entry_do_IRQ
__entry_do_IRQ:
	LEDS		0x6020

	# we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	bra		do_IRQ

	.globl		__entry_do_NMI
__entry_do_NMI:
	LEDS		0x6021

	# we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	bra		do_NMI

###############################################################################
#
# the return path for a newly forked child process
#  - __switch_to() saved the old current pointer in GR8 for us
#
###############################################################################
	.globl		ret_from_fork
ret_from_fork:
	LEDS		0x6100
	call		schedule_tail

	# fork & co. return 0 to child
	setlos.p	#0,gr8
	bra		__syscall_exit

###################################################################################################
#
# Return to user mode is not as complex as all this looks, but we want the
# default path for a system call return to go as quickly as possible, which is
# why some of this is less clear than it otherwise should be.
#
###################################################################################################
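	# system calls arrive here via trap #0 with the syscall number in GR7
	# and the arguments in GR8-GR13; the result is passed back to userspace
	# in GR8 (see __syscall_exit below)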
	.balign		L1_CACHE_BYTES
	.globl		system_call
system_call:
	LEDS		0x6101
	movsg		psr,gr4			; enable exceptions
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	sti		gr7,@(gr28,#REG_SYSCALLNO)
	sti.p		gr8,@(gr28,#REG_ORIG_GR8)

	subicc		gr7,#nr_syscalls,gr0,icc0
	bnc		icc0,#0,__syscall_badsys

	ldi		@(gr15,#TI_FLAGS),gr4
	andicc		gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
	bne		icc0,#0,__syscall_trace_entry

__syscall_call:
	slli.p		gr7,#2,gr7
	sethi		%hi(sys_call_table),gr5
	setlo		%lo(sys_call_table),gr5
	ld		@(gr5,gr7),gr4
	calll		@(gr4,gr0)


###############################################################################
#
# return to interrupted process
#
###############################################################################
__syscall_exit:
	LEDS		0x6300

	sti		gr8,@(gr28,#REG_GR(8))	; save return value

	# rebuild saved psr - execve will change it for init/main.c
	ldi		@(gr28,#REG_PSR),gr22
	srli		gr22,#1,gr5
	andi.p		gr22,#~PSR_PS,gr22
	andi		gr5,#PSR_PS,gr5
	or		gr5,gr22,gr22
	ori		gr22,#PSR_S,gr22

	# keep current PSR in GR23
	movsg		psr,gr23

	# make sure we don't miss an interrupt setting need_resched or sigpending
	# between sampling and the RETT
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

	ldi		@(gr15,#TI_FLAGS),gr4
	sethi.p		%hi(_TIF_ALLWORK_MASK),gr5
	setlo		%lo(_TIF_ALLWORK_MASK),gr5
	andcc		gr4,gr5,gr0,icc0
	bne		icc0,#0,__syscall_exit_work

	# restore all registers and return
__entry_return_direct:
	LEDS		0x6301

	andi		gr22,#~PSR_ET,gr22
	movgs		gr22,psr

	ldi		@(gr28,#REG_ISR),gr23
	lddi		@(gr28,#REG_CCR),gr24
	lddi		@(gr28,#REG_LR) ,gr26
	ldi		@(gr28,#REG_PC) ,gr21
	ldi		@(gr28,#REG_TBR),gr20

	movgs		gr20,tbr
	movgs		gr21,pcsr
	movgs		gr23,isr
	movgs		gr24,ccr
	movgs		gr25,cccr
	movgs		gr26,lr
	movgs		gr27,lcr

	lddi		@(gr28,#REG_GNER0),gr4
	movgs		gr4,gner0
	movgs		gr5,gner1

	lddi		@(gr28,#REG_IACC0),gr4
	movgs		gr4,iacc0h
	movgs		gr5,iacc0l

	lddi		@(gr28,#REG_GR(4)) ,gr4
	lddi		@(gr28,#REG_GR(6)) ,gr6
	lddi		@(gr28,#REG_GR(8)) ,gr8
	lddi		@(gr28,#REG_GR(10)),gr10
	lddi		@(gr28,#REG_GR(12)),gr12
	lddi		@(gr28,#REG_GR(14)),gr14
	lddi		@(gr28,#REG_GR(16)),gr16
	lddi		@(gr28,#REG_GR(18)),gr18
	lddi		@(gr28,#REG_GR(20)),gr20
	lddi		@(gr28,#REG_GR(22)),gr22
	lddi		@(gr28,#REG_GR(24)),gr24
	lddi		@(gr28,#REG_GR(26)),gr26
	ldi		@(gr28,#REG_GR(29)),gr29
	lddi		@(gr28,#REG_GR(30)),gr30

	# check to see if a debugging return is required
	LEDS		0x67f0
	movsg		ccr,gr2
	ldi		@(gr28,#REG__STATUS),gr3
	andicc		gr3,#REG__STATUS_STEP,gr0,icc0
	bne		icc0,#0,__entry_return_singlestep
	movgs		gr2,ccr

	ldi		@(gr28,#REG_SP)    ,sp
	lddi		@(gr28,#REG_GR(2)) ,gr2
	ldi		@(gr28,#REG_GR(28)),gr28

	LEDS		0x67fe
//	movsg		pcsr,gr31
//	LEDS32

#if 0
	# store the current frame in the workram on the FR451
	movgs		gr28,scr2
	sethi.p		%hi(0xfe800000),gr28
	setlo		%lo(0xfe800000),gr28

	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi		gr30,@(gr28,#REG_GR(30))

	movsg		tbr ,gr30
	sti		gr30,@(gr28,#REG_TBR)
	movsg		pcsr,gr30
	sti		gr30,@(gr28,#REG_PC)
	movsg		psr ,gr30
	sti		gr30,@(gr28,#REG_PSR)
	movsg		isr ,gr30
	sti		gr30,@(gr28,#REG_ISR)
	movsg		ccr ,gr30
	movsg		cccr,gr31
	stdi		gr30,@(gr28,#REG_CCR)
	movsg		lr  ,gr30
	movsg		lcr ,gr31
	stdi		gr30,@(gr28,#REG_LR)
	sti		gr0 ,@(gr28,#REG_SYSCALLNO)
	movsg		scr2,gr28
#endif

	rett		#0

	# return via break.S
__entry_return_singlestep:
	movgs		gr2,ccr
	lddi		@(gr28,#REG_GR(2)) ,gr2
	ldi		@(gr28,#REG_SP)    ,sp
	ldi		@(gr28,#REG_GR(28)),gr28
	LEDS		0x67ff
	break
	.globl		__entry_return_singlestep_breaks_here
__entry_return_singlestep_breaks_here:
	nop


###############################################################################
#
# return to a process interrupted in kernel space
#  - we need to consider preemption if that is enabled
#
###############################################################################
	.balign		L1_CACHE_BYTES
__entry_return_from_kernel_exception:
	LEDS		0x6302
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr
	bra		__entry_return_direct

	.balign		L1_CACHE_BYTES
__entry_return_from_kernel_interrupt:
	LEDS		0x6303
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

#ifdef CONFIG_PREEMPT
	ldi		@(gr15,#TI_PRE_COUNT),gr5
	subicc		gr5,#0,gr0,icc0
	beq		icc0,#0,__entry_return_direct

__entry_preempt_need_resched:
	ldi		@(gr15,#TI_FLAGS),gr4
	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
	beq		icc0,#1,__entry_return_direct

	setlos		#PREEMPT_ACTIVE,gr5
	sti		gr5,@(gr15,#TI_FLAGS)

	andi		gr23,#~PSR_PIL,gr23
	movgs		gr23,psr

	call		schedule
	sti		gr0,@(gr15,#TI_PRE_COUNT)

	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr
	bra		__entry_preempt_need_resched
#else
	bra		__entry_return_direct
#endif


###############################################################################
#
# perform work that needs to be done immediately before resumption
#
###############################################################################
	.globl		__entry_return_from_user_exception
	.balign		L1_CACHE_BYTES
__entry_return_from_user_exception:
	LEDS		0x6501

__entry_resume_userspace:
	# make sure we don't miss an interrupt setting need_resched or sigpending
	# between sampling and the RETT
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

__entry_return_from_user_interrupt:
	LEDS		0x6402
	ldi		@(gr15,#TI_FLAGS),gr4
	sethi.p		%hi(_TIF_WORK_MASK),gr5
	setlo		%lo(_TIF_WORK_MASK),gr5
	andcc		gr4,gr5,gr0,icc0
	beq		icc0,#1,__entry_return_direct

__entry_work_pending:
	LEDS		0x6404
	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
	beq		icc0,#1,__entry_work_notifysig

__entry_work_resched:
	LEDS		0x6408
	movsg		psr,gr23
	andi		gr23,#~PSR_PIL,gr23
	movgs		gr23,psr
	call		schedule
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

	LEDS		0x6401
	ldi		@(gr15,#TI_FLAGS),gr4
	sethi.p		%hi(_TIF_WORK_MASK),gr5
	setlo		%lo(_TIF_WORK_MASK),gr5
	andcc		gr4,gr5,gr0,icc0
	beq		icc0,#1,__entry_return_direct
	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
	bne		icc0,#1,__entry_work_resched

__entry_work_notifysig:
	LEDS		0x6410
	ori.p		gr4,#0,gr8
	call		do_notify_resume
	bra		__entry_resume_userspace

	# perform syscall entry tracing
__syscall_trace_entry:
	LEDS		0x6320
	call		syscall_trace_entry

	lddi.p		@(gr28,#REG_GR(8)) ,gr8
	ori		gr8,#0,gr7		; syscall_trace_entry() returned new syscallno
	lddi		@(gr28,#REG_GR(10)),gr10
	lddi.p		@(gr28,#REG_GR(12)),gr12

	subicc		gr7,#nr_syscalls,gr0,icc0
	bnc		icc0,#0,__syscall_badsys
	bra		__syscall_call

	# perform syscall exit tracing
__syscall_exit_work:
	LEDS		0x6340
	andicc		gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
	beq		icc0,#1,__entry_work_pending

	movsg		psr,gr23
	andi		gr23,#~PSR_PIL,gr23	; could let syscall_trace_exit() call schedule()
	movgs		gr23,psr

	call		syscall_trace_exit
	bra		__entry_resume_userspace

__syscall_badsys:
	LEDS		0x6380
	setlos		#-ENOSYS,gr8
	sti		gr8,@(gr28,#REG_GR(8))	; save return value
	bra		__entry_resume_userspace


###############################################################################
#
# syscall vector table
#
###############################################################################
	.section .rodata
ALIGN
	.globl		sys_call_table
sys_call_table:
	.long sys_restart_syscall	/* 0 - old "setup()" system call, used for restarting */
	.long sys_exit
	.long sys_fork
	.long sys_read
	.long sys_write
	.long sys_open		/* 5 */
	.long sys_close
	.long sys_waitpid
	.long sys_creat
	.long sys_link
	.long sys_unlink	/* 10 */
	.long sys_execve
	.long sys_chdir
	.long sys_time
	.long sys_mknod
	.long sys_chmod		/* 15 */
	.long sys_lchown16
	.long sys_ni_syscall	/* old break syscall holder */
	.long sys_stat
	.long sys_lseek
	.long sys_getpid	/* 20 */
	.long sys_mount
	.long sys_oldumount
	.long sys_setuid16
	.long sys_getuid16
	.long sys_ni_syscall	// sys_stime		/* 25 */
	.long sys_ptrace
	.long sys_alarm
	.long sys_fstat
	.long sys_pause
	.long sys_utime		/* 30 */
	.long sys_ni_syscall	/* old stty syscall holder */
	.long sys_ni_syscall	/* old gtty syscall holder */
	.long sys_access
	.long sys_nice
	.long sys_ni_syscall	/* 35 */	/* old ftime syscall holder */
	.long sys_sync
	.long sys_kill
	.long sys_rename
	.long sys_mkdir
	.long sys_rmdir		/* 40 */
	.long sys_dup
	.long sys_pipe
	.long sys_times
	.long sys_ni_syscall	/* old prof syscall holder */
	.long sys_brk		/* 45 */
	.long sys_setgid16
	.long sys_getgid16
	.long sys_ni_syscall	// sys_signal
	.long sys_geteuid16
	.long sys_getegid16	/* 50 */
	.long sys_acct
	.long sys_umount	/* recycled never used phys() */
	.long sys_ni_syscall	/* old lock syscall holder */
	.long sys_ioctl
	.long sys_fcntl		/* 55 */
	.long sys_ni_syscall	/* old mpx syscall holder */
	.long sys_setpgid
	.long sys_ni_syscall	/* old ulimit syscall holder */
	.long sys_ni_syscall	/* old old uname syscall */
	.long sys_umask		/* 60 */
	.long sys_chroot
	.long sys_ustat
	.long sys_dup2
	.long sys_getppid
	.long sys_getpgrp	/* 65 */
	.long sys_setsid
	.long sys_sigaction
	.long sys_ni_syscall	// sys_sgetmask
	.long sys_ni_syscall	// sys_ssetmask
	.long sys_setreuid16	/* 70 */
	.long sys_setregid16
	.long sys_sigsuspend
	.long sys_ni_syscall	// sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit	/* 75 */
	.long sys_ni_syscall	// sys_old_getrlimit
	.long sys_getrusage
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups16	/* 80 */
	.long sys_setgroups16
	.long sys_ni_syscall	/* old_select slot */
	.long sys_symlink
	.long sys_lstat
	.long sys_readlink	/* 85 */
	.long sys_uselib
	.long sys_swapon
	.long sys_reboot
	.long sys_ni_syscall	// old_readdir
	.long sys_ni_syscall	/* 90 */	/* old_mmap slot */
	.long sys_munmap
	.long sys_truncate
	.long sys_ftruncate
	.long sys_fchmod
	.long sys_fchown16	/* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall	/* old profil syscall holder */
	.long sys_statfs
	.long sys_fstatfs	/* 100 */
	.long sys_ni_syscall	/* ioperm for i386 */
	.long sys_socketcall
	.long sys_syslog
	.long sys_setitimer
	.long sys_getitimer	/* 105 */
	.long sys_newstat
	.long sys_newlstat
	.long sys_newfstat
	.long sys_ni_syscall	/* obsolete olduname( syscall */
	.long sys_ni_syscall	/* iopl for i386 */ /* 110 */
	.long sys_vhangup
	.long sys_ni_syscall	/* obsolete idle( syscall */
	.long sys_ni_syscall	/* vm86old for i386 */
	.long sys_wait4
	.long sys_swapoff	/* 115 */
	.long sys_sysinfo
	.long sys_ipc
	.long sys_fsync
	.long sys_sigreturn
	.long sys_clone		/* 120 */
	.long sys_setdomainname
	.long sys_newuname
	.long sys_ni_syscall	/* old "cacheflush" */
	.long sys_adjtimex
	.long sys_mprotect	/* 125 */
	.long sys_sigprocmask
	.long sys_ni_syscall	/* old "create_module" */
	.long sys_init_module
	.long sys_delete_module
	.long sys_ni_syscall	/* old "get_kernel_syms" */
	.long sys_quotactl
	.long sys_getpgid
	.long sys_fchdir
	.long sys_bdflush
	.long sys_sysfs		/* 135 */
	.long sys_personality
	.long sys_ni_syscall	/* for afs_syscall */
	.long sys_setfsuid16
	.long sys_setfsgid16
	.long sys_llseek	/* 140 */
	.long sys_getdents
	.long sys_select
	.long sys_flock
	.long sys_msync
	.long sys_readv		/* 145 */
	.long sys_writev
	.long sys_getsid
	.long sys_fdatasync
	.long sys_sysctl
	.long sys_mlock		/* 150 */
	.long sys_munlock
	.long sys_mlockall
	.long sys_munlockall
	.long sys_sched_setparam
	.long sys_sched_getparam	/* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min	/* 160 */
	.long sys_sched_rr_get_interval
	.long sys_nanosleep
	.long sys_mremap
	.long sys_setresuid16
	.long sys_getresuid16	/* 165 */
	.long sys_ni_syscall	/* for vm86 */
	.long sys_ni_syscall	/* Old sys_query_module */
	.long sys_poll
	.long sys_ni_syscall	/* Old nfsservctl */
	.long sys_setresgid16	/* 170 */
	.long sys_getresgid16
	.long sys_prctl
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask	/* 175 */
	.long sys_rt_sigpending
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long sys_rt_sigsuspend
	.long sys_pread64	/* 180 */
	.long sys_pwrite64
	.long sys_chown16
	.long sys_getcwd
	.long sys_capget
	.long sys_capset	/* 185 */
	.long sys_sigaltstack
	.long sys_sendfile
	.long sys_ni_syscall	/* streams1 */
	.long sys_ni_syscall	/* streams2 */
	.long sys_vfork		/* 190 */
	.long sys_getrlimit
	.long sys_mmap2
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64	/* 195 */
	.long sys_lstat64
	.long sys_fstat64
	.long sys_lchown
	.long sys_getuid
	.long sys_getgid	/* 200 */
	.long sys_geteuid
	.long sys_getegid
	.long sys_setreuid
	.long sys_setregid
	.long sys_getgroups	/* 205 */
	.long sys_setgroups
	.long sys_fchown
	.long sys_setresuid
	.long sys_getresuid
	.long sys_setresgid	/* 210 */
	.long sys_getresgid
	.long sys_chown
	.long sys_setuid
	.long sys_setgid
	.long sys_setfsuid	/* 215 */
	.long sys_setfsgid
	.long sys_pivot_root
	.long sys_mincore
	.long sys_madvise
	.long sys_getdents64	/* 220 */
	.long sys_fcntl64
	.long sys_ni_syscall	/* reserved for TUX */
	.long sys_ni_syscall	/* Reserved for Security */
	.long sys_gettid
	.long sys_readahead	/* 225 */
	.long sys_setxattr
	.long sys_lsetxattr
	.long sys_fsetxattr
	.long sys_getxattr
	.long sys_lgetxattr	/* 230 */
	.long sys_fgetxattr
	.long sys_listxattr
	.long sys_llistxattr
	.long sys_flistxattr
	.long sys_removexattr	/* 235 */
	.long sys_lremovexattr
	.long sys_fremovexattr
	.long sys_tkill
	.long sys_sendfile64
	.long sys_futex		/* 240 */
	.long sys_sched_setaffinity
	.long sys_sched_getaffinity
	.long sys_ni_syscall	// sys_set_thread_area
	.long sys_ni_syscall	// sys_get_thread_area
	.long sys_io_setup	/* 245 */
	.long sys_io_destroy
	.long sys_io_getevents
	.long sys_io_submit
	.long sys_io_cancel
	.long sys_fadvise64	/* 250 */
	.long sys_ni_syscall
	.long sys_exit_group
	.long sys_lookup_dcookie
	.long sys_epoll_create
	.long sys_epoll_ctl	/* 255 */
	.long sys_epoll_wait
	.long sys_remap_file_pages
	.long sys_set_tid_address
	.long sys_timer_create
	.long sys_timer_settime	/* 260 */
	.long sys_timer_gettime
	.long sys_timer_getoverrun
	.long sys_timer_delete
	.long sys_clock_settime
	.long sys_clock_gettime	/* 265 */
	.long sys_clock_getres
	.long sys_clock_nanosleep
	.long sys_statfs64
	.long sys_fstatfs64
	.long sys_tgkill	/* 270 */
	.long sys_utimes
	.long sys_fadvise64_64
	.long sys_ni_syscall	/* sys_vserver */
	.long sys_mbind
	.long sys_get_mempolicy
	.long sys_set_mempolicy
	.long sys_mq_open
	.long sys_mq_unlink
	.long sys_mq_timedsend
	.long sys_mq_timedreceive	/* 280 */
	.long sys_mq_notify
	.long sys_mq_getsetattr
	.long sys_ni_syscall	/* reserved for kexec */
	.long sys_waitid
	.long sys_ni_syscall	/* 285 */	/* available */
	.long sys_add_key
	.long sys_request_key
	.long sys_keyctl
	.long sys_ioprio_set
	.long sys_ioprio_get	/* 290 */
	.long sys_inotify_init
	.long sys_inotify_add_watch
	.long sys_inotify_rm_watch
	.long sys_migrate_pages
	.long sys_openat	/* 295 */
	.long sys_mkdirat
	.long sys_mknodat
	.long sys_fchownat
	.long sys_futimesat
	.long sys_fstatat64	/* 300 */
	.long sys_unlinkat
	.long sys_renameat
	.long sys_linkat
	.long sys_symlinkat
	.long sys_readlinkat	/* 305 */
	.long sys_fchmodat
	.long sys_faccessat
	.long sys_pselect6
	.long sys_ppoll
	.long sys_unshare	/* 310 */
	.long sys_set_robust_list
	.long sys_get_robust_list
	.long sys_splice
	.long sys_sync_file_range
	.long sys_tee		/* 315 */
	.long sys_vmsplice
	.long sys_move_pages
	.long sys_getcpu
	.long sys_epoll_pwait
	.long sys_utimensat	/* 320 */
	.long sys_signalfd
	.long sys_timerfd_create
	.long sys_eventfd
	.long sys_fallocate
	.long sys_timerfd_settime	/* 325 */
	.long sys_timerfd_gettime
	.long sys_signalfd4
	.long sys_eventfd2
	.long sys_epoll_create1
	.long sys_dup3		/* 330 */
	.long sys_pipe2
	.long sys_inotify_init1
	.long sys_preadv
	.long sys_pwritev
	.long sys_rt_tgsigqueueinfo	/* 335 */
	.long sys_perf_event_open
	.long sys_setns

syscall_table_size = (. - sys_call_table)