/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Linux interrupt vectors.
 */

#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/atomic.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/abi.h>
#include <arch/interrupts.h>
#include <arch/spr_def.h>

#ifdef CONFIG_PREEMPT
# error "No support for kernel preemption currently"
#endif

#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)

#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
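
/*
 * For example (a usage taken from later in this file),
 *	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
 * expands to
 *	addli r0, sp, C_ABI_SAVE_AREA_SIZE + PTREGS_OFFSET_BASE
 * i.e. it forms a pointer to a field of the pt_regs struct that sits
 * just above the C ABI save area on the kernel stack.
 */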

#if !CHIP_HAS_WH64()
/* By making this an empty macro, we can use wh64 in the code. */
.macro	wh64 reg
.endm
#endif

.macro	push_reg reg, ptr=sp, delta=-4
	{
	sw	\ptr, \reg
	addli	\ptr, \ptr, \delta
	}
.endm

.macro	pop_reg reg, ptr=sp, delta=4
	{
	lw	\reg, \ptr
	addli	\ptr, \ptr, \delta
	}
.endm

.macro	pop_reg_zero reg, zreg, ptr=sp, delta=4
	{
	move	\zreg, zero
	lw	\reg, \ptr
	addi	\ptr, \ptr, \delta
	}
.endm
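
/*
 * Illustrative expansion (a sketch, not assembler output): with the
 * default arguments, "push_reg r5" is the single bundle
 *	{ sw sp, r5; addli sp, sp, -4 }
 * which stores r5 at *sp and then decrements sp for the next push;
 * "pop_reg r5" loads and post-increments symmetrically.
 */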

.macro	push_extra_callee_saves reg
	PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
	push_reg r51, \reg
	push_reg r50, \reg
	push_reg r49, \reg
	push_reg r48, \reg
	push_reg r47, \reg
	push_reg r46, \reg
	push_reg r45, \reg
	push_reg r44, \reg
	push_reg r43, \reg
	push_reg r42, \reg
	push_reg r41, \reg
	push_reg r40, \reg
	push_reg r39, \reg
	push_reg r38, \reg
	push_reg r37, \reg
	push_reg r36, \reg
	push_reg r35, \reg
	push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
.endm
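
/*
 * Callers pass a scratch pointer register, e.g. "push_extra_callee_saves r4"
 * as in _sys_clone below; on exit \reg points at PTREGS_OFFSET_BASE.
 */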

.macro	panic str
	.pushsection .rodata, "a"
1:
	.asciz  "\str"
	.popsection
	{
	moveli  r0, lo16(1b)
	}
	{
	auli    r0, r0, ha16(1b)
	jal     panic
	}
.endm
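
/*
 * Example use from later in this file:
 *	panic "Unhandled interrupt %#x: PC %#lx"
 * The string is emitted into .rodata and its address is passed to the
 * C panic() routine in r0; any printf-style arguments must already be
 * loaded into r1, r2, etc., since the macro clobbers r0.
 */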

#ifdef __COLLECT_LINKER_FEEDBACK__
	.pushsection .text.intvec_feedback,"ax"
intvec_feedback:
	.popsection
#endif

/*
 * Default interrupt handler.
 *
 * vecnum is the vector number, which also determines where this
 * handler's code is placed (at offset vecnum << 8).
 * c_routine is the C routine we'll call.
 *
 * The C routine is passed two arguments:
 * - A pointer to the pt_regs state.
 * - The interrupt vector number.
 *
 * The "processing" argument specifies the assembly code for processing
 * the interrupt; it defaults to "handle_interrupt".
 */
.macro	int_hand vecnum, vecname, c_routine, processing=handle_interrupt
.org	(\vecnum << 8)
intvec_\vecname:
	.ifc \vecnum, INT_SWINT_1
	blz	TREG_SYSCALL_NR_NAME, sys_cmpxchg
	.endif

	/* Temporarily save a register so we have somewhere to work. */

	mtspr	SPR_SYSTEM_SAVE_K_1, r0
	mfspr	r0, SPR_EX_CONTEXT_K_1

	/* The cmpxchg code clears sp to force us to reset it here on fault. */
	{
	bz	sp, 2f
	andi	r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	}

	.ifc \vecnum, INT_DOUBLE_FAULT
	/*
	 * For double-faults from user-space, fall through to the normal
	 * register save and stack setup path. Otherwise, it's the
	 * hypervisor giving us one last chance to dump diagnostics, and we
	 * branch to the kernel_double_fault routine to do so.
	 */
	bz	r0, 1f
	j	_kernel_double_fault
1:
	.else
	/*
	 * If we're coming from user-space, then set sp to the top of
	 * the kernel stack. Otherwise, assume sp is already valid.
	 */
	{
	bnz	r0, 0f
	move	r0, sp
	}
	.endif

	.ifc \c_routine, do_page_fault
	/*
	 * The page_fault handler may be downcalled directly by the
	 * hypervisor even when Linux is running and has ICS set.
	 *
	 * In this case the contents of EX_CONTEXT_K_1 reflect the
	 * previous fault and can't be relied on to choose whether or
	 * not to reinitialize the stack pointer. So we add a test
	 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
	 * and if so we don't reinitialize sp, since we must be coming
	 * from Linux. (In fact the precise case is !(val & ~1),
	 * but any Linux PC has to have the high bit set.)
	 *
	 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
	 * any path that turns into a downcall to one of our TLB handlers.
	 */
	mfspr	r0, SPR_SYSTEM_SAVE_K_2
	{
	blz	r0, 0f	/* high bit in SYSTEM_SAVE_K_2 is for a PC to use */
	move	r0, sp
	}
	.endif

2:
	/*
	 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
	 * the current stack top in the higher bits. So we recover
	 * our stack top by just masking off the low bits, then
	 * point sp at the top aligned address on the actual stack page.
	 */
	mfspr	r0, SPR_SYSTEM_SAVE_K_0
	mm	r0, r0, zero, LOG2_THREAD_SIZE, 31

0:
	/*
	 * Align the stack mod 64 so we can properly predict what
	 * cache lines we need to write-hint to reduce memory fetch
	 * latency as we enter the kernel. The layout of memory is
	 * as follows, with cache line 0 at the lowest VA, and cache
	 * line 4 just below the r0 value this "andi" computes.
	 * Note that we never write to cache line 4, and we skip
	 * cache line 1 for syscalls.
	 *
	 *    cache line 4: ptregs padding (two words)
	 *    cache line 3: r46...lr, pc, ex1, faultnum, orig_r0, flags, pad
	 *    cache line 2: r30...r45
	 *    cache line 1: r14...r29
	 *    cache line 0: 2 x frame, r0..r13
	 */
	andi	r0, r0, -64

	/*
	 * Push the first four registers on the stack, so that we can set
	 * them to vector-unique values before we jump to the common code.
	 *
	 * Registers are pushed on the stack as a struct pt_regs,
	 * with the sp initially just above the struct, and when we're
	 * done, sp points to the base of the struct, minus
	 * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
	 *
	 * This routine saves just the first four registers, plus the
	 * stack context so we can do proper backtracing right away,
	 * and defers to handle_interrupt to save the rest.
	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
	 */
	addli	r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
	wh64	r0	/* cache line 3 */
	{
	sw	r0, lr
	addli	r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
	}
	{
	sw	r0, sp
	addli	sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
	}
	{
	sw	sp, r52
	addli	sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
	}
	wh64	sp	/* cache line 0 */
	{
	sw	sp, r1
	addli	sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
	}
	{
	sw	sp, r2
	addli	sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
	}
	{
	sw	sp, r3
	addli	sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
	}
	mfspr	r0, SPR_EX_CONTEXT_K_0
	.ifc \processing,handle_syscall
	/*
	 * Bump the saved PC by one bundle so that when we return, we won't
	 * execute the same swint instruction again. We need to do this while
	 * we're in the critical section.
	 */
	addi	r0, r0, 8
	.endif
	{
	sw	sp, r0
	addli	sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	}
	mfspr	r0, SPR_EX_CONTEXT_K_1
	{
	sw	sp, r0
	addi	sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
	/*
	 * Use r0 for syscalls so it's a temporary; use r1 for interrupts
	 * so that it gets passed through unchanged to the handler routine.
	 * Note that the .if conditional confusingly spans bundles.
	 */
	.ifc \processing,handle_syscall
	movei	r0, \vecnum
	}
	{
	sw	sp, r0
	.else
	movei	r1, \vecnum
	}
	{
	sw	sp, r1
	.endif
	addli	sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
	}
	mfspr	r0, SPR_SYSTEM_SAVE_K_1	/* Original r0 */
	{
	sw	sp, r0
	addi	sp, sp, -PTREGS_OFFSET_REG(0) - 4
	}
	{
	sw	sp, zero	/* write zero into "Next SP" frame pointer */
	addi	sp, sp, -4	/* leave SP pointing at bottom of frame */
	}
	.ifc \processing,handle_syscall
	j	handle_syscall
	.else
	/*
	 * Capture per-interrupt SPR context to registers.
	 * We overload the meaning of r3 on this path such that if its bit 31
	 * is set, we have to mask all interrupts including NMIs before
	 * clearing the interrupt critical section bit.
	 * See discussion below at "finish_interrupt_save".
	 */
	.ifc \c_routine, do_page_fault
	mfspr	r2, SPR_SYSTEM_SAVE_K_3	/* address of page fault */
	mfspr	r3, SPR_SYSTEM_SAVE_K_2	/* info about page fault */
	.else
	.ifc \vecnum, INT_DOUBLE_FAULT
	{
	mfspr	r2, SPR_SYSTEM_SAVE_K_2	/* double fault info from HV */
	movei	r3, 0
	}
	.else
	.ifc \c_routine, do_trap
	{
	mfspr	r2, GPV_REASON
	movei	r3, 0
	}
	.else
	.ifc \c_routine, op_handle_perf_interrupt
	{
	mfspr	r2, PERF_COUNT_STS
	movei	r3, -1	/* not used, but set for consistency */
	}
	.else
#if CHIP_HAS_AUX_PERF_COUNTERS()
	.ifc \c_routine, op_handle_aux_perf_interrupt
	{
	mfspr	r2, AUX_PERF_COUNT_STS
	movei	r3, -1	/* not used, but set for consistency */
	}
	.else
#endif
	movei	r3, 0
#if CHIP_HAS_AUX_PERF_COUNTERS()
	.endif
#endif
	.endif
	.endif
	.endif
	.endif
	/* Put function pointer in r0 */
	moveli	r0, lo16(\c_routine)
	{
	auli	r0, r0, ha16(\c_routine)
	j	\processing
	}
	.endif
	ENDPROC(intvec_\vecname)

#ifdef __COLLECT_LINKER_FEEDBACK__
	.pushsection .text.intvec_feedback,"ax"
	.org	(\vecnum << 5)
	FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
	jrp	lr
	.popsection
#endif

.endm
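
/*
 * Hypothetical example (the actual vector table follows later in this
 * file, past the end of this excerpt):
 *	int_hand INT_ITLB_MISS, ITLB_MISS, do_page_fault
 * would place the stub at vector offset INT_ITLB_MISS << 8, save the
 * first few registers, and tail-call handle_interrupt with
 * r0 = &do_page_fault and r1 = INT_ITLB_MISS.
 */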


/*
 * Save the rest of the registers that we didn't save in the actual
 * vector itself. We can't use r0-r10 inclusive here.
 */
.macro	finish_interrupt_save, function

	/* If it's a syscall, save a proper orig_r0, otherwise just zero. */
	PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
	{
	.ifc \function,handle_syscall
	sw	r52, r0
	.else
	sw	r52, zero
	.endif
	PTREGS_PTR(r52, PTREGS_OFFSET_TP)
	}

	/*
	 * For ordinary syscalls, we save neither caller- nor callee-
	 * save registers, since the syscall invoker doesn't expect the
	 * caller-saves to be saved, and the called kernel functions will
	 * take care of saving the callee-saves for us.
	 *
	 * For interrupts we save just the caller-save registers. Saving
	 * them is required (since the "caller" can't save them). Again,
	 * the called kernel functions will restore the callee-save
	 * registers for us appropriately.
	 *
	 * On return, we normally restore nothing special for syscalls,
	 * and just the caller-save registers for interrupts.
	 *
	 * However, there are some important caveats to all this:
	 *
	 * - We always save a few callee-save registers to give us
	 *   some scratchpad registers to carry across function calls.
	 *
	 * - fork/vfork/etc require us to save all the callee-save
	 *   registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
	 *
	 * - We always save r0..r5 and r10 for syscalls, since we need
	 *   to reload them a bit later for the actual kernel call, and
	 *   since we might need them for -ERESTARTNOINTR, etc.
	 *
	 * - Before invoking a signal handler, we save the unsaved
	 *   callee-save registers so they are visible to the
	 *   signal handler or any ptracer.
	 *
	 * - If the unsaved callee-save registers are modified, we set
	 *   a bit in pt_regs so we know to reload them from pt_regs
	 *   and not just rely on the kernel function unwinding.
	 *   (Done for ptrace register writes and SA_SIGINFO handler.)
	 */
	{
	sw	r52, tp
	PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
	}
	wh64	r52	/* cache line 2 */
	push_reg r33, r52
	push_reg r32, r52
	push_reg r31, r52
	.ifc \function,handle_syscall
	push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
	push_reg TREG_SYSCALL_NR_NAME, r52, \
	  PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
	.else

	push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
	wh64	r52	/* cache line 1 */
	push_reg r29, r52
	push_reg r28, r52
	push_reg r27, r52
	push_reg r26, r52
	push_reg r25, r52
	push_reg r24, r52
	push_reg r23, r52
	push_reg r22, r52
	push_reg r21, r52
	push_reg r20, r52
	push_reg r19, r52
	push_reg r18, r52
	push_reg r17, r52
	push_reg r16, r52
	push_reg r15, r52
	push_reg r14, r52
	push_reg r13, r52
	push_reg r12, r52
	push_reg r11, r52
	push_reg r10, r52
	push_reg r9, r52
	push_reg r8, r52
	push_reg r7, r52
	push_reg r6, r52

	.endif

	push_reg r5, r52
	sw	r52, r4

	/* Load tp with our per-cpu offset. */
#ifdef CONFIG_SMP
	{
	mfspr	r20, SPR_SYSTEM_SAVE_K_0
	moveli	r21, lo16(__per_cpu_offset)
	}
	{
	auli	r21, r21, ha16(__per_cpu_offset)
	mm	r20, r20, zero, 0, LOG2_THREAD_SIZE-1
	}
	s2a	r20, r20, r21
	lw	tp, r20
#else
	move	tp, zero
#endif

	/*
	 * If we will be returning to the kernel, we will need to
	 * reset the interrupt masks to the state they had before.
	 * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
	 * We load flags in r32 here so we can jump to .Lrestore_regs
	 * directly after do_page_fault_ics() if necessary.
	 */
	mfspr	r32, SPR_EX_CONTEXT_K_1
	{
	andi	r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
	}
	bzt	r32, 1f		/* zero if from user space */
	IRQS_DISABLED(r32)	/* zero if irqs enabled */
#if PT_FLAGS_DISABLE_IRQ != 1
# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
#endif
1:
	.ifnc \function,handle_syscall
	/* Record the fact that we saved the caller-save registers above. */
	ori	r32, r32, PT_FLAGS_CALLER_SAVES
	.endif
	sw	r21, r32

#ifdef __COLLECT_LINKER_FEEDBACK__
	/*
	 * Notify the feedback routines that we were in the
	 * appropriate fixed interrupt vector area. Note that we
	 * still have ICS set at this point, so we can't invoke any
	 * atomic operations or we will panic. The feedback
	 * routines internally preserve r0..r10 and r30 up.
	 */
	.ifnc \function,handle_syscall
	shli	r20, r1, 5
	.else
	moveli	r20, INT_SWINT_1 << 5
	.endif
	addli	r20, r20, lo16(intvec_feedback)
	auli	r20, r20, ha16(intvec_feedback)
	jalr	r20

	/* And now notify the feedback routines that we are here. */
	FEEDBACK_ENTER(\function)
#endif

	/*
	 * We've captured enough state to the stack (including in
	 * particular our EX_CONTEXT state) that we can now release
	 * the interrupt critical section and replace it with our
	 * standard "interrupts disabled" mask value. This allows
	 * synchronous interrupts (and profile interrupts) to punch
	 * through from this point onwards.
	 *
	 * If bit 31 of r3 is set during a non-NMI interrupt, we know we
	 * are on the path where the hypervisor has punched through our
	 * ICS with a page fault, so we call out to do_page_fault_ics()
	 * to figure out what to do with it. If the fault was in
	 * an atomic op, we unlock the atomic lock, adjust the
	 * saved register state a little, and return "zero" in r4,
	 * falling through into the normal page-fault interrupt code.
	 * If the fault was in a kernel-space atomic operation, then
	 * do_page_fault_ics() resolves it itself, returns "one" in r4,
	 * and as a result goes directly to restoring registers and iret,
	 * without trying to adjust the interrupt masks at all.
	 * The do_page_fault_ics() API involves passing and returning
	 * a five-word struct (in registers) to avoid writing the
	 * save and restore code here.
	 */
	.ifc \function,handle_nmi
	IRQ_DISABLE_ALL(r20)
	.else
	.ifnc \function,handle_syscall
	bgezt	r3, 1f
	{
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	jal	do_page_fault_ics
	}
	FEEDBACK_REENTER(\function)
	bzt	r4, 1f
	j	.Lrestore_regs
1:
	.endif
	IRQ_DISABLE(r20, r21)
	.endif
	mtspr	INTERRUPT_CRITICAL_SECTION, zero

#if CHIP_HAS_WH64()
	/*
	 * Prepare the first 256 stack bytes to be rapidly accessible
	 * without having to fetch the background data. We don't really
	 * know how far to write-hint, but kernel stacks generally
	 * aren't that big, and write-hinting here does take some time.
	 */
	addi	r52, sp, -64
	{
	wh64	r52
	addi	r52, r52, -64
	}
	{
	wh64	r52
	addi	r52, r52, -64
	}
	{
	wh64	r52
	addi	r52, r52, -64
	}
	wh64	r52
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	.ifnc \function,handle_nmi
	/*
	 * We finally have enough state set up to notify the irq
	 * tracing code that irqs were disabled on entry to the handler.
	 * The TRACE_IRQS_OFF call clobbers registers r0-r29.
	 * For syscalls, we already have the register state saved away
	 * on the stack, so we don't bother to do any register saves here,
	 * and later we pop the registers back off the kernel stack.
	 * For interrupt handlers, save r0-r3 in callee-saved registers.
	 */
	.ifnc \function,handle_syscall
	{ move r30, r0; move r31, r1 }
	{ move r32, r2; move r33, r3 }
	.endif
	TRACE_IRQS_OFF
	.ifnc \function,handle_syscall
	{ move r0, r30; move r1, r31 }
	{ move r2, r32; move r3, r33 }
	.endif
	.endif
#endif

.endm

.macro	check_single_stepping, kind, not_single_stepping
	/*
	 * Check for single-stepping at user privilege level.
	 * "kind" can be "normal", "ill", or "syscall".
	 * On fall-through, the registers hold:
	 *   r29: thread_info->step_state
	 *   r28: &pt_regs->pc
	 *   r27: pt_regs->pc
	 *   r26: thread_info->step_state->buffer
	 */

	/* Check for single stepping */
	GET_THREAD_INFO(r29)
	{
	/* Get pointer to field holding step state */
	addi	r29, r29, THREAD_INFO_STEP_STATE_OFFSET

	/* Get pointer to EX1 in register state */
	PTREGS_PTR(r27, PTREGS_OFFSET_EX1)
	}
	{
	/* Get pointer to field holding PC */
	PTREGS_PTR(r28, PTREGS_OFFSET_PC)

	/* Load the pointer to the step state */
	lw	r29, r29
	}
	/* Load EX1 */
	lw	r27, r27
	{
	/* Points to flags */
	addi	r23, r29, SINGLESTEP_STATE_FLAGS_OFFSET

	/* No single stepping if there is no step state structure */
	bzt	r29, \not_single_stepping
	}
	{
	/* mask off ICS and any other high bits */
	andi	r27, r27, SPR_EX_CONTEXT_1_1__PL_MASK

	/* Load pointer to single step instruction buffer */
	lw	r26, r29
	}
	/* Check priv state */
	bnz	r27, \not_single_stepping

	/* Get flags */
	lw	r22, r23
	{
	/* Branch if single-step mode not enabled */
	bbnst	r22, \not_single_stepping

	/* Clear enabled flag */
	andi	r22, r22, ~SINGLESTEP_STATE_MASK_IS_ENABLED
	}
	.ifc \kind,normal
	{
	/* Load PC */
	lw	r27, r28

	/* Point to the entry containing the original PC */
	addi	r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET
	}
	{
	/* Disable single stepping flag */
	sw	r23, r22
	}
	{
	/* Get the original pc */
	lw	r24, r24

	/* See if the PC is at the start of the single step buffer */
	seq	r25, r26, r27
	}
	/*
	 * NOTE: it is really expected that the PC be in the single step buffer
	 * at this point
	 */
	bzt	r25, \not_single_stepping

	/* Restore the original PC */
	sw	r28, r24
	.else
	.ifc \kind,syscall
	{
	/* Load PC */
	lw	r27, r28

	/* Point to the entry containing the next PC */
	addi	r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
	}
	{
	/* Increment the stopped PC by the bundle size */
	addi	r26, r26, 8

	/* Disable single stepping flag */
	sw	r23, r22
	}
	{
	/* Get the next pc */
	lw	r24, r24

	/*
	 * See if the PC is one bundle past the start of the
	 * single step buffer
	 */
	seq	r25, r26, r27
	}
	{
	/*
	 * NOTE: it is really expected that the PC be in the
	 * single step buffer at this point
	 */
	bzt	r25, \not_single_stepping
	}
	/* Set to the next PC */
	sw	r28, r24
	.else
	{
	/* Point to 3rd bundle in buffer */
	addi	r25, r26, 16

	/* Load PC */
	lw	r27, r28
	}
	{
	/* Disable single stepping flag */
	sw	r23, r22

	/* See if the PC is in the single step buffer */
	slte_u	r24, r26, r27
	}
	{
	slte_u	r25, r27, r25

	/*
	 * NOTE: it is really expected that the PC be in the
	 * single step buffer at this point
	 */
	bzt	r24, \not_single_stepping
	}
	bzt	r25, \not_single_stepping
	.endif
	.endif
.endm
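
/*
 * Typical use (see handle_interrupt below):
 *	check_single_stepping normal, .Ldispatch_interrupt
 * falls through with r26-r29 loaded as documented above when the task
 * is single-stepping, and branches to the supplied label when not.
 */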

/*
 * Redispatch a downcall.
 */
.macro	dc_dispatch vecnum, vecname
.org	(\vecnum << 8)
intvec_\vecname:
	j	hv_downcall_dispatch
	ENDPROC(intvec_\vecname)
.endm
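
/*
 * Sketch of the intended use (the vector name here is hypothetical):
 *	dc_dispatch INT_INTCTRL_K, intctrl_k
 * simply plants a jump to hv_downcall_dispatch in the vector slot so
 * the hypervisor downcall can be re-dispatched from common code.
 */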

/*
 * Common code for most interrupts. The C function we're eventually
 * going to is in r0, and the faultnum is in r1; the original
 * values for those registers are on the stack.
 */
	.pushsection .text.handle_interrupt,"ax"
handle_interrupt:
	finish_interrupt_save handle_interrupt

	/*
	 * Check if we are single-stepping at user level. If so, then
	 * we need to restore the PC.
	 */

	check_single_stepping normal, .Ldispatch_interrupt
.Ldispatch_interrupt:

	/* Jump to the C routine; it should enable irqs as soon as possible. */
	{
	jalr	r0
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	}
	FEEDBACK_REENTER(handle_interrupt)
	{
	movei	r30, 0	/* not an NMI */
	j	interrupt_return
	}
	STD_ENDPROC(handle_interrupt)

/*
 * This routine takes a boolean in r30 indicating if this is an NMI.
 * If so, we also expect a boolean in r31 indicating whether to
 * re-enable the oprofile interrupts.
 */
STD_ENTRY(interrupt_return)
	/* If we're resuming to kernel space, don't check thread flags. */
	{
	bnz	r30, .Lrestore_all	/* NMIs don't special-case user-space */
	PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
	}
	lw	r29, r29
	andi	r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	{
	bzt	r29, .Lresume_userspace
	PTREGS_PTR(r29, PTREGS_OFFSET_PC)
	}

	/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
	{
	lw	r28, r29
	moveli	r27, lo16(_cpu_idle_nap)
	}
	{
	auli	r27, r27, ha16(_cpu_idle_nap)
	}
	{
	seq	r27, r27, r28
	}
	{
	bbns	r27, .Lrestore_all
	addi	r28, r28, 8
	}
	sw	r29, r28
	j	.Lrestore_all

.Lresume_userspace:
	FEEDBACK_REENTER(interrupt_return)

	/*
	 * Disable interrupts so as to make sure we don't
	 * miss an interrupt that sets any of the thread flags (like
	 * need_resched or sigpending) between sampling and the iret.
	 * Routines like schedule() or do_signal() may re-enable
	 * interrupts before returning.
	 */
	IRQ_DISABLE(r20, r21)
	TRACE_IRQS_OFF	/* Note: clobbers registers r0-r29 */

	/* Get base of stack in r32; note r30/31 are used as arguments here. */
	GET_THREAD_INFO(r32)


	/* Check to see if there is any work to do before returning to user. */
	{
	addi	r29, r32, THREAD_INFO_FLAGS_OFFSET
	moveli	r28, lo16(_TIF_ALLWORK_MASK)
	}
	{
	lw	r29, r29
	auli	r28, r28, ha16(_TIF_ALLWORK_MASK)
	}
	and	r28, r29, r28
	bnz	r28, .Lwork_pending

	/*
	 * In the NMI case we omit the call to single_process_check_nohz,
	 * which normally checks to see if we should start or stop the
	 * scheduler tick, because we can't call arbitrary Linux code from
	 * an NMI context. We always call the homecache TLB deferral code
	 * to re-trigger the deferral mechanism.
	 *
	 * The other chunk of responsibility this code has is to reset the
	 * interrupt masks appropriately to reset irqs and NMIs. We have
	 * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
	 * lockdep-type stuff, but we can't set ICS until afterwards, since
	 * ICS can only be used in very tight chunks of code to avoid
	 * tripping over various assertions that it is off.
	 *
	 * (There is what looks like a window of vulnerability here since
	 * we might take a profile interrupt between the two SPR writes
	 * that set the mask, but since we write the low SPR word first,
	 * and our interrupt entry code checks the low SPR word, any
	 * profile interrupt will actually disable interrupts in both SPRs
	 * before returning, which is OK.)
	 */
.Lrestore_all:
	PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
	{
	lw	r0, r0
	PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
	}
	{
	andi	r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
	lw	r32, r32
	}
	bnz	r0, 1f
	j	2f
#if PT_FLAGS_DISABLE_IRQ != 1
# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use bbnst below
#endif
1:	bbnst	r32, 2f
	IRQ_DISABLE(r20,r21)
	TRACE_IRQS_OFF
	movei	r0, 1
	mtspr	INTERRUPT_CRITICAL_SECTION, r0
	bzt	r30, .Lrestore_regs
	j	3f
2:	TRACE_IRQS_ON
	movei	r0, 1
	mtspr	INTERRUPT_CRITICAL_SECTION, r0
	IRQ_ENABLE(r20, r21)
	bzt	r30, .Lrestore_regs
3:


	/*
	 * We now commit to returning from this interrupt, since we will be
	 * doing things like setting EX_CONTEXT SPRs and unwinding the stack
	 * frame. No calls should be made to any other code after this point.
	 * This code should only be entered with ICS set.
	 * r32 must still be set to ptregs.flags.
	 * We launch loads to each cache line separately first, so we can
	 * get some parallelism out of the memory subsystem.
	 * We start zeroing caller-saved registers throughout, since
	 * that will save some cycles if this turns out to be a syscall.
	 */
.Lrestore_regs:
	FEEDBACK_REENTER(interrupt_return)	/* called from elsewhere */

	/*
	 * Rotate so we have one high bit and one low bit to test.
	 * - low bit says whether to restore all the callee-saved registers,
	 *   or just r30-r33, and r52 up.
	 * - high bit (i.e. sign bit) says whether to restore all the
	 *   caller-saved registers, or just r0.
	 */
#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
# error Rotate trick does not work :-)
#endif
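/*
 * Worked example: PT_FLAGS_CALLER_SAVES is bit 1 and
 * PT_FLAGS_RESTORE_REGS is bit 2, so "rli r20, r32, 30" (rotate left
 * by 30, i.e. right by 2) leaves the RESTORE_REGS bit in bit 0
 * (tested with "bbs" below) and the CALLER_SAVES bit in the sign bit
 * (tested with "blzt").
 */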
	{
	rli	r20, r32, 30
	PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
	}

	/*
	 * Load cache lines 0, 2, and 3 in that order, then use
	 * the last loaded value, which makes it likely that the other
	 * cache lines have also loaded, at which point we should be
	 * able to safely read all the remaining words on those cache
	 * lines without waiting for the memory subsystem.
	 */
	pop_reg_zero r0, r28, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
	pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30)
	pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
	{
	mtspr	SPR_EX_CONTEXT_K_0, r21
	move	r5, zero
	}
	{
	mtspr	SPR_EX_CONTEXT_K_1, lr
	andi	lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	}

	/* Restore callee-saveds that we actually use. */
	pop_reg_zero r52, r6, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_REG(52)
	pop_reg_zero r31, r7
	pop_reg_zero r32, r8
	pop_reg_zero r33, r9, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)

	/*
	 * If we modified other callee-saveds, restore them now.
	 * This is rare, but could be via ptrace or signal handler.
	 */
	{
	move	r10, zero
	bbs	r20, .Lrestore_callees
	}
.Lcontinue_restore_regs:

	/* Check if we're returning from a syscall. */
	{
	move	r11, zero
	blzt	r20, 1f	/* no, so go restore caller-saved registers */
	}

	/*
	 * Check if we're returning to userspace.
	 * Note that if we're not, we don't worry about zeroing everything.
	 */
	{
	addli	sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
	bnz	lr, .Lkernel_return
	}

	/*
	 * On return from syscall, we've restored r0 from pt_regs, but we
	 * clear the remainder of the caller-saved registers. We could
	 * restore the syscall arguments, but there's not much point,
	 * and clearing them ensures user programs aren't trying to rely
	 * on the caller-saves surviving the syscall, as well as avoiding
	 * leaking kernel pointers into userspace.
	 */
	pop_reg_zero lr, r12, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
	pop_reg_zero tp, r13, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
	{
	lw	sp, sp
	move	r14, zero
	move	r15, zero
	}
	{ move r16, zero; move r17, zero }
	{ move r18, zero; move r19, zero }
	{ move r20, zero; move r21, zero }
	{ move r22, zero; move r23, zero }
	{ move r24, zero; move r25, zero }
	{ move r26, zero; move r27, zero }

	/* Set r1 to errno if we are returning an error, otherwise zero. */
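/*
 * In C terms, the next few bundles compute (a sketch):
 *	r1 = -r0;
 *	if ((unsigned long)r1 >= 4096)
 *		r1 = 0;
 * so r1 holds the positive errno when r0 is in [-4095, -1], else zero.
 */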
	{
	moveli	r29, 4096
	sub	r1, zero, r0
	}
	slt_u	r29, r1, r29
	{
	mnz	r1, r29, r1
	move	r29, zero
	}
	iret

	/*
	 * Not a syscall, so restore caller-saved registers.
	 * First kick off a load for cache line 1, which we're touching
	 * for the first time here.
	 */
	.align 64
1:	pop_reg r29, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(29)
	pop_reg r1
	pop_reg r2
	pop_reg r3
	pop_reg r4
	pop_reg r5
	pop_reg r6
	pop_reg r7
	pop_reg r8
	pop_reg r9
	pop_reg r10
	pop_reg r11
	pop_reg r12
	pop_reg r13
	pop_reg r14
	pop_reg r15
	pop_reg r16
	pop_reg r17
	pop_reg r18
	pop_reg r19
	pop_reg r20
	pop_reg r21
	pop_reg r22
	pop_reg r23
	pop_reg r24
	pop_reg r25
	pop_reg r26
	pop_reg r27
	pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
	/* r29 already restored above */
	bnz	lr, .Lkernel_return
	pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
	pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
	lw	sp, sp
	iret

	/*
	 * We can't restore tp when in kernel mode, since a thread might
	 * have migrated from another cpu and brought a stale tp value.
	 */
.Lkernel_return:
	pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
	lw	sp, sp
	iret

	/* Restore callee-saved registers from r34 to r51. */
.Lrestore_callees:
	addli	sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
	pop_reg r34
	pop_reg r35
	pop_reg r36
	pop_reg r37
	pop_reg r38
	pop_reg r39
	pop_reg r40
	pop_reg r41
	pop_reg r42
	pop_reg r43
	pop_reg r44
	pop_reg r45
	pop_reg r46
	pop_reg r47
	pop_reg r48
	pop_reg r49
	pop_reg r50
	pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
	j	.Lcontinue_restore_regs

.Lwork_pending:
	/* Mask the reschedule flag */
	andi	r28, r29, _TIF_NEED_RESCHED

	{
	/*
	 * If the NEED_RESCHED flag is set, we call schedule(), which
	 * may drop this context right here and go do something else.
	 * On return, jump back to .Lresume_userspace and recheck.
	 */
	bz	r28, .Lasync_tlb

	/* Mask the async-tlb flag */
	andi	r28, r29, _TIF_ASYNC_TLB
	}

	jal	schedule
	FEEDBACK_REENTER(interrupt_return)

	/* Reload the flags and check again */
	j	.Lresume_userspace

.Lasync_tlb:
	{
	bz	r28, .Lneed_sigpending

	/* Mask the sigpending flag */
	andi	r28, r29, _TIF_SIGPENDING
	}

	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	jal	do_async_page_fault
	FEEDBACK_REENTER(interrupt_return)

	/*
	 * Go restart the "resume userspace" process. We may have
	 * fired a signal, and we need to disable interrupts again.
	 */
	j	.Lresume_userspace

.Lneed_sigpending:
	/*
	 * At this point we are either doing signal handling or single-step,
	 * so either way make sure we have all the registers saved.
	 */
	push_extra_callee_saves r0

	{
	/* If no signal pending, skip to singlestep check */
	bz	r28, .Lneed_singlestep

	/* Mask the singlestep flag */
	andi	r28, r29, _TIF_SINGLESTEP
	}

	jal	do_signal
	FEEDBACK_REENTER(interrupt_return)

	/* Reload the flags and check again */
	j	.Lresume_userspace

.Lneed_singlestep:
	{
	/* Get a pointer to the EX1 field */
	PTREGS_PTR(r29, PTREGS_OFFSET_EX1)

	/* If we get here, our bit must be set. */
	bz	r28, .Lwork_confusion
	}
	/* If we are in priv mode, don't single step */
	lw	r28, r29
	andi	r28, r28, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	bnz	r28, .Lrestore_all

	/* Allow interrupts within the single step code */
	TRACE_IRQS_ON	/* Note: clobbers registers r0-r29 */
	IRQ_ENABLE(r20, r21)

	/* try to single-step the current instruction */
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	jal	single_step_once
	FEEDBACK_REENTER(interrupt_return)

	/* Re-disable interrupts. TRACE_IRQS_OFF in .Lrestore_all. */
	IRQ_DISABLE(r20,r21)

	j	.Lrestore_all

.Lwork_confusion:
	move	r1, r28	/* pass the flags in r1; the panic macro clobbers r0
			   with the format string, so r0 can't carry them */
	panic	"thread_info allwork flags unhandled on userspace resume: %#x"

	STD_ENDPROC(interrupt_return)

/*
 * Some interrupts don't check for single stepping.
 */
	.pushsection .text.handle_interrupt_no_single_step,"ax"
handle_interrupt_no_single_step:
	finish_interrupt_save handle_interrupt_no_single_step
	{
	jalr	r0
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	}
	FEEDBACK_REENTER(handle_interrupt_no_single_step)
	{
	movei	r30, 0	/* not an NMI */
	j	interrupt_return
	}
	STD_ENDPROC(handle_interrupt_no_single_step)

/*
 * "NMI" interrupts mask ALL interrupts before calling the
 * handler, and don't check thread flags, etc., on the way
 * back out. In general, the only things we do here for NMIs
 * are the register save/restore, fixing the PC if we were
 * doing single step, and the dataplane kernel-TLB management.
 * We don't (for example) deal with start/stop of the sched tick.
 */
	.pushsection .text.handle_nmi,"ax"
handle_nmi:
	finish_interrupt_save handle_nmi
	check_single_stepping normal, .Ldispatch_nmi
.Ldispatch_nmi:
	{
	jalr	r0
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	}
	FEEDBACK_REENTER(handle_nmi)
	j	interrupt_return
	STD_ENDPROC(handle_nmi)

/*
 * Parallel code for syscalls to handle_interrupt.
 */
	.pushsection .text.handle_syscall,"ax"
handle_syscall:
	finish_interrupt_save handle_syscall

	/*
	 * Check if we are single-stepping at user level. If so, then
	 * we need to restore the PC.
	 */
	check_single_stepping syscall, .Ldispatch_syscall
.Ldispatch_syscall:

	/* Enable irqs. */
	TRACE_IRQS_ON
	IRQ_ENABLE(r20, r21)

	/* Bump the counter for syscalls made on this tile. */
	moveli	r20, lo16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
	auli	r20, r20, ha16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
	add	r20, r20, tp
	lw	r21, r20
	addi	r21, r21, 1
	sw	r20, r21

	/* Trace syscalls, if requested. */
	GET_THREAD_INFO(r31)
	addi	r31, r31, THREAD_INFO_FLAGS_OFFSET
	lw	r30, r31
	andi	r30, r30, _TIF_SYSCALL_TRACE
	bzt	r30, .Lrestore_syscall_regs
	jal	do_syscall_trace
	FEEDBACK_REENTER(handle_syscall)

	/*
	 * We always reload our registers from the stack at this
	 * point. They might be valid, if we didn't build with
	 * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
	 * doing syscall tracing, but there are enough cases now that it
	 * seems simplest just to do the reload unconditionally.
	 */
.Lrestore_syscall_regs:
	PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
	pop_reg r0, r11
	pop_reg r1, r11
	pop_reg r2, r11
	pop_reg r3, r11
	pop_reg r4, r11
	pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
	pop_reg TREG_SYSCALL_NR_NAME, r11

	/* Ensure that the syscall number is within the legal range. */
	moveli	r21, __NR_syscalls
	{
	slt_u	r21, TREG_SYSCALL_NR_NAME, r21
	moveli	r20, lo16(sys_call_table)
	}
	{
	bbns	r21, .Linvalid_syscall
	auli	r20, r20, ha16(sys_call_table)
	}
	s2a	r20, TREG_SYSCALL_NR_NAME, r20
	lw	r20, r20

	/* Jump to syscall handler. */
	jalr	r20
.Lhandle_syscall_link:	/* value of "lr" after "jalr r20" above */

	/*
	 * Write our r0 onto the stack so it gets restored instead
	 * of whatever the user had there before.
	 */
	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
	sw	r29, r0

.Lsyscall_sigreturn_skip:
	FEEDBACK_REENTER(handle_syscall)

	/* Do syscall trace again, if requested. */
	lw	r30, r31
	andi	r30, r30, _TIF_SYSCALL_TRACE
	bzt	r30, 1f
	jal	do_syscall_trace
	FEEDBACK_REENTER(handle_syscall)
1:	j	.Lresume_userspace	/* jump into middle of interrupt_return */

.Linvalid_syscall:
	/* Report an invalid syscall back to the user program */
	{
	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
	movei	r28, -ENOSYS
	}
	sw	r29, r28
	j	.Lresume_userspace	/* jump into middle of interrupt_return */
	STD_ENDPROC(handle_syscall)

/* Return the address for oprofile to suppress in backtraces. */
STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
	lnk	r0
	{
	addli	r0, r0, .Lhandle_syscall_link - .
	jrp	lr
	}
	STD_ENDPROC(handle_syscall_link_address)

STD_ENTRY(ret_from_fork)
	jal	sim_notify_fork
	jal	schedule_tail
	FEEDBACK_REENTER(ret_from_fork)
	j	.Lresume_userspace	/* jump into middle of interrupt_return */
	STD_ENDPROC(ret_from_fork)

/*
 * Code for ill interrupt.
 */
	.pushsection .text.handle_ill,"ax"
handle_ill:
	finish_interrupt_save handle_ill

	/*
	 * Check if we are single-stepping at user level. If so, then
	 * we need to restore the PC.
	 */
	check_single_stepping ill, .Ldispatch_normal_ill

	{
	/* See if the PC is the 1st bundle in the buffer */
	seq	r25, r27, r26

	/* Point to the 2nd bundle in the buffer */
	addi	r26, r26, 8
	}
	{
	/* Point to the original pc */
	addi	r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET

	/* Branch if the PC is the 1st bundle in the buffer */
	bnz	r25, 3f
	}
	{
	/* See if the PC is the 2nd bundle of the buffer */
	seq	r25, r27, r26

	/* Point to the entry containing the next PC */
	addi	r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
	}
	{
	/* Point to flags */
	addi	r25, r29, SINGLESTEP_STATE_FLAGS_OFFSET

	/* Branch if the PC is not in the second bundle */
	bz	r25, 2f
	}
	/* Load flags */
	lw	r25, r25
	{
	/*
	 * Get the offset for the register to restore.
	 * Note: the lower bound is 2, so we have implicit scaling by 4.
	 * No multiplication of the register number by the size of a register
	 * is needed.
	 */
	mm	r27, r25, zero, SINGLESTEP_STATE_TARGET_LB, \
	  SINGLESTEP_STATE_TARGET_UB

	/* Mask Rewrite_LR */
	andi	r25, r25, SINGLESTEP_STATE_MASK_UPDATE
	}
	{
	addi	r29, r29, SINGLESTEP_STATE_UPDATE_VALUE_OFFSET

	/* Don't rewrite temp register */
	bz	r25, 3f
	}
	{
	/* Get the temp value */
	lw	r29, r29

	/* Point to where the register is stored */
	add	r27, r27, sp
	}

	/* Add in the C ABI save area size to the register offset */
	addi	r27, r27, C_ABI_SAVE_AREA_SIZE

	/* Restore the user's register with the temp value */
	sw	r27, r29
	j	3f

2:
	/* Must be in the third bundle */
	addi	r24, r29, SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET

3:
	/* Set PC and continue */
	lw	r26, r24
	sw	r28, r26

	/*
	 * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
	 * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
	 * need to clear it here and can't really impose on all other arches.
	 * So what's another write between friends?
	 */
	GET_THREAD_INFO(r0)

	addi	r1, r0, THREAD_INFO_FLAGS_OFFSET
	{
	lw	r2, r1
	addi	r0, r0, THREAD_INFO_TASK_OFFSET	/* currently a no-op */
	}
	andi	r2, r2, ~_TIF_SINGLESTEP
	sw	r1, r2

	/* Issue a sigtrap */
	{
	lw	r0, r0		/* indirect thru thread_info to get the task struct */
	addi	r1, sp, C_ABI_SAVE_AREA_SIZE	/* put ptregs pointer into r1 */
	move	r2, zero	/* load error code into r2 */
	}

	jal	send_sigtrap	/* issue a SIGTRAP */
	FEEDBACK_REENTER(handle_ill)
	j	.Lresume_userspace	/* jump into middle of interrupt_return */

.Ldispatch_normal_ill:
	{
	jalr	r0
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	}
	FEEDBACK_REENTER(handle_ill)
	{
	movei	r30, 0	/* not an NMI */
	j	interrupt_return
	}
	STD_ENDPROC(handle_ill)

/* Various stub interrupt handlers and syscall handlers */

STD_ENTRY_LOCAL(_kernel_double_fault)
	mfspr	r1, SPR_EX_CONTEXT_K_0
	move	r2, lr
	move	r3, sp
	move	r4, r52
	addi	sp, sp, -C_ABI_SAVE_AREA_SIZE
	j	kernel_double_fault
	STD_ENDPROC(_kernel_double_fault)

STD_ENTRY_LOCAL(bad_intr)
	mfspr	r2, SPR_EX_CONTEXT_K_0
	panic	"Unhandled interrupt %#x: PC %#lx"
	STD_ENDPROC(bad_intr)

/* Put address of pt_regs in reg and jump. */
#define PTREGS_SYSCALL(x, reg)                \
	STD_ENTRY(_##x);                      \
	{                                     \
	 PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
	 j      x                             \
	};                                    \
	STD_ENDPROC(_##x)

/*
 * Special-case sigreturn to not write r0 to the stack on return.
 * This is technically more efficient, but it also avoids difficulties
 * in the 64-bit OS when handling 32-bit compat code, since we must not
 * sign-extend r0 for the sigreturn return-value case.
 */
#define PTREGS_SYSCALL_SIGRETURN(x, reg)      \
	STD_ENTRY(_##x);                      \
	addli   lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
	{                                     \
	 PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \
	 j      x                             \
	};                                    \
	STD_ENDPROC(_##x)

PTREGS_SYSCALL(sys_execve, r3)
PTREGS_SYSCALL(sys_sigaltstack, r2)
PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
PTREGS_SYSCALL(sys_cmpxchg_badaddr, r1)

/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
STD_ENTRY(_sys_clone)
	push_extra_callee_saves r4
	j	sys_clone
	STD_ENDPROC(_sys_clone)
Chris Metcalf | 867e359 | 2010-05-28 23:09:12 -0400 | [diff] [blame] | 1523 | |
/*
 * This entrypoint is taken for the cmpxchg and atomic_update fast
 * swints. We may wish to generalize it to other fast swints at some
 * point, but for now there are just these two very similar ones,
 * which keeps the code small and fast.
 *
 * The fast swint code is designed to have a small footprint. It does
 * not save or restore any GPRs, counting on the caller-save registers
 * to be available to it on entry. It does not modify any callee-save
 * registers (including "lr"). It does not check what PL it is being
 * called at, so you'd better not call it other than at PL0.
 * The <atomic.h> wrapper assumes it only clobbers r20-r29, so be
 * aware of that if it ever becomes necessary to use more registers.
 *
 * It does not use the stack, but since it might be re-interrupted by
 * a page fault which would assume the stack was valid, it does
 * save/restore the stack pointer and zero it out to make sure it gets reset.
 * Since we always keep interrupts disabled, the hypervisor won't
 * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
 * (other than to advance the PC on return).
 *
 * We have to manually validate the user vs kernel address range
 * (since at PL1 we can read/write both), and for performance reasons
 * we don't allow cmpxchg on the fc000000 memory region, since we only
 * validate that the user address is below PAGE_OFFSET.
 *
 * We place it in the __HEAD section to ensure it is relatively
 * near to the intvec_SWINT_1 code (reachable by a conditional branch).
 *
 * Must match register usage in do_page_fault().
 */
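/*
 * For orientation, a hedged C-level sketch of the fast-swint ABI as
 * implied by the code below (the function names here are hypothetical,
 * not real kernel symbols): r0 holds the user address, r1 the compare
 * value (or the mask, for atomic_update), r2 the new value (or the
 * addend), and the old memory word comes back in r0.
 *
 *	u32 fast_cmpxchg(u32 *p, u32 old, u32 new);
 *		// if (*p == old) *p = new;  returns the old *p
 *	u32 fast_atomic_update(u32 *p, u32 mask, u32 addend);
 *		// *p = (*p & mask) + addend;  returns the old *p
 *
 * The 64-bit variant (__NR_FAST_cmpxchg64) passes the compare value
 * in r2/r3 and the new value in r4/r5; see the comments further down.
 */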
	__HEAD
	.align 64
	/* Align the much-later jump so it starts a cache line. */
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
	nop
#if PAGE_SIZE >= 0x10000
	nop
#endif
#endif
ENTRY(sys_cmpxchg)

	/*
	 * Save "sp" and set it zero for any possible page fault.
	 *
	 * HACK: We want to both zero sp and check r0's alignment,
	 * so we do both at once. If "sp" becomes nonzero we
	 * know r0 is unaligned and branch to the error handler that
	 * restores sp, so this is OK.
	 *
	 * ICS is disabled right now so having a garbage but nonzero
	 * sp is OK, since we won't execute any faulting instructions
	 * when it is nonzero.
	 */
	{
	 move	r27, sp
	 andi	sp, r0, 3
	}
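	/*
	 * In effect, a one-line C sketch of the trick above:
	 *	saved_sp = sp;  sp = r0 & 3;	(nonzero sp => r0 unaligned)
	 */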

	/*
	 * Get the lock address in ATOMIC_LOCK_REG, and also validate that the
	 * address is less than PAGE_OFFSET, since that won't trap at PL1.
	 * We only use bits less than PAGE_SHIFT to avoid having to worry
	 * about aliasing among multiple mappings of the same physical page,
	 * and we ignore the low 3 bits so we have one lock that covers
	 * both a cmpxchg64() and a cmpxchg() on either its low or high word.
	 * NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
	 */
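	/*
	 * A hedged C sketch of the lock-hashing scheme described above
	 * (illustrative only; "lock_array" and "hash" are stand-in names,
	 * and the authoritative version is __atomic_hashed_lock() in
	 * lib/atomic_32.c):
	 *
	 *	unsigned long a = (unsigned long)uaddr;	 // a < PAGE_OFFSET
	 *	unsigned long bits = (a & (PAGE_SIZE - 1)) >> 3;
	 *	int *lock = &lock_array[hash(bits)];	 // 4-byte lock word
	 */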

#if (PAGE_OFFSET & 0xffff) != 0
# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
#endif

#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	{
	 /* Check for unaligned input. */
	 bnz	sp, .Lcmpxchg_badaddr
	 mm	r25, r0, zero, 3, PAGE_SHIFT-1
	}
	{
	 crc32_32  r25, zero, r25
	 moveli	r21, lo16(atomic_lock_ptr)
	}
	{
	 auli	r21, r21, ha16(atomic_lock_ptr)
	 auli	r23, zero, hi16(PAGE_OFFSET)  /* hugepage-aligned */
	}
	{
	 shri	r20, r25, 32 - ATOMIC_HASH_L1_SHIFT
	 slt_u	r23, r0, r23

	 /*
	  * Ensure that the TLB is loaded before we take out the lock.
	  * On TILEPro, this will start fetching the value all the way
	  * into our L1 as well (and if it gets modified before we
	  * grab the lock, it will be invalidated from our cache
	  * before we reload it). On TILE64, we'll start fetching it
	  * into our L1 if we're the home, and if we're not, we'll
	  * still at least start fetching it into the home's L2.
	  */
	 lw	r26, r0
	}
	{
	 s2a	r21, r20, r21
	 bbns	r23, .Lcmpxchg_badaddr
	}
	{
	 lw	r21, r21
	 seqi	r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
	 andi	r25, r25, ATOMIC_HASH_L2_SIZE - 1
	}
	{
	 /* Branch away at this point if we're doing a 64-bit cmpxchg. */
	 bbs	r23, .Lcmpxchg64
	 andi	r23, r0, 7   /* Precompute alignment for cmpxchg64. */
	}

	{
	 /*
	  * We very carefully align the code that actually runs with
	  * the lock held (nine bundles) so that we know it is all in
	  * the icache when we start. This instruction (the jump) is
	  * at the start of the first cache line, address zero mod 64;
	  * we jump to somewhere in the second cache line to issue the
	  * tns, then jump back to finish up.
	  */
	 s2a	ATOMIC_LOCK_REG_NAME, r25, r21
	 j	.Lcmpxchg32_tns
	}

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
	{
	 /* Check for unaligned input. */
	 bnz	sp, .Lcmpxchg_badaddr
	 auli	r23, zero, hi16(PAGE_OFFSET)  /* hugepage-aligned */
	}
	{
	 /*
	  * Slide bits into position for 'mm'. We want to ignore
	  * the low 3 bits of r0, and consider only the next
	  * ATOMIC_HASH_SHIFT bits.
	  * Because of C pointer arithmetic, we want to compute this:
	  *
	  * ((char*)atomic_locks +
	  *  (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2))
	  *
	  * Instead of two shifts we just ">> 1", and use 'mm'
	  * to ignore the low and high bits we don't want.
	  */
	 shri	r25, r0, 1

	 slt_u	r23, r0, r23

	 /*
	  * Ensure that the TLB is loaded before we take out the lock.
	  * On TILEPro, this will start fetching the value all the way
	  * into our L1 as well (and if it gets modified before we
	  * grab the lock, it will be invalidated from our cache
	  * before we reload it). On TILE64, we'll start fetching it
	  * into our L1 if we're the home, and if we're not, we'll
	  * still at least start fetching it into the home's L2.
	  */
	 lw	r26, r0
	}
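	/*
	 * Equivalently, a sketch of the ">> 1 plus mm" trick in C
	 * (illustrative arithmetic; the mask positions follow the
	 * comment above):
	 *
	 *	off = (r0 >> 1) & (((1 << ATOMIC_HASH_SHIFT) - 1) << 2);
	 *	lock = (int *)((char *)atomic_locks + off);
	 *
	 * i.e. a single right-shift by 1 leaves the desired bits already
	 * scaled by sizeof(int), instead of ">> 3" followed by "<< 2".
	 */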
	{
	 auli	r21, zero, ha16(atomic_locks)

	 bbns	r23, .Lcmpxchg_badaddr
	}
#if PAGE_SIZE < 0x10000
	/* atomic_locks is page-aligned so for big pages we don't need this. */
	addli	r21, r21, lo16(atomic_locks)
#endif
	{
	 /*
	  * Insert the hash bits into the page-aligned pointer.
	  * ATOMIC_HASH_SHIFT is so big that we don't actually hash
	  * the unmasked address bits, as that may cause unnecessary
	  * collisions.
	  */
	 mm	ATOMIC_LOCK_REG_NAME, r25, r21, 2, (ATOMIC_HASH_SHIFT + 2) - 1

	 seqi	r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
	}
	{
	 /* Branch away at this point if we're doing a 64-bit cmpxchg. */
	 bbs	r23, .Lcmpxchg64
	 andi	r23, r0, 7   /* Precompute alignment for cmpxchg64. */
	}
	{
	 /*
	  * We very carefully align the code that actually runs with
	  * the lock held (nine bundles) so that we know it is all in
	  * the icache when we start. This instruction (the jump) is
	  * at the start of the first cache line, address zero mod 64;
	  * we jump to somewhere in the second cache line to issue the
	  * tns, then jump back to finish up.
	  */
	 j	.Lcmpxchg32_tns
	}

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

ENTRY(__sys_cmpxchg_grab_lock)

	/*
	 * Perform the actual cmpxchg or atomic_update.
	 * Note that the system <arch/atomic.h> header relies on
	 * atomic_update() to always perform an "mf", so don't make
	 * it optional or conditional without modifying that code.
	 */
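	/*
	 * A hedged C sketch of the read-modify-write performed below,
	 * with the register roles taken from the code (illustrative,
	 * not an exact transcription):
	 *
	 *	u32 old = *p;				// lw r21, r0
	 *	u32 new = is_atomic_update
	 *		  ? (old & r1) + r2		// mask/addend form
	 *		  : r2;				// plain cmpxchg
	 *	if (is_atomic_update || old == r1)
	 *		*p = new;			// sw r0, r24
	 *	return old;				// back in r0
	 */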
.Ldo_cmpxchg32:
	{
	 lw	r21, r0
	 seqi	r23, TREG_SYSCALL_NR_NAME, __NR_FAST_atomic_update
	 move	r24, r2
	}
	{
	 seq	r22, r21, r1	/* See if cmpxchg matches. */
	 and	r25, r21, r1	/* If atomic_update, compute (*mem & mask) */
	}
	{
	 or	r22, r22, r23	/* Skip compare branch for atomic_update. */
	 add	r25, r25, r2	/* Compute (*mem & mask) + addend. */
	}
	{
	 mvnz	r24, r23, r25	/* Use atomic_update value if appropriate. */
	 bbns	r22, .Lcmpxchg32_mismatch
	}
	sw	r0, r24

	/* Do slow mtspr here so the following "mf" waits less. */
	{
	 move	sp, r27
	 mtspr	SPR_EX_CONTEXT_K_0, r28
	}
	mf

	/* The following instruction is the start of the second cache line. */
	{
	 move	r0, r21
	 sw	ATOMIC_LOCK_REG_NAME, zero
	}
	iret

	/* Duplicated code here in the case where we don't overlap "mf" */
.Lcmpxchg32_mismatch:
	{
	 move	r0, r21
	 sw	ATOMIC_LOCK_REG_NAME, zero
	}
	{
	 move	sp, r27
	 mtspr	SPR_EX_CONTEXT_K_0, r28
	}
	iret

	/*
	 * The locking code is the same for 32-bit cmpxchg/atomic_update,
	 * and for 64-bit cmpxchg. We provide it as a macro and put
	 * it into both versions. We can't share the code literally
	 * since it depends on having the right branch-back address.
	 * Note that the first few instructions should share the cache
	 * line with the second half of the actual locked code.
	 */
	.macro	cmpxchg_lock, bitwidth

	/* Lock; if we succeed, jump back up to the read-modify-write. */
#ifdef CONFIG_SMP
	tns	r21, ATOMIC_LOCK_REG_NAME
#else
	/*
	 * Non-SMP preserves all the lock infrastructure, to keep the
	 * code simpler for the interesting (SMP) case. However, we do
	 * one small optimization here and in atomic_asm.S, which is
	 * to fake out acquiring the actual lock in the atomic_lock table.
	 */
	movei	r21, 0
#endif

	/* Issue the slow SPR here while the tns result is in flight. */
	mfspr	r28, SPR_EX_CONTEXT_K_0

	{
	 addi	r28, r28, 8   /* return to the instruction after the swint1 */
	 bzt	r21, .Ldo_cmpxchg\bitwidth
	}
	/*
	 * The preceding instruction is the last thing that must be
	 * on the second cache line.
	 */

#ifdef CONFIG_SMP
	/*
	 * We failed to acquire the tns lock on our first try. Now use
	 * bounded exponential backoff to retry, like __atomic_spinlock().
	 */
	{
	 moveli	r23, 2048   /* maximum backoff time in cycles */
	 moveli	r25, 32     /* starting backoff time in cycles */
	}
1:	mfspr	r26, CYCLE_LOW   /* get start point for this backoff */
2:	mfspr	r22, CYCLE_LOW   /* test to see if we've backed off enough */
	sub	r22, r22, r26
	slt	r22, r22, r25
	bbst	r22, 2b
	{
	 shli	r25, r25, 1   /* double the backoff; retry the tns */
	 tns	r21, ATOMIC_LOCK_REG_NAME
	}
	slt	r26, r23, r25   /* is the proposed backoff too big? */
	{
	 mvnz	r25, r26, r23
	 bzt	r21, .Ldo_cmpxchg\bitwidth
	}
	j	1b
#endif /* CONFIG_SMP */
	.endm
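	/*
	 * Roughly, the SMP retry loop above in C (a sketch; tns() and
	 * cycle_low() are stand-ins for the tns instruction and the
	 * CYCLE_LOW SPR, and we only reach this loop after one failed
	 * tns):
	 *
	 *	u32 cap = 2048, delay = 32;
	 *	do {
	 *		u32 start = cycle_low();
	 *		while (cycle_low() - start < delay)
	 *			;			// spin out the backoff
	 *		delay = min(delay * 2, cap);	// grow, but stay bounded
	 *	} while (tns(lockp) != 0);		// retry the lock
	 */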

.Lcmpxchg32_tns:
	cmpxchg_lock 32

	/*
	 * This code is invoked from sys_cmpxchg after most of the
	 * preconditions have been checked. We still need to check
	 * that r0 is 8-byte aligned, since if it's not we won't
	 * actually be atomic. However, ATOMIC_LOCK_REG has the atomic
	 * lock pointer and r27/r28 have the saved SP/PC.
	 * r23 is holding "r0 & 7" so we can test for alignment.
	 * The compare value is in r2/r3; the new value is in r4/r5.
	 * On return, we must put the old value in r0/r1.
	 */
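	/*
	 * In effect, a hedged C sketch of the 64-bit path below, using
	 * the register mapping from the comment above:
	 *
	 *	u32 old_lo = p[0], old_hi = p[1];	// two 32-bit loads
	 *	if (old_lo == r2 && old_hi == r3) {
	 *		p[0] = r4;			// store the new value
	 *		p[1] = r5;
	 *	}
	 *	r0 = old_lo;  r1 = old_hi;		// return the old value
	 */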
	.align 64
.Lcmpxchg64:
	{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	 s2a	ATOMIC_LOCK_REG_NAME, r25, r21
#endif
	 bzt	r23, .Lcmpxchg64_tns
	}
	j	.Lcmpxchg_badaddr

.Ldo_cmpxchg64:
	{
	 lw	r21, r0
	 addi	r25, r0, 4
	}
	{
	 lw	r1, r25
	}
	seq	r26, r21, r2
	{
	 bz	r26, .Lcmpxchg64_mismatch
	 seq	r26, r1, r3
	}
	{
	 bz	r26, .Lcmpxchg64_mismatch
	}
	sw	r0, r4
	sw	r25, r5

	/*
	 * The 32-bit path provides optimized "match" and "mismatch"
	 * iret paths, but we don't have enough bundles in this cache line
	 * to do that, so we just make even the "mismatch" path do an "mf".
	 */
.Lcmpxchg64_mismatch:
	{
	 move	sp, r27
	 mtspr	SPR_EX_CONTEXT_K_0, r28
	}
	mf
	{
	 move	r0, r21
	 sw	ATOMIC_LOCK_REG_NAME, zero
	}
	iret

.Lcmpxchg64_tns:
	cmpxchg_lock 64


	/*
	 * Reset sp and revector to sys_cmpxchg_badaddr(), which will
	 * just raise the appropriate signal and exit. Doing it this
	 * way means we don't have to duplicate the code in intvec.S's
	 * int_hand macro that locates the top of the stack.
	 */
.Lcmpxchg_badaddr:
	{
	 moveli	TREG_SYSCALL_NR_NAME, __NR_cmpxchg_badaddr
	 move	sp, r27
	}
	j	intvec_SWINT_1
	ENDPROC(sys_cmpxchg)
	ENTRY(__sys_cmpxchg_end)


	/* The single-step support may need to read all the registers. */
int_unalign:
	push_extra_callee_saves r0
	j	do_trap

/* Include .intrpt1 array of interrupt vectors */
	.section ".intrpt1", "ax"

#define op_handle_perf_interrupt bad_intr
#define op_handle_aux_perf_interrupt bad_intr

#ifndef CONFIG_HARDWALL
#define do_hardwall_trap bad_intr
#endif

	int_hand INT_ITLB_MISS, ITLB_MISS, \
		 do_page_fault, handle_interrupt_no_single_step
	int_hand INT_MEM_ERROR, MEM_ERROR, bad_intr
	int_hand INT_ILL, ILL, do_trap, handle_ill
	int_hand INT_GPV, GPV, do_trap
	int_hand INT_SN_ACCESS, SN_ACCESS, do_trap
	int_hand INT_IDN_ACCESS, IDN_ACCESS, do_trap
	int_hand INT_UDN_ACCESS, UDN_ACCESS, do_trap
	int_hand INT_IDN_REFILL, IDN_REFILL, bad_intr
	int_hand INT_UDN_REFILL, UDN_REFILL, bad_intr
	int_hand INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
	int_hand INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
	int_hand INT_SWINT_3, SWINT_3, do_trap
	int_hand INT_SWINT_2, SWINT_2, do_trap
	int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
	int_hand INT_SWINT_0, SWINT_0, do_trap
	int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
	int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
	int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
	int_hand INT_DMATLB_MISS, DMATLB_MISS, do_page_fault
	int_hand INT_DMATLB_ACCESS, DMATLB_ACCESS, do_page_fault
	int_hand INT_SNITLB_MISS, SNITLB_MISS, do_page_fault
	int_hand INT_SN_NOTIFY, SN_NOTIFY, bad_intr
	int_hand INT_SN_FIREWALL, SN_FIREWALL, do_hardwall_trap
	int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
	int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
	int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
	int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
	int_hand INT_UDN_TIMER, UDN_TIMER, bad_intr
	int_hand INT_DMA_NOTIFY, DMA_NOTIFY, bad_intr
	int_hand INT_IDN_CA, IDN_CA, bad_intr
	int_hand INT_UDN_CA, UDN_CA, bad_intr
	int_hand INT_IDN_AVAIL, IDN_AVAIL, bad_intr
	int_hand INT_UDN_AVAIL, UDN_AVAIL, bad_intr
	int_hand INT_PERF_COUNT, PERF_COUNT, \
		 op_handle_perf_interrupt, handle_nmi
	int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr
#if CONFIG_KERNEL_PL == 2
	dc_dispatch INT_INTCTRL_2, INTCTRL_2
	int_hand INT_INTCTRL_1, INTCTRL_1, bad_intr
#else
	int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr
	dc_dispatch INT_INTCTRL_1, INTCTRL_1
#endif
	int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr
	int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
		 hv_message_intr
	int_hand INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
		 tile_dev_intr
	int_hand INT_I_ASID, I_ASID, bad_intr
	int_hand INT_D_ASID, D_ASID, bad_intr
	int_hand INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
		 do_page_fault
	int_hand INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
		 do_page_fault
	int_hand INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
		 do_page_fault
	int_hand INT_SN_CPL, SN_CPL, bad_intr
	int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
#if CHIP_HAS_AUX_PERF_COUNTERS()
	int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
		 op_handle_aux_perf_interrupt, handle_nmi
#endif

	/* Synthetic interrupt delivered only by the simulator */
	int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint