/*
 * r2300_switch.S: R2300 specific task switching code.
 *
 * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 by Andreas Busse
 *
 * Multi-cpu abstraction and macros for easier reading:
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 *
 * Further modifications to make this work:
 * Copyright (c) 1998-2000 Harald Koerfgen
 */
#include <linux/config.h>
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/offset.h>
#include <asm/page.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>

#include <asm/asmmacro.h>

	.set	mips1			# restrict to the MIPS I ISA (R2300/R3000 class)
	.align	5			# 32-byte align the entry points below

/*
 * Offset to the current process status flags, the first 32 bytes of the
 * stack are not used.  ST_OFF locates the saved user-mode CP0 status word
 * inside the register frame at the top of the kernel stack.
 */
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)

/*
 * FPU context is saved iff the process has used its FPU in the current
 * time slice as indicated by TIF_USEDFPU.  In any case, the CU1 bit for user
 * space STATUS register should be 0, so that a process *always* starts its
 * userland with FPU disabled after each context switch.
 *
 * FPU will be enabled as soon as the process accesses FPU again, through
 * do_cpu() trap.
 */

/*
 * task_struct *resume(task_struct *prev, task_struct *next,
 *		       struct thread_info *next_ti)
 *
 * In:	a0 = prev task_struct, a1 = next task_struct,
 *	a2 = next task's thread_info
 * Out:	v0 = prev, handed back to the caller after the switch
 */
LEAF(resume)
#ifndef CONFIG_CPU_HAS_LLSC
	sw	zero, ll_bit		# no ll/sc hardware: drop the emulated
					# load-linked reservation on task switch
#endif
	mfc0	t1, CP0_STATUS
	sw	t1, THREAD_STATUS(a0)	# save prev's CP0 status word
	cpu_save_nonscratch a0		# save prev's callee-saved GPRs
	sw	ra, THREAD_REG31(a0)	# save prev's resume address

	/*
	 * check if we need to save FPU registers
	 */
	lw	t3, TASK_THREAD_INFO(a0)
	lw	t0, TI_FLAGS(t3)
	li	t1, _TIF_USEDFPU
	and	t2, t0, t1
	beqz	t2, 1f			# FPU untouched this slice -> skip save
	nor	t1, zero, t1		# (branch delay slot) t1 = ~_TIF_USEDFPU;
					# executes whether or not we branch

	and	t0, t0, t1		# clear TIF_USEDFPU for the next slice
	sw	t0, TI_FLAGS(t3)

	/*
	 * clear saved user stack CU1 bit so prev restarts userland with the
	 * FPU disabled (re-enabled lazily via the do_cpu() trap)
	 */
	lw	t0, ST_OFF(t3)
	li	t1, ~ST0_CU1
	and	t0, t0, t1
	sw	t0, ST_OFF(t3)

	fpu_save_single a0, t0		# clobbers t0

1:
	/*
	 * The order of restoring the registers takes care of the race
	 * updating $28, $29 and kernelsp without disabling ints.
	 */
	move	$28, a2			# $28 (gp) = next's thread_info
	cpu_restore_nonscratch a1	# restore next's callee-saved GPRs ($29 too)

	addiu	t1, $28, _THREAD_SIZE - 32
	sw	t1, kernelsp		# publish next's kernel stack top

	mfc0	t1, CP0_STATUS		/* Do we really need this? */
	li	a3, 0xff01		# keep live interrupt mask/enable bits
	and	t1, a3			# (IM0-7 + IE) from the current status...
	lw	a2, THREAD_STATUS(a1)
	nor	a3, $0, a3
	and	a2, a3			# ...and take everything else from
	or	a2, t1			# next's saved status word
	mtc0	a2, CP0_STATUS
	move	v0, a0			# return prev
	jr	ra
	END(resume)

/*
 * Save a thread's fp context.
 *
 * In:	a0 = task whose single-precision FP registers are written back
 *	     (via the fpu_save_single macro from <asm/asmmacro.h>)
 */
LEAF(_save_fp)
	fpu_save_single a0, t1		# clobbers t1
	jr	ra
	END(_save_fp)

/*
 * Restore a thread's fp context.
 *
 * In:	a0 = task whose single-precision FP registers are reloaded
 *	     (via the fpu_restore_single macro from <asm/asmmacro.h>)
 */
LEAF(_restore_fp)
	fpu_restore_single a0, t1	# clobbers t1
	jr	ra
	END(_restore_fp)

/*
 * Load the FPU with signalling NANS.  This bit pattern we're using has
 * the property that no matter whether considered as single or as double
 * precision represents signaling NANS.
 *
 * We initialize fcr31 to rounding to nearest, no exceptions.
 */

#define FPU_DEFAULT 0x00000000		# fcr31: round-to-nearest, all
					# exception enables clear

LEAF(_init_fpu)
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_STATUS		# set CU1 so the mtc1/ctc1 below work

	li	t1, FPU_DEFAULT
	ctc1	t1, fcr31		# program the FP control/status register

	li	t0, -1			# 0xffffffff: the signalling-NaN fill
					# pattern described above

	# Straight-line fill of all 32 FP registers; MIPS I has no cheaper
	# way to do this without extra GPR clobbers.
	mtc1	t0, $f0
	mtc1	t0, $f1
	mtc1	t0, $f2
	mtc1	t0, $f3
	mtc1	t0, $f4
	mtc1	t0, $f5
	mtc1	t0, $f6
	mtc1	t0, $f7
	mtc1	t0, $f8
	mtc1	t0, $f9
	mtc1	t0, $f10
	mtc1	t0, $f11
	mtc1	t0, $f12
	mtc1	t0, $f13
	mtc1	t0, $f14
	mtc1	t0, $f15
	mtc1	t0, $f16
	mtc1	t0, $f17
	mtc1	t0, $f18
	mtc1	t0, $f19
	mtc1	t0, $f20
	mtc1	t0, $f21
	mtc1	t0, $f22
	mtc1	t0, $f23
	mtc1	t0, $f24
	mtc1	t0, $f25
	mtc1	t0, $f26
	mtc1	t0, $f27
	mtc1	t0, $f28
	mtc1	t0, $f29
	mtc1	t0, $f30
	mtc1	t0, $f31
	jr	ra
	END(_init_fpu)