/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"

//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case.  This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
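
/* Illustrative sketch (not compiled): a target typically requests a full
 * flush when the guest switches translation regime, e.g. by rewriting its
 * page-table base register.  ExampleCPU and the ptbr field are
 * hypothetical; only tlb_flush() itself is defined here.
 */
#if 0
static void example_set_page_table_base(CPUState *cs, uint64_t new_base)
{
    ExampleCPU *cpu = EXAMPLE_CPU(cs);  /* hypothetical QOM cast */

    cpu->env.ptbr = new_base;           /* architectural state update */
    tlb_flush(cs, 1);                   /* drop every cached translation */
}
#endif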

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(cpu, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}
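
/* Illustrative sketch (not compiled): a target's "invalidate TLB entry"
 * instruction would forward the virtual address here.  The helper name is
 * hypothetical; tlb_flush_page() and ENV_GET_CPU() are real.
 */
#if 0
void helper_example_tlb_invalidate(CPUArchState *env, target_ulong va)
{
    tlb_flush_page(ENV_GET_CPU(env), va & TARGET_PAGE_MASK);
}
#endif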

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr, TARGET_PAGE_SIZE,
                                    DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        /* The unsigned compare folds "addr >= start && addr < start +
           length" into one test: if addr is below start, the subtraction
           wraps to a huge value and fails the check.  */
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
{
    CPUState *cpu;
    CPUArchState *env;

    CPU_FOREACH(cpu) {
        int mmu_idx;

        env = cpu->env_ptr;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            unsigned int i;

            for (i = 0; i < CPU_TLB_SIZE; i++) {
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
            }

            for (i = 0; i < CPU_VTLB_SIZE; i++) {
                tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                      start1, length);
            }
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}
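
/* How the pieces above cooperate for dirty tracking, in sequence:
 *   1. tlb_set_page() installs a clean RAM page with TLB_NOTDIRTY set,
 *      so stores to it take the slow path instead of writing through
 *      the cached addend.
 *   2. The first store traps into the notdirty slow path (exec.c),
 *      which marks the page dirty in the bitmaps.
 *   3. That path then calls tlb_set_dirty(), stripping TLB_NOTDIRTY so
 *      subsequent stores to the page go fast again.
 * tlb_reset_dirty_range() is the inverse, re-arming TLB_NOTDIRTY when
 * dirty tracking restarts (e.g. for migration or TB write protection).
 */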

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
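
/* Worked example of the widening loop above: say a 2MB page was recorded
 * at 0x00200000 (mask 0xffe00000) and a 4MB page is now added at
 * 0x00c00000 (mask 0xffc00000).  The two addresses differ under the
 * combined mask, so it is shifted left until they agree:
 *
 *   mask 0xffc00000: (0x00200000 ^ 0x00c00000) & mask != 0  -> widen
 *   mask 0xff800000: the addresses still differ             -> widen
 *   mask 0xff000000: both fall in [0x00000000, 0x01000000)  -> stop
 *
 * From then on, any tlb_flush_page() landing in that 16MB window forces
 * a full flush via the check at the top of tlb_flush_page().
 */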

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu->as, paddr,
                                                &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(section->mr->ram_addr
                                                   + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
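
/* Illustrative sketch (not compiled): tlb_set_page() is normally reached
 * from a target's tlb_fill() after a softmmu miss.  The page-table walk
 * below is a hypothetical stand-in; the call shape matches real targets.
 */
#if 0
void tlb_fill(CPUState *cs, target_ulong vaddr, int is_write,
              int mmu_idx, uintptr_t retaddr)
{
    hwaddr paddr;
    int prot;

    /* example_walk_page_table() stands in for the target's MMU walk */
    if (example_walk_page_table(cs, vaddr, is_write, mmu_idx,
                                &paddr, &prot)) {
        tlb_set_page(cs, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
    } else {
        /* ...queue the guest fault and unwind to retaddr... */
    }
}
#endif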

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu->as, pd);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}
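
/* Illustrative sketch (not compiled): the translator keys its TB lookup on
 * the physical PC returned here, so a code page mapped at several virtual
 * addresses shares one set of TBs.  Roughly what the TB lookup slow path
 * in cpu-exec.c does; the hash-walk details are elided.
 */
#if 0
    tb_page_addr_t phys_pc = get_page_addr_code(env, pc);
    unsigned int h = tb_phys_hash_func(phys_pc);
    /* ...walk the tb_phys_hash[h] chain for a TB matching pc/flags... */
#endif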

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
#undef MMUSUFFIX
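
/* Each inclusion of softmmu_template.h expands to the guest load/store
 * helpers for one access size: SHIFT is log2 of the size in bytes, so
 * SHIFT 0..3 generate the 1-, 2-, 4- and 8-byte data-access helpers
 * (helper_ret_ldub_mmu ... helper_ret_ldq_mmu and the matching stores).
 * Each helper probes the TLB, then the victim TLB, and finally calls
 * tlb_fill() on a miss.
 */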

/* Code-access variants, used for instruction fetch (they back the
   cpu_ld*_code() accessors).  These helpers are called from the
   translator rather than from generated code, so there is no TCG
   return address to unwind: GETRA and GETPC_ADJ are stubbed out.  */
#define MMUSUFFIX _cmmu
#undef GETPC_ADJ
#define GETPC_ADJ 0
#undef GETRA
#define GETRA() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"