/*
 *  Host code generation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#endif
#include "qemu/osdep.h"


#include "qemu-common.h"
#define NO_CPU_IO_DEFS
#include "cpu.h"
#include "trace-root.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#include "exec/exec-all.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd /* avoid redefinition */
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else
#include "exec/address-spaces.h"
#endif

#include "exec/cputlb.h"
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "sysemu/cpus.h"

/* #define DEBUG_TB_INVALIDATE */
/* #define DEBUG_TB_FLUSH */
/* make various TB consistency checks */
/* #define DEBUG_TB_CHECK */

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* Access to the various translation structures needs to be serialised
 * via locks for consistency.  This is automatic for SoftMMU-based system
 * emulation due to its single-threaded nature.  In user-mode emulation,
 * access to the memory-related structures is protected with the
 * mmap_lock.
 */
#ifdef CONFIG_SOFTMMU
#define assert_memory_lock() tcg_debug_assert(have_tb_lock)
#else
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
#ifdef CONFIG_SOFTMMU
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    unsigned long *code_bitmap;
#else
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)

uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;

/*
 * L1 Mapping properties
 */
static int v_l1_size;
static int v_l1_shift;
static int v_l2_levels;

/* The bottom level has pointers to PageDesc, and is indexed by
 * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
 */
#define V_L1_MIN_BITS 4
#define V_L1_MAX_BITS (V_L2_BITS + 3)
#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)

static void *l1_map[V_L1_MAX_SIZE];

/* code generation context */
TCGContext tcg_ctx;
bool parallel_cpus;

/* translation block context */
__thread int have_tb_lock;

static void page_table_config_init(void)
{
    uint32_t v_l1_bits;

    assert(TARGET_PAGE_BITS);
    /* The bits remaining after N lower levels of page tables.  */
    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
    if (v_l1_bits < V_L1_MIN_BITS) {
        v_l1_bits += V_L2_BITS;
    }

    v_l1_size = 1 << v_l1_bits;
    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
    v_l2_levels = v_l1_shift / V_L2_BITS - 1;

    assert(v_l1_bits <= V_L1_MAX_BITS);
    assert(v_l1_shift % V_L2_BITS == 0);
    assert(v_l2_levels >= 0);
}
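
/*
 * Illustrative, hypothetical example (a sketch, not upstream code): with
 * assumed values L1_MAP_ADDR_SPACE_BITS == 64, TARGET_PAGE_BITS == 12 and
 * V_L2_BITS == 10, the 52 bits above the page offset leave a remainder of
 * 52 % 10 == 2, which is below V_L1_MIN_BITS, so v_l1_bits becomes 12:
 *
 *   v_l1_size   = 1 << 12 = 4096 entries in l1_map
 *   v_l1_shift  = 64 - 12 - 12 = 40
 *   v_l2_levels = 40 / 10 - 1 = 3 intermediate levels
 *
 * i.e. a lookup consumes 12 (L1) + 3 * 10 (intermediate) + 10 (leaf)
 * + 12 (page offset) = 64 address bits in total.
 */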

#define assert_tb_locked() tcg_debug_assert(have_tb_lock)
#define assert_tb_unlocked() tcg_debug_assert(!have_tb_lock)

void tb_lock(void)
{
    assert_tb_unlocked();
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
    have_tb_lock++;
}

void tb_unlock(void)
{
    assert_tb_locked();
    have_tb_lock--;
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
}

void tb_lock_reset(void)
{
    if (have_tb_lock) {
        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
        have_tb_lock = 0;
    }
}

static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);

void cpu_gen_init(void)
{
    tcg_context_init(&tcg_ctx);
}

/* Encode VAL as a signed leb128 sequence at P.
   Return P incremented past the encoded value.  */
static uint8_t *encode_sleb128(uint8_t *p, target_long val)
{
    int more, byte;

    do {
        byte = val & 0x7f;
        val >>= 7;
        more = !((val == 0 && (byte & 0x40) == 0)
                 || (val == -1 && (byte & 0x40) != 0));
        if (more) {
            byte |= 0x80;
        }
        *p++ = byte;
    } while (more);

    return p;
}

/* Decode a signed leb128 sequence at *PP; increment *PP past the
   decoded value.  Return the decoded value.  */
static target_long decode_sleb128(uint8_t **pp)
{
    uint8_t *p = *pp;
    target_long val = 0;
    int byte, shift = 0;

    do {
        byte = *p++;
        val |= (target_ulong)(byte & 0x7f) << shift;
        shift += 7;
    } while (byte & 0x80);
    if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
        val |= -(target_ulong)1 << shift;
    }

    *pp = p;
    return val;
}
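
/*
 * Illustrative, hypothetical example (a sketch, not upstream code): a
 * round trip through the two helpers above.  300 encodes as the two
 * bytes 0xac 0x02 and -2 as the single byte 0x7e; decoding recovers the
 * original values and leaves the cursor just past the encoded data.
 * Kept under "#if 0" so it is never compiled in.
 */
#if 0 /* example only */
static void example_sleb128_roundtrip(void)
{
    uint8_t buf[16], *p = buf, *q = buf;

    p = encode_sleb128(p, 300);   /* buf[0] == 0xac, buf[1] == 0x02 */
    p = encode_sleb128(p, -2);    /* buf[2] == 0x7e */

    assert(decode_sleb128(&q) == 300);
    assert(decode_sleb128(&q) == -2);
    assert(q == p);
}
#endif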

/* Encode the data collected about the instructions while compiling TB.
   Place the data at BLOCK, and return the number of bytes consumed.

   The logical table consists of TARGET_INSN_START_WORDS target_ulong's,
   which come from the target's insn_start data, followed by a uintptr_t
   which comes from the host pc of the end of the code implementing the insn.

   Each line of the table is encoded as sleb128 deltas from the previous
   line.  The seed for the first line is { tb->pc, 0..., tb->tc_ptr }.
   That is, the first column is seeded with the guest pc, the last column
   with the host pc, and the middle columns with zeros.  */

static int encode_search(TranslationBlock *tb, uint8_t *block)
{
    uint8_t *highwater = tcg_ctx.code_gen_highwater;
    uint8_t *p = block;
    int i, j, n;

    tb->tc_search = block;

    for (i = 0, n = tb->icount; i < n; ++i) {
        target_ulong prev;

        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            if (i == 0) {
                prev = (j == 0 ? tb->pc : 0);
            } else {
                prev = tcg_ctx.gen_insn_data[i - 1][j];
            }
            p = encode_sleb128(p, tcg_ctx.gen_insn_data[i][j] - prev);
        }
        prev = (i == 0 ? 0 : tcg_ctx.gen_insn_end_off[i - 1]);
        p = encode_sleb128(p, tcg_ctx.gen_insn_end_off[i] - prev);

        /* Test for (pending) buffer overflow.  The assumption is that any
           one row beginning below the high water mark cannot overrun
           the buffer completely.  Thus we can test for overflow after
           encoding a row without having to check during encoding.  */
        if (unlikely(p > highwater)) {
            return -1;
        }
    }

    return p - block;
}
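
/*
 * Illustrative, hypothetical example (a sketch, not upstream code): for
 * an assumed two-insn TB at guest pc 0x1000 (insns at 0x1000 and 0x1004,
 * with the host code for them ending at offsets 0x30 and 0x58 from
 * tb->tc_ptr, and TARGET_INSN_START_WORDS == 1), the logical table and
 * the deltas that actually get stored are:
 *
 *   row 0: { 0x1000, +0x30 }  ->  deltas { 0, 0x30 }
 *   row 1: { 0x1004, +0x58 }  ->  deltas { 4, 0x28 }
 *
 * Each delta is emitted as a signed leb128 sequence, so the whole table
 * occupies only a handful of bytes next to the generated code.
 */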

/* The cpu state corresponding to 'searched_pc' is restored.
 * Called with tb_lock held.
 */
static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                     uintptr_t searched_pc)
{
    target_ulong data[TARGET_INSN_START_WORDS] = { tb->pc };
    uintptr_t host_pc = (uintptr_t)tb->tc_ptr;
    CPUArchState *env = cpu->env_ptr;
    uint8_t *p = tb->tc_search;
    int i, j, num_insns = tb->icount;
#ifdef CONFIG_PROFILER
    int64_t ti = profile_getclock();
#endif

    searched_pc -= GETPC_ADJ;

    if (searched_pc < host_pc) {
        return -1;
    }

    /* Reconstruct the stored insn data while looking for the point at
       which the end of the insn exceeds the searched_pc.  */
    for (i = 0; i < num_insns; ++i) {
        for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
            data[j] += decode_sleb128(&p);
        }
        host_pc += decode_sleb128(&p);
        if (host_pc > searched_pc) {
            goto found;
        }
    }
    return -1;

 found:
    if (tb->cflags & CF_USE_ICOUNT) {
        assert(use_icount);
        /* Reset the cycle counter to the start of the block.  */
        cpu->icount_decr.u16.low += num_insns;
        /* Clear the IO flag.  */
        cpu->can_do_io = 0;
    }
    cpu->icount_decr.u16.low -= i;
    restore_state_to_opc(env, tb, data);

#ifdef CONFIG_PROFILER
    tcg_ctx.restore_time += profile_getclock() - ti;
    tcg_ctx.restore_count++;
#endif
    return 0;
}

bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
{
    TranslationBlock *tb;
    bool r = false;

    /* A retaddr of zero is invalid so we really shouldn't have ended
     * up here. The target code has likely forgotten to check retaddr
     * != 0 before attempting to restore state. We return early to
     * avoid blowing up on a recursive tb_lock(). The target must have
     * previously survived a failed cpu_restore_state because
     * tb_find_pc(0) would have failed anyway. It still should be
     * fixed though.
     */

    if (!retaddr) {
        return r;
    }

    tb_lock();
    tb = tb_find_pc(retaddr);
    if (tb) {
        cpu_restore_state_from_tb(cpu, tb, retaddr);
        if (tb->cflags & CF_NOCACHE) {
            /* one-shot translation, invalidate it immediately */
            tb_phys_invalidate(tb, -1);
            tb_free(tb);
        }
        r = true;
    }
    tb_unlock();

    return r;
}
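
/*
 * Illustrative, hypothetical example (a sketch, not upstream code): a
 * target helper that needs precise guest CPU state typically passes its
 * own return address into translated code so the state can be rolled
 * back to the faulting instruction.  The helper name and exception
 * choice below are made up.  Kept under "#if 0" so it is never compiled.
 */
#if 0 /* example only */
void helper_example_fault(CPUArchState *env)
{
    CPUState *cs = ENV_GET_CPU(env);

    /* GETPC() is only meaningful in a helper called directly from
       translated code. */
    cpu_restore_state(cs, GETPC());
    cs->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit(cs);
}
#endif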

void page_size_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    qemu_real_host_page_size = getpagesize();
    qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size;
    if (qemu_host_page_size == 0) {
        qemu_host_page_size = qemu_real_host_page_size;
    }
    if (qemu_host_page_size < TARGET_PAGE_SIZE) {
        qemu_host_page_size = TARGET_PAGE_SIZE;
    }
    qemu_host_page_mask = -(intptr_t)qemu_host_page_size;
}

static void page_init(void)
{
    page_size_init();
    page_table_config_init();

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

/* If alloc=1:
 * Called with tb_lock held for system emulation.
 * Called with mmap_lock held for user-mode emulation.
 */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

    if (alloc) {
        assert_memory_lock();
    }

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

    /* Level 2..N-1.  */
    for (i = v_l2_levels; i > 0; i--) {
        void **p = atomic_rcu_read(lp);

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            p = g_new0(void *, V_L2_SIZE);
            atomic_rcu_set(lp, p);
        }

        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
    }

    pd = atomic_rcu_read(lp);
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        pd = g_new0(PageDesc, V_L2_SIZE);
        atomic_rcu_set(lp, pd);
    }

    return pd + (index & (V_L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change once a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__powerpc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (32u * 1024 * 1024)
#elif defined(__aarch64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#elif defined(__mips__)
  /* We have a 256MB branch region, but leave room to make sure the
     main executable is also within that region.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    return tb_size;
}
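
/*
 * Illustrative, hypothetical example (a sketch, not upstream code): the
 * clamping behaviour above, assuming USE_STATIC_CODE_GEN_BUFFER is
 * defined and the host has a finite MAX_CODE_GEN_BUFFER_SIZE.  Kept
 * under "#if 0" so it is never compiled in.
 */
#if 0 /* example only */
static void example_size_code_gen_buffer(void)
{
    /* Zero selects the default size. */
    assert(size_code_gen_buffer(0) == DEFAULT_CODE_GEN_BUFFER_SIZE);
    /* Too-small requests are rounded up to the minimum... */
    assert(size_code_gen_buffer(1) == MIN_CODE_GEN_BUFFER_SIZE);
    /* ...and too-large requests are clamped to the host maximum. */
    assert(size_code_gen_buffer(MAX_CODE_GEN_BUFFER_SIZE + 1)
           == MAX_CODE_GEN_BUFFER_SIZE);
}
#endif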

#ifdef __mips__
/* In order to use J and JAL within the code_gen_buffer, we require
   that the buffer not cross a 256MB boundary.  */
static inline bool cross_256mb(void *addr, size_t size)
{
    return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}

/* We weren't able to allocate a buffer without crossing that boundary,
   so make do with the larger portion of the buffer that doesn't cross.
   Returns the new base of the buffer, and adjusts code_gen_buffer_size.  */
static inline void *split_cross_256mb(void *buf1, size_t size1)
{
    void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
    size_t size2 = buf1 + size1 - buf2;

    size1 = buf2 - buf1;
    if (size1 < size2) {
        size1 = size2;
        buf1 = buf2;
    }

    tcg_ctx.code_gen_buffer_size = size1;
    return buf1;
}
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

# ifdef _WIN32
static inline void do_protect(void *addr, long size, int prot)
{
    DWORD old_protect;
    VirtualProtect(addr, size, prot, &old_protect);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PAGE_EXECUTE_READWRITE);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PAGE_NOACCESS);
}
# else
static inline void do_protect(void *addr, long size, int prot)
{
    uintptr_t start, end;

    start = (uintptr_t)addr;
    start &= qemu_real_host_page_mask;

    end = (uintptr_t)addr + size;
    end = ROUND_UP(end, qemu_real_host_page_size);

    mprotect((void *)start, end - start, prot);
}

static inline void map_exec(void *addr, long size)
{
    do_protect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

static inline void map_none(void *addr, long size)
{
    do_protect(addr, size, PROT_NONE);
}
# endif /* WIN32 */

static inline void *alloc_code_gen_buffer(void)
{
    void *buf = static_code_gen_buffer;
    size_t full_size, size;

    /* The size of the buffer, rounded down to end on a page boundary.  */
    full_size = (((uintptr_t)buf + sizeof(static_code_gen_buffer))
                 & qemu_real_host_page_mask) - (uintptr_t)buf;

    /* Reserve a guard page.  */
    size = full_size - qemu_real_host_page_size;

    /* Honor a command-line option limiting the size of the buffer.  */
    if (size > tcg_ctx.code_gen_buffer_size) {
        size = (((uintptr_t)buf + tcg_ctx.code_gen_buffer_size)
                & qemu_real_host_page_mask) - (uintptr_t)buf;
    }
    tcg_ctx.code_gen_buffer_size = size;

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        buf = split_cross_256mb(buf, size);
        size = tcg_ctx.code_gen_buffer_size;
    }
#endif

    map_exec(buf, size);
    map_none(buf + size, qemu_real_host_page_size);
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#elif defined(_WIN32)
static inline void *alloc_code_gen_buffer(void)
{
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf1, *buf2;

    /* Perform the allocation in two steps, so that the guard page
       is reserved but uncommitted.  */
    buf1 = VirtualAlloc(NULL, size + qemu_real_host_page_size,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (buf1 != NULL) {
        buf2 = VirtualAlloc(buf1, size, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
        assert(buf1 == buf2);
    }

    return buf1;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    size_t size = tcg_ctx.code_gen_buffer_size;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (size > 800u * 1024 * 1024) {
        tcg_ctx.code_gen_buffer_size = size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# elif defined(__mips__)
#  if _MIPS_SIM == _ABI64
    start = 0x128000000ul;
#  else
    start = 0x08000000ul;
#  endif
# endif

    buf = mmap((void *)start, size + qemu_real_host_page_size,
               PROT_NONE, flags, -1, 0);
    if (buf == MAP_FAILED) {
        return NULL;
    }

#ifdef __mips__
    if (cross_256mb(buf, size)) {
        /* Try again, with the original still mapped, to avoid re-acquiring
           that 256mb crossing.  This time don't specify an address.  */
        size_t size2;
        void *buf2 = mmap(NULL, size + qemu_real_host_page_size,
                          PROT_NONE, flags, -1, 0);
        switch ((int)(buf2 != MAP_FAILED)) {
        case 1:
            if (!cross_256mb(buf2, size)) {
                /* Success!  Use the new buffer.  */
                munmap(buf, size + qemu_real_host_page_size);
                break;
            }
            /* Failure.  Work with what we had.  */
            munmap(buf2, size + qemu_real_host_page_size);
            /* fallthru */
        default:
            /* Split the original buffer.  Free the smaller half.  */
            buf2 = split_cross_256mb(buf, size);
            size2 = tcg_ctx.code_gen_buffer_size;
            if (buf == buf2) {
                munmap(buf + size2 + qemu_real_host_page_size, size - size2);
            } else {
                munmap(buf, size - size2);
            }
            size = size2;
            break;
        }
        buf = buf2;
    }
#endif

    /* Make the final buffer accessible.  The guard page at the end
       will remain inaccessible with PROT_NONE.  */
    mprotect(buf, size, PROT_WRITE | PROT_READ | PROT_EXEC);

    /* Request large pages for the buffer.  */
    qemu_madvise(buf, size, QEMU_MADV_HUGEPAGE);

    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */

static inline void code_gen_alloc(size_t tb_size)
{
    tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
    tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
    if (tcg_ctx.code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Estimate a good size for the number of TBs we can support.  We
       still haven't deducted the prologue from the buffer size here,
       but that's minimal and won't affect the estimate much.  */
    tcg_ctx.code_gen_max_blocks
        = tcg_ctx.code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tcg_ctx.tb_ctx.tbs = g_new(TranslationBlock, tcg_ctx.code_gen_max_blocks);

    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
}

static void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE, mode);
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    page_init();
    tb_htable_init();
    code_gen_alloc(tb_size);
#if defined(CONFIG_SOFTMMU)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return tcg_ctx.code_gen_buffer != NULL;
}

/*
 * Allocate a new translation block. Flush the translation buffer if
 * too many translation blocks or too much generated code.
 *
 * Called with tb_lock held.
 */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    assert_tb_locked();

    if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
        return NULL;
    }
    tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    tb->invalid = false;
    return tb;
}

/* Called with tb_lock held.  */
void tb_free(TranslationBlock *tb)
{
    assert_tb_locked();

    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
            tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
        tcg_ctx.code_gen_ptr = tb->tc_ptr;
        tcg_ctx.tb_ctx.nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
#ifdef CONFIG_SOFTMMU
    g_free(p->code_bitmap);
    p->code_bitmap = NULL;
    p->code_write_count = 0;
#endif
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    tb_lock();

    /* If it has already been done on request of another CPU,
     * just retry.
     */
    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }

#if defined(DEBUG_TB_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
           tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
           ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
           tcg_ctx.tb_ctx.nb_tbs : 0);
#endif
    if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
        > tcg_ctx.code_gen_buffer_size) {
        cpu_abort(cpu, "Internal error: code buffer overflow\n");
    }

    CPU_FOREACH(cpu) {
        int i;

        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
            atomic_set(&cpu->tb_jmp_cache[i], NULL);
        }
    }

    tcg_ctx.tb_ctx.nb_tbs = 0;
    qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
                  tcg_ctx.tb_ctx.tb_flush_count + 1);

done:
    tb_unlock();
}

void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
        async_safe_run_on_cpu(cpu, do_tb_flush,
                              RUN_ON_CPU_HOST_INT(tb_flush_count));
    }
}

#ifdef DEBUG_TB_CHECK

static void
do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    target_ulong addr = *(target_ulong *)userp;

    if (!(addr + TARGET_PAGE_SIZE <= tb->pc || addr >= tb->pc + tb->size)) {
        printf("ERROR invalidate: address=" TARGET_FMT_lx
               " PC=%08lx size=%04x\n", addr, (long)tb->pc, tb->size);
    }
}

/* verify that all the pages have correct rights for code
 *
 * Called with tb_lock held.
 */
static void tb_invalidate_check(target_ulong address)
{
    address &= TARGET_PAGE_MASK;
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_invalidate_check, &address);
}

static void
do_tb_page_check(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    TranslationBlock *tb = p;
    int flags1, flags2;

    flags1 = page_get_flags(tb->pc);
    flags2 = page_get_flags(tb->pc + tb->size - 1);
    if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
        printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
               (long)tb->pc, tb->size, flags1, flags2);
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    qht_iter(&tcg_ctx.tb_ctx.htable, do_tb_page_check, NULL);
}

#endif

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

/* remove the TB from a list of TBs jumping to the n-th jump target of the TB */
static inline void tb_remove_from_jmp_list(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_next[n];
    if (*ptb) {
        /* find tb(n) in circular list */
        for (;;) {
            ntb = *ptb;
            n1 = ntb & 3;
            tb1 = (TranslationBlock *)(ntb & ~3);
            if (n1 == n && tb1 == tb) {
                break;
            }
            if (n1 == 2) {
                ptb = &tb1->jmp_list_first;
            } else {
                ptb = &tb1->jmp_list_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_list_next[n];

        tb->jmp_list_next[n] = (uintptr_t)NULL;
    }
}
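
/*
 * Illustrative, hypothetical example (a sketch, not upstream code): the
 * jump lists store tagged pointers.  The low two bits of each word
 * encode which outgoing jump slot (0 or 1) of the pointed-to TB the
 * entry refers to, and the value 2 marks the list head kept in
 * jmp_list_first.  Kept under "#if 0" so it is never compiled in.
 */
#if 0 /* example only */
static void example_jmp_list_tagging(TranslationBlock *tb)
{
    uintptr_t entry = tb->jmp_list_first;     /* initialised to (tb | 2) */
    unsigned int n = entry & 3;               /* tag: 0, 1 or 2 */
    TranslationBlock *target = (TranslationBlock *)(entry & ~3);

    /* A TB with no incoming jumps points back at itself with tag 2. */
    assert(n == 2 && target == tb);
}
#endif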

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc_ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *ptb, ntb;
    unsigned int n1;

    ptb = &tb->jmp_list_first;
    for (;;) {
        ntb = *ptb;
        n1 = ntb & 3;
        tb1 = (TranslationBlock *)(ntb & ~3);
        if (n1 == 2) {
            break;
        }
        tb_reset_jump(tb1, n1);
        *ptb = tb1->jmp_list_next[n1];
        tb1->jmp_list_next[n1] = (uintptr_t)NULL;
    }
}

/* invalidate one TB
 *
 * Called with tb_lock held.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *cpu;
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;

    assert_tb_locked();

    atomic_set(&tb->invalid, true);

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    CPU_FOREACH(cpu) {
        if (atomic_read(&cpu->tb_jmp_cache[h]) == tb) {
            atomic_set(&cpu->tb_jmp_cache[h], NULL);
        }
    }

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
}

#ifdef CONFIG_SOFTMMU
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = bitmap_new(TARGET_PAGE_SIZE);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE) {
                tb_end = TARGET_PAGE_SIZE;
            }
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        bitmap_set(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#endif

/* add the tb in the target page and protect it if necessary
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_memory_lock();

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2) {
                continue;
            }
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/* add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    uint32_t h;

    assert_memory_lock();

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1) {
        tb_alloc_page(tb, 1, phys_page2);
    } else {
        tb->page_addr[1] = -1;
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb->pc, tb->flags);
    qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* Called with mmap_lock held for user mode emulation.  */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags, int cflags)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    tcg_insn_unit *gen_code_buf;
    int gen_code_size, search_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert_memory_lock();

    phys_pc = get_page_addr_code(env, pc);
    if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
        cflags |= CF_USE_ICOUNT;
    }

    tb = tb_alloc(pc);
    if (unlikely(!tb)) {
 buffer_overflow:
        /* flush must be done */
        tb_flush(cpu);
        mmap_unlock();
        /* Make the execution loop process the flush as soon as possible. */
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit(cpu);
    }

    gen_code_buf = tcg_ctx.code_gen_ptr;
    tb->tc_ptr = gen_code_buf;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count1++; /* includes aborted translations because of
                            exceptions */
    ti = profile_getclock();
#endif

    tcg_func_start(&tcg_ctx);

    tcg_ctx.cpu = ENV_GET_CPU(env);
    gen_intermediate_code(env, tb);
    tcg_ctx.cpu = NULL;

    trace_translate_block(tb, tb->pc, tb->tc_ptr);

    /* generate machine code */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
    tcg_ctx.tb_jmp_reset_offset = tb->jmp_reset_offset;
#ifdef USE_DIRECT_JUMP
    tcg_ctx.tb_jmp_insn_offset = tb->jmp_insn_offset;
    tcg_ctx.tb_jmp_target_addr = NULL;
#else
    tcg_ctx.tb_jmp_insn_offset = NULL;
    tcg_ctx.tb_jmp_target_addr = tb->jmp_target_addr;
#endif

#ifdef CONFIG_PROFILER
    tcg_ctx.tb_count++;
    tcg_ctx.interm_time += profile_getclock() - ti;
    tcg_ctx.code_time -= profile_getclock();
#endif

    /* ??? Overflow could be handled better here.  In particular, we
       don't need to re-do gen_intermediate_code, nor should we re-do
       the tcg optimization currently hidden inside tcg_gen_code.  All
       that should be required is to flush the TBs, allocate a new TB,
       re-initialize it per above, and re-do the actual code generation.  */
    gen_code_size = tcg_gen_code(&tcg_ctx, tb);
    if (unlikely(gen_code_size < 0)) {
        goto buffer_overflow;
    }
    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
    if (unlikely(search_size < 0)) {
        goto buffer_overflow;
    }

#ifdef CONFIG_PROFILER
    tcg_ctx.code_time += profile_getclock();
    tcg_ctx.code_in_len += tb->size;
    tcg_ctx.code_out_len += gen_code_size;
    tcg_ctx.search_out_len += search_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
        qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        qemu_log("OUT: [size=%d]\n", gen_code_size);
        log_disas(tb->tc_ptr, gen_code_size);
        qemu_log("\n");
        qemu_log_flush();
        qemu_log_unlock();
    }
#endif

    tcg_ctx.code_gen_ptr = (void *)
        ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
                 CODE_GEN_ALIGN);

    /* init jump list */
    assert(((uintptr_t)tb & 3) == 0);
    tb->jmp_list_first = (uintptr_t)tb | 2;
    tb->jmp_list_next[0] = (uintptr_t)NULL;
    tb->jmp_list_next[1] = (uintptr_t)NULL;

 1361    /* init original jump addresses which have been set during tcg_gen_code() */
1362 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
1363 tb_reset_jump(tb, 0);
1364 }
1365 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
1366 tb_reset_jump(tb, 1);
1367 }
1368
Blue Swirl5b6dd862012-12-02 16:04:43 +00001369 /* check next page if needed */
1370 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1371 phys_page2 = -1;
1372 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1373 phys_page2 = get_page_addr_code(env, virt_page2);
1374 }
Sergey Fedorov901bc3d2016-03-22 19:00:12 +03001375    /* As long as consistency of the TB data structures is provided by tb_lock in user
1376 * mode and is implicit in single-threaded softmmu emulation, no explicit
1377 * memory barrier is required before tb_link_page() makes the TB visible
1378 * through the physical hash table and physical page list.
1379 */
Blue Swirl5b6dd862012-12-02 16:04:43 +00001380 tb_link_page(tb, phys_pc, phys_page2);
1381 return tb;
1382}
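/*
 * Illustrative sketch, not part of the original file: the precise-SMC paths
 * later in this file call tb_gen_code() with cflags == 1 so that the new
 * block contains exactly one guest instruction.  A minimal caller, assuming
 * the required locks are already held, could look like this (helper name is
 * made up):
 */
#if 0
static void example_regen_single_insn(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    uint32_t flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    /* cflags == 1: a CF_COUNT_MASK value of 1, i.e. a single instruction */
    tb_gen_code(cpu, pc, cs_base, flags, 1);
}
#endif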
1383
1384/*
1385 * Invalidate all TBs which intersect with the target physical address range
1386 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 1387 * Unlike tb_invalidate_phys_page_range() below, this function takes no
 1388 * 'is_cpu_write_access' argument; it simply drops every TB that overlaps
 1389 * the range.
Paolo Bonzini75692082015-08-11 10:59:50 +02001390 *
Alex Bennéeba051fb2016-10-27 16:10:16 +01001391 * Called with mmap_lock held for user-mode emulation, grabs tb_lock
1392 * Called with tb_lock held for system-mode emulation
Blue Swirl5b6dd862012-12-02 16:04:43 +00001393 */
Alex Bennéeba051fb2016-10-27 16:10:16 +01001394static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001395{
1396 while (start < end) {
Paolo Bonzini35865332015-04-22 14:20:35 +02001397 tb_invalidate_phys_page_range(start, end, 0);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001398 start &= TARGET_PAGE_MASK;
1399 start += TARGET_PAGE_SIZE;
1400 }
1401}
1402
Alex Bennéeba051fb2016-10-27 16:10:16 +01001403#ifdef CONFIG_SOFTMMU
1404void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1405{
Pranith Kumar6ac3d7e2017-02-23 18:29:05 +00001406 assert_tb_locked();
Alex Bennéeba051fb2016-10-27 16:10:16 +01001407 tb_invalidate_phys_range_1(start, end);
1408}
1409#else
1410void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
1411{
1412 assert_memory_lock();
1413 tb_lock();
1414 tb_invalidate_phys_range_1(start, end);
1415 tb_unlock();
1416}
1417#endif
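/*
 * Illustrative sketch, not part of the original file: since the range is the
 * half-open [start, end) and may cross page boundaries, dropping all
 * translated code for one guest page can be written as below (helper name is
 * made up):
 */
#if 0
static void example_invalidate_guest_page(tb_page_addr_t addr)
{
    tb_page_addr_t start = addr & TARGET_PAGE_MASK;

    tb_invalidate_phys_range(start, start + TARGET_PAGE_SIZE);
}
#endif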
Blue Swirl5b6dd862012-12-02 16:04:43 +00001418/*
1419 * Invalidate all TBs which intersect with the target physical address range
1420 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1421 * 'is_cpu_write_access' should be true if called from a real cpu write
1422 * access: the virtual CPU will exit the current TB if code is modified inside
1423 * this TB.
Paolo Bonzini75692082015-08-11 10:59:50 +02001424 *
Alex Bennéeba051fb2016-10-27 16:10:16 +01001425 * Called with tb_lock/mmap_lock held for user-mode emulation
1426 * Called with tb_lock held for system-mode emulation
Blue Swirl5b6dd862012-12-02 16:04:43 +00001427 */
1428void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1429 int is_cpu_write_access)
1430{
Sergey Fedorov32135252016-05-03 14:04:22 +03001431 TranslationBlock *tb, *tb_next;
Andreas Färberbaea4fa2013-09-03 10:51:26 +02001432#if defined(TARGET_HAS_PRECISE_SMC)
Sergey Fedorov32135252016-05-03 14:04:22 +03001433 CPUState *cpu = current_cpu;
Andreas Färber4917cf42013-05-27 05:17:50 +02001434 CPUArchState *env = NULL;
1435#endif
Blue Swirl5b6dd862012-12-02 16:04:43 +00001436 tb_page_addr_t tb_start, tb_end;
1437 PageDesc *p;
1438 int n;
1439#ifdef TARGET_HAS_PRECISE_SMC
1440 int current_tb_not_found = is_cpu_write_access;
1441 TranslationBlock *current_tb = NULL;
1442 int current_tb_modified = 0;
1443 target_ulong current_pc = 0;
1444 target_ulong current_cs_base = 0;
Emilio G. Cota89fee742016-04-07 13:19:22 -04001445 uint32_t current_flags = 0;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001446#endif /* TARGET_HAS_PRECISE_SMC */
1447
Alex Bennéee505a062016-10-27 16:10:05 +01001448 assert_memory_lock();
Pranith Kumar6ac3d7e2017-02-23 18:29:05 +00001449 assert_tb_locked();
Alex Bennéee505a062016-10-27 16:10:05 +01001450
Blue Swirl5b6dd862012-12-02 16:04:43 +00001451 p = page_find(start >> TARGET_PAGE_BITS);
1452 if (!p) {
1453 return;
1454 }
Andreas Färberbaea4fa2013-09-03 10:51:26 +02001455#if defined(TARGET_HAS_PRECISE_SMC)
Andreas Färber4917cf42013-05-27 05:17:50 +02001456 if (cpu != NULL) {
1457 env = cpu->env_ptr;
Andreas Färberd77953b2013-01-16 19:29:31 +01001458 }
Andreas Färber4917cf42013-05-27 05:17:50 +02001459#endif
Blue Swirl5b6dd862012-12-02 16:04:43 +00001460
1461 /* we remove all the TBs in the range [start, end[ */
1462 /* XXX: see if in some cases it could be faster to invalidate all
1463 the code */
1464 tb = p->first_tb;
1465 while (tb != NULL) {
1466 n = (uintptr_t)tb & 3;
1467 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1468 tb_next = tb->page_next[n];
1469 /* NOTE: this is subtle as a TB may span two physical pages */
1470 if (n == 0) {
1471 /* NOTE: tb_end may be after the end of the page, but
1472 it is not a problem */
1473 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1474 tb_end = tb_start + tb->size;
1475 } else {
1476 tb_start = tb->page_addr[1];
1477 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1478 }
1479 if (!(tb_end <= start || tb_start >= end)) {
1480#ifdef TARGET_HAS_PRECISE_SMC
1481 if (current_tb_not_found) {
1482 current_tb_not_found = 0;
1483 current_tb = NULL;
Andreas Färber93afead2013-08-26 03:41:01 +02001484 if (cpu->mem_io_pc) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001485 /* now we have a real cpu fault */
Andreas Färber93afead2013-08-26 03:41:01 +02001486 current_tb = tb_find_pc(cpu->mem_io_pc);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001487 }
1488 }
1489 if (current_tb == tb &&
1490 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1491 /* If we are modifying the current TB, we must stop
1492 its execution. We could be more precise by checking
1493 that the modification is after the current PC, but it
1494 would require a specialized function to partially
1495 restore the CPU state */
1496
1497 current_tb_modified = 1;
Andreas Färber74f10512013-09-01 17:02:58 +02001498 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001499 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1500 &current_flags);
1501 }
1502#endif /* TARGET_HAS_PRECISE_SMC */
Blue Swirl5b6dd862012-12-02 16:04:43 +00001503 tb_phys_invalidate(tb, -1);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001504 }
1505 tb = tb_next;
1506 }
1507#if !defined(CONFIG_USER_ONLY)
1508 /* if no code remaining, no need to continue to use slow writes */
1509 if (!p->first_tb) {
1510 invalidate_page_bitmap(p);
Paolo Bonzinifc377bc2015-04-22 14:20:35 +02001511 tlb_unprotect_code(start);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001512 }
1513#endif
1514#ifdef TARGET_HAS_PRECISE_SMC
1515 if (current_tb_modified) {
1516 /* we generate a block containing just the instruction
1517 modifying the memory. It will ensure that it cannot modify
1518 itself */
Andreas Färber648f0342013-09-01 17:43:17 +02001519 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01001520 cpu_loop_exit_noexc(cpu);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001521 }
1522#endif
1523}
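/*
 * Worked example, added for illustration (assuming 4 KiB target pages): a TB
 * with pc == 0xfff0 and size == 0x20 straddles two pages.  For its entry on
 * the first page (n == 0) the loop above computes
 *     tb_start = page_addr[0] + 0xff0,  tb_end = tb_start + 0x20
 * (tb_end runs past the page, which the NOTE above says is harmless), and
 * for its entry on the second page (n == 1)
 *     tb_start = page_addr[1],          tb_end = page_addr[1] + 0x10.
 */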
1524
Paolo Bonzini6fad4592015-08-11 12:42:55 +02001525#ifdef CONFIG_SOFTMMU
Alex Bennéeba051fb2016-10-27 16:10:16 +01001526/* len must be <= 8 and start must be a multiple of len.
1527 * Called via softmmu_template.h when code areas are written to with
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001528 * iothread mutex not held.
Alex Bennéeba051fb2016-10-27 16:10:16 +01001529 */
Blue Swirl5b6dd862012-12-02 16:04:43 +00001530void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1531{
1532 PageDesc *p;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001533
1534#if 0
1535 if (1) {
1536 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1537 cpu_single_env->mem_io_vaddr, len,
1538 cpu_single_env->eip,
1539 cpu_single_env->eip +
1540 (intptr_t)cpu_single_env->segs[R_CS].base);
1541 }
1542#endif
Alex Bennéeba051fb2016-10-27 16:10:16 +01001543 assert_memory_lock();
1544
Blue Swirl5b6dd862012-12-02 16:04:43 +00001545 p = page_find(start >> TARGET_PAGE_BITS);
1546 if (!p) {
1547 return;
1548 }
Paolo Bonzinifc377bc2015-04-22 14:20:35 +02001549 if (!p->code_bitmap &&
1550 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
Paolo Bonzini7d7500d2016-10-27 16:10:03 +01001551 /* build code bitmap. FIXME: writes should be protected by
1552 * tb_lock, reads by tb_lock or RCU.
1553 */
Paolo Bonzinifc377bc2015-04-22 14:20:35 +02001554 build_page_bitmap(p);
1555 }
Blue Swirl5b6dd862012-12-02 16:04:43 +00001556 if (p->code_bitmap) {
Emilio G. Cota510a6472015-04-22 17:50:52 -04001557 unsigned int nr;
1558 unsigned long b;
1559
1560 nr = start & ~TARGET_PAGE_MASK;
1561 b = p->code_bitmap[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1));
Blue Swirl5b6dd862012-12-02 16:04:43 +00001562 if (b & ((1 << len) - 1)) {
1563 goto do_invalidate;
1564 }
1565 } else {
1566 do_invalidate:
1567 tb_invalidate_phys_page_range(start, start + len, 1);
1568 }
1569}
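/*
 * Worked example, added for illustration (assuming 64-bit longs): for a
 * 4-byte write at page offset 0x208, nr == 0x208, BIT_WORD(nr) == 8, the
 * bitmap word is shifted right by nr & 63 == 8, and the mask (1 << 4) - 1
 * then tests bits 0x208..0x20b, i.e. whether any of the four written bytes
 * overlaps translated code on this page.
 */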
Paolo Bonzini6fad4592015-08-11 12:42:55 +02001570#else
Peter Maydell75809222016-05-17 15:18:02 +01001571/* Called with mmap_lock held. If pc is not 0 then it indicates the
1572 * host PC of the faulting store instruction that caused this invalidate.
1573 * Returns true if the caller needs to abort execution of the current
1574 * TB (because it was modified by this store and the guest CPU has
1575 * precise-SMC semantics).
1576 */
1577static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001578{
1579 TranslationBlock *tb;
1580 PageDesc *p;
1581 int n;
1582#ifdef TARGET_HAS_PRECISE_SMC
1583 TranslationBlock *current_tb = NULL;
Andreas Färber4917cf42013-05-27 05:17:50 +02001584 CPUState *cpu = current_cpu;
1585 CPUArchState *env = NULL;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001586 int current_tb_modified = 0;
1587 target_ulong current_pc = 0;
1588 target_ulong current_cs_base = 0;
Emilio G. Cota89fee742016-04-07 13:19:22 -04001589 uint32_t current_flags = 0;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001590#endif
1591
Alex Bennéeba051fb2016-10-27 16:10:16 +01001592 assert_memory_lock();
1593
Blue Swirl5b6dd862012-12-02 16:04:43 +00001594 addr &= TARGET_PAGE_MASK;
1595 p = page_find(addr >> TARGET_PAGE_BITS);
1596 if (!p) {
Peter Maydell75809222016-05-17 15:18:02 +01001597 return false;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001598 }
KONRAD Frederica5e99822016-10-27 16:10:06 +01001599
1600 tb_lock();
Blue Swirl5b6dd862012-12-02 16:04:43 +00001601 tb = p->first_tb;
1602#ifdef TARGET_HAS_PRECISE_SMC
1603 if (tb && pc != 0) {
1604 current_tb = tb_find_pc(pc);
1605 }
Andreas Färber4917cf42013-05-27 05:17:50 +02001606 if (cpu != NULL) {
1607 env = cpu->env_ptr;
Andreas Färberd77953b2013-01-16 19:29:31 +01001608 }
Blue Swirl5b6dd862012-12-02 16:04:43 +00001609#endif
1610 while (tb != NULL) {
1611 n = (uintptr_t)tb & 3;
1612 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1613#ifdef TARGET_HAS_PRECISE_SMC
1614 if (current_tb == tb &&
1615 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1616 /* If we are modifying the current TB, we must stop
1617 its execution. We could be more precise by checking
1618 that the modification is after the current PC, but it
1619 would require a specialized function to partially
1620 restore the CPU state */
1621
1622 current_tb_modified = 1;
Andreas Färber74f10512013-09-01 17:02:58 +02001623 cpu_restore_state_from_tb(cpu, current_tb, pc);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001624 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1625 &current_flags);
1626 }
1627#endif /* TARGET_HAS_PRECISE_SMC */
1628 tb_phys_invalidate(tb, addr);
1629 tb = tb->page_next[n];
1630 }
1631 p->first_tb = NULL;
1632#ifdef TARGET_HAS_PRECISE_SMC
1633 if (current_tb_modified) {
1634 /* we generate a block containing just the instruction
1635 modifying the memory. It will ensure that it cannot modify
1636 itself */
Andreas Färber648f0342013-09-01 17:43:17 +02001637 tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
KONRAD Frederica5e99822016-10-27 16:10:06 +01001638 /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
1639 * back into the cpu_exec loop. */
Peter Maydell75809222016-05-17 15:18:02 +01001640 return true;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001641 }
1642#endif
KONRAD Frederica5e99822016-10-27 16:10:06 +01001643 tb_unlock();
1644
Peter Maydell75809222016-05-17 15:18:02 +01001645 return false;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001646}
1647#endif
1648
Blue Swirl5b6dd862012-12-02 16:04:43 +00001649/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1650 tb[1].tc_ptr. Return NULL if not found */
Blue Swirla8a826a2012-12-04 20:16:07 +00001651static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001652{
1653 int m_min, m_max, m;
1654 uintptr_t v;
1655 TranslationBlock *tb;
1656
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001657 if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001658 return NULL;
1659 }
Evgeny Voevodin0b0d3322013-02-01 01:47:22 +07001660 if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1661 tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001662 return NULL;
1663 }
1664 /* binary search (cf Knuth) */
1665 m_min = 0;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001666 m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001667 while (m_min <= m_max) {
1668 m = (m_min + m_max) >> 1;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001669 tb = &tcg_ctx.tb_ctx.tbs[m];
Blue Swirl5b6dd862012-12-02 16:04:43 +00001670 v = (uintptr_t)tb->tc_ptr;
1671 if (v == tc_ptr) {
1672 return tb;
1673 } else if (tc_ptr < v) {
1674 m_max = m - 1;
1675 } else {
1676 m_min = m + 1;
1677 }
1678 }
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001679 return &tcg_ctx.tb_ctx.tbs[m_max];
Blue Swirl5b6dd862012-12-02 16:04:43 +00001680}
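/*
 * Note added for clarity: TBs are allocated in order and their host code is
 * carved out of code_gen_buffer monotonically, so the tbs[] array scanned
 * above is sorted by tc_ptr; that ordering is what makes the binary search
 * valid.
 */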
1681
Peter Maydellec53b452015-01-20 15:19:32 +00001682#if !defined(CONFIG_USER_ONLY)
Edgar E. Iglesias29d8ec72013-11-07 19:43:10 +01001683void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001684{
1685 ram_addr_t ram_addr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001686 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001687 hwaddr l = 1;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001688
Paolo Bonzini41063e12015-03-18 14:21:43 +01001689 rcu_read_lock();
Edgar E. Iglesias29d8ec72013-11-07 19:43:10 +01001690 mr = address_space_translate(as, addr, &addr, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001691 if (!(memory_region_is_ram(mr)
1692 || memory_region_is_romd(mr))) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01001693 rcu_read_unlock();
Blue Swirl5b6dd862012-12-02 16:04:43 +00001694 return;
1695 }
Paolo Bonzinie4e69792016-03-01 10:44:50 +01001696 ram_addr = memory_region_get_ram_addr(mr) + addr;
Alex Bennéeba051fb2016-10-27 16:10:16 +01001697 tb_lock();
Blue Swirl5b6dd862012-12-02 16:04:43 +00001698 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
Alex Bennéeba051fb2016-10-27 16:10:16 +01001699 tb_unlock();
Paolo Bonzini41063e12015-03-18 14:21:43 +01001700 rcu_read_unlock();
Blue Swirl5b6dd862012-12-02 16:04:43 +00001701}
Peter Maydellec53b452015-01-20 15:19:32 +00001702#endif /* !defined(CONFIG_USER_ONLY) */
Blue Swirl5b6dd862012-12-02 16:04:43 +00001703
Paolo Bonzini7d7500d2016-10-27 16:10:03 +01001704/* Called with tb_lock held. */
Andreas Färber239c51a2013-09-01 17:12:23 +02001705void tb_check_watchpoint(CPUState *cpu)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001706{
1707 TranslationBlock *tb;
1708
Andreas Färber93afead2013-08-26 03:41:01 +02001709 tb = tb_find_pc(cpu->mem_io_pc);
Aurelien Jarno8d302e72015-06-13 00:45:59 +02001710 if (tb) {
1711 /* We can use retranslation to find the PC. */
1712 cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
1713 tb_phys_invalidate(tb, -1);
1714 } else {
1715 /* The exception probably happened in a helper. The CPU state should
1716 have been saved before calling it. Fetch the PC from there. */
1717 CPUArchState *env = cpu->env_ptr;
1718 target_ulong pc, cs_base;
1719 tb_page_addr_t addr;
Emilio G. Cota89fee742016-04-07 13:19:22 -04001720 uint32_t flags;
Aurelien Jarno8d302e72015-06-13 00:45:59 +02001721
1722 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
1723 addr = get_page_addr_code(env, pc);
1724 tb_invalidate_phys_range(addr, addr + 1);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001725 }
Blue Swirl5b6dd862012-12-02 16:04:43 +00001726}
1727
1728#ifndef CONFIG_USER_ONLY
Blue Swirl5b6dd862012-12-02 16:04:43 +00001729/* in deterministic execution mode, instructions doing device I/Os
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001730 * must be at the end of the TB.
1731 *
1732 * Called by softmmu_template.h, with iothread mutex not held.
1733 */
Andreas Färber90b40a62013-09-01 17:21:47 +02001734void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001735{
Andreas Färbera47dddd2013-09-03 17:38:47 +02001736#if defined(TARGET_MIPS) || defined(TARGET_SH4)
Andreas Färber90b40a62013-09-01 17:21:47 +02001737 CPUArchState *env = cpu->env_ptr;
Andreas Färbera47dddd2013-09-03 17:38:47 +02001738#endif
Blue Swirl5b6dd862012-12-02 16:04:43 +00001739 TranslationBlock *tb;
1740 uint32_t n, cflags;
1741 target_ulong pc, cs_base;
Emilio G. Cota89fee742016-04-07 13:19:22 -04001742 uint32_t flags;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001743
KONRAD Frederica5e99822016-10-27 16:10:06 +01001744 tb_lock();
Blue Swirl5b6dd862012-12-02 16:04:43 +00001745 tb = tb_find_pc(retaddr);
1746 if (!tb) {
Andreas Färbera47dddd2013-09-03 17:38:47 +02001747 cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
Blue Swirl5b6dd862012-12-02 16:04:43 +00001748 (void *)retaddr);
1749 }
Andreas Färber28ecfd72013-08-26 05:51:49 +02001750 n = cpu->icount_decr.u16.low + tb->icount;
Andreas Färber74f10512013-09-01 17:02:58 +02001751 cpu_restore_state_from_tb(cpu, tb, retaddr);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001752 /* Calculate how many instructions had been executed before the fault
1753 occurred. */
Andreas Färber28ecfd72013-08-26 05:51:49 +02001754 n = n - cpu->icount_decr.u16.low;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001755 /* Generate a new TB ending on the I/O insn. */
1756 n++;
1757 /* On MIPS and SH, delay slot instructions can only be restarted if
1758 they were already the first instruction in the TB. If this is not
1759 the first instruction in a TB then re-execute the preceding
1760 branch. */
1761#if defined(TARGET_MIPS)
1762 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
Maciej W. Rozyckic3577472014-11-07 20:05:35 +00001763 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
Andreas Färber28ecfd72013-08-26 05:51:49 +02001764 cpu->icount_decr.u16.low++;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001765 env->hflags &= ~MIPS_HFLAG_BMASK;
1766 }
1767#elif defined(TARGET_SH4)
1768 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1769 && n > 1) {
1770 env->pc -= 2;
Andreas Färber28ecfd72013-08-26 05:51:49 +02001771 cpu->icount_decr.u16.low++;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001772 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1773 }
1774#endif
1775 /* This should never happen. */
1776 if (n > CF_COUNT_MASK) {
Andreas Färbera47dddd2013-09-03 17:38:47 +02001777 cpu_abort(cpu, "TB too big during recompile");
Blue Swirl5b6dd862012-12-02 16:04:43 +00001778 }
1779
1780 cflags = n | CF_LAST_IO;
1781 pc = tb->pc;
1782 cs_base = tb->cs_base;
1783 flags = tb->flags;
1784 tb_phys_invalidate(tb, -1);
Sergey Fedorov02d57ea2015-06-30 12:35:09 +03001785 if (tb->cflags & CF_NOCACHE) {
1786 if (tb->orig_tb) {
1787 /* Invalidate original TB if this TB was generated in
1788 * cpu_exec_nocache() */
1789 tb_phys_invalidate(tb->orig_tb, -1);
1790 }
1791 tb_free(tb);
1792 }
Blue Swirl5b6dd862012-12-02 16:04:43 +00001793 /* FIXME: In theory this could raise an exception. In practice
1794 we have already translated the block once so it's probably ok. */
Andreas Färber648f0342013-09-01 17:43:17 +02001795 tb_gen_code(cpu, pc, cs_base, flags, cflags);
KONRAD Frederica5e99822016-10-27 16:10:06 +01001796
Blue Swirl5b6dd862012-12-02 16:04:43 +00001797 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
KONRAD Frederica5e99822016-10-27 16:10:06 +01001798 * the first in the TB) then we end up generating a whole new TB and
1799 * repeating the fault, which is horribly inefficient.
1800 * Better would be to execute just this insn uncached, or generate a
1801 * second new TB.
1802 *
1803 * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
1804 * tb_lock gets reset.
1805 */
Peter Maydell6886b982016-05-17 15:18:04 +01001806 cpu_loop_exit_noexc(cpu);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001807}
1808
Andreas Färber611d4f92013-09-01 17:52:07 +02001809void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001810{
1811 unsigned int i;
1812
1813 /* Discard jump cache entries for any tb which might potentially
1814 overlap the flushed page. */
1815 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
Andreas Färber8cd70432013-08-26 06:03:38 +02001816 memset(&cpu->tb_jmp_cache[i], 0,
Blue Swirl5b6dd862012-12-02 16:04:43 +00001817 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1818
1819 i = tb_jmp_cache_hash_page(addr);
Andreas Färber8cd70432013-08-26 06:03:38 +02001820 memset(&cpu->tb_jmp_cache[i], 0,
Blue Swirl5b6dd862012-12-02 16:04:43 +00001821 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1822}
1823
Emilio G. Cota7266ae92016-07-22 12:36:30 -04001824static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
1825 struct qht_stats hst)
1826{
1827 uint32_t hgram_opts;
1828 size_t hgram_bins;
1829 char *hgram;
1830
1831 if (!hst.head_buckets) {
1832 return;
1833 }
1834 cpu_fprintf(f, "TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n",
1835 hst.used_head_buckets, hst.head_buckets,
1836 (double)hst.used_head_buckets / hst.head_buckets * 100);
1837
1838 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1839 hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
1840 if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
1841 hgram_opts |= QDIST_PR_NODECIMAL;
1842 }
1843 hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
1844 cpu_fprintf(f, "TB hash occupancy %0.2f%% avg chain occ. Histogram: %s\n",
1845 qdist_avg(&hst.occupancy) * 100, hgram);
1846 g_free(hgram);
1847
1848 hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
1849 hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
1850 if (hgram_bins > 10) {
1851 hgram_bins = 10;
1852 } else {
1853 hgram_bins = 0;
1854 hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
1855 }
1856 hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
1857 cpu_fprintf(f, "TB hash avg chain %0.3f buckets. Histogram: %s\n",
1858 qdist_avg(&hst.chain), hgram);
1859 g_free(hgram);
1860}
1861
Blue Swirl5b6dd862012-12-02 16:04:43 +00001862void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1863{
1864 int i, target_code_size, max_target_code_size;
1865 int direct_jmp_count, direct_jmp2_count, cross_page;
1866 TranslationBlock *tb;
Emilio G. Cota329844d2016-06-08 14:55:33 -04001867 struct qht_stats hst;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001868
KONRAD Frederica5e99822016-10-27 16:10:06 +01001869 tb_lock();
1870
Blue Swirl5b6dd862012-12-02 16:04:43 +00001871 target_code_size = 0;
1872 max_target_code_size = 0;
1873 cross_page = 0;
1874 direct_jmp_count = 0;
1875 direct_jmp2_count = 0;
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001876 for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1877 tb = &tcg_ctx.tb_ctx.tbs[i];
Blue Swirl5b6dd862012-12-02 16:04:43 +00001878 target_code_size += tb->size;
1879 if (tb->size > max_target_code_size) {
1880 max_target_code_size = tb->size;
1881 }
1882 if (tb->page_addr[1] != -1) {
1883 cross_page++;
1884 }
Sergey Fedorovf3091012016-04-10 23:35:45 +03001885 if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001886 direct_jmp_count++;
Sergey Fedorovf3091012016-04-10 23:35:45 +03001887 if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001888 direct_jmp2_count++;
1889 }
1890 }
1891 }
1892 /* XXX: avoid using doubles ? */
1893 cpu_fprintf(f, "Translation buffer state:\n");
1894 cpu_fprintf(f, "gen code size %td/%zd\n",
Evgeny Voevodin0b0d3322013-02-01 01:47:22 +07001895 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
Richard Hendersonb125f9d2015-09-22 13:01:15 -07001896 tcg_ctx.code_gen_highwater - tcg_ctx.code_gen_buffer);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001897 cpu_fprintf(f, "TB count %d/%d\n",
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001898 tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001899 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001900 tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1901 tcg_ctx.tb_ctx.nb_tbs : 0,
1902 max_target_code_size);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001903 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001904 tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1905 tcg_ctx.code_gen_buffer) /
1906 tcg_ctx.tb_ctx.nb_tbs : 0,
1907 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1908 tcg_ctx.code_gen_buffer) /
1909 target_code_size : 0);
1910 cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1911 tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1912 tcg_ctx.tb_ctx.nb_tbs : 0);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001913 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
1914 direct_jmp_count,
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001915 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1916 tcg_ctx.tb_ctx.nb_tbs : 0,
Blue Swirl5b6dd862012-12-02 16:04:43 +00001917 direct_jmp2_count,
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001918 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1919 tcg_ctx.tb_ctx.nb_tbs : 0);
Emilio G. Cota329844d2016-06-08 14:55:33 -04001920
1921 qht_statistics_init(&tcg_ctx.tb_ctx.htable, &hst);
Emilio G. Cota7266ae92016-07-22 12:36:30 -04001922 print_qht_statistics(f, cpu_fprintf, hst);
Emilio G. Cota329844d2016-06-08 14:55:33 -04001923 qht_statistics_destroy(&hst);
1924
Blue Swirl5b6dd862012-12-02 16:04:43 +00001925 cpu_fprintf(f, "\nStatistics:\n");
Sergey Fedorov3359baa2016-08-02 18:27:43 +01001926 cpu_fprintf(f, "TB flush count %u\n",
1927 atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +07001928 cpu_fprintf(f, "TB invalidate count %d\n",
1929 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001930 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
1931 tcg_dump_info(f, cpu_fprintf);
KONRAD Frederica5e99822016-10-27 16:10:06 +01001932
1933 tb_unlock();
Blue Swirl5b6dd862012-12-02 16:04:43 +00001934}
1935
Max Filippov246ae242014-11-02 11:04:18 +03001936void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
1937{
1938 tcg_dump_op_count(f, cpu_fprintf);
1939}
1940
Blue Swirl5b6dd862012-12-02 16:04:43 +00001941#else /* CONFIG_USER_ONLY */
1942
Andreas Färberc3affe52013-01-18 15:03:43 +01001943void cpu_interrupt(CPUState *cpu, int mask)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001944{
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001945 g_assert(qemu_mutex_iothread_locked());
Andreas Färber259186a2013-01-17 18:51:17 +01001946 cpu->interrupt_request |= mask;
Paolo Bonzini1aab16c2017-01-27 11:25:33 +01001947 cpu->icount_decr.u16.high = -1;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001948}
1949
1950/*
1951 * Walks guest process memory "regions" one by one
1952 * and calls callback function 'fn' for each region.
1953 */
1954struct walk_memory_regions_data {
1955 walk_memory_regions_fn fn;
1956 void *priv;
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001957 target_ulong start;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001958 int prot;
1959};
1960
1961static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001962 target_ulong end, int new_prot)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001963{
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001964 if (data->start != -1u) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001965 int rc = data->fn(data->priv, data->start, end, data->prot);
1966 if (rc != 0) {
1967 return rc;
1968 }
1969 }
1970
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001971 data->start = (new_prot ? end : -1u);
Blue Swirl5b6dd862012-12-02 16:04:43 +00001972 data->prot = new_prot;
1973
1974 return 0;
1975}
1976
1977static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001978 target_ulong base, int level, void **lp)
Blue Swirl5b6dd862012-12-02 16:04:43 +00001979{
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04001980 target_ulong pa;
Blue Swirl5b6dd862012-12-02 16:04:43 +00001981 int i, rc;
1982
1983 if (*lp == NULL) {
1984 return walk_memory_regions_end(data, base, 0);
1985 }
1986
1987 if (level == 0) {
1988 PageDesc *pd = *lp;
1989
Paolo Bonzini03f49952013-11-07 17:14:36 +01001990 for (i = 0; i < V_L2_SIZE; ++i) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00001991 int prot = pd[i].flags;
1992
1993 pa = base | (i << TARGET_PAGE_BITS);
1994 if (prot != data->prot) {
1995 rc = walk_memory_regions_end(data, pa, prot);
1996 if (rc != 0) {
1997 return rc;
1998 }
1999 }
2000 }
2001 } else {
2002 void **pp = *lp;
2003
Paolo Bonzini03f49952013-11-07 17:14:36 +01002004 for (i = 0; i < V_L2_SIZE; ++i) {
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04002005 pa = base | ((target_ulong)i <<
Paolo Bonzini03f49952013-11-07 17:14:36 +01002006 (TARGET_PAGE_BITS + V_L2_BITS * level));
Blue Swirl5b6dd862012-12-02 16:04:43 +00002007 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2008 if (rc != 0) {
2009 return rc;
2010 }
2011 }
2012 }
2013
2014 return 0;
2015}
2016
2017int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2018{
2019 struct walk_memory_regions_data data;
Vijaya Kumar K66ec9f42016-10-24 16:26:49 +01002020 uintptr_t i, l1_sz = v_l1_size;
Blue Swirl5b6dd862012-12-02 16:04:43 +00002021
2022 data.fn = fn;
2023 data.priv = priv;
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04002024 data.start = -1u;
Blue Swirl5b6dd862012-12-02 16:04:43 +00002025 data.prot = 0;
2026
Vijaya Kumar K66ec9f42016-10-24 16:26:49 +01002027 for (i = 0; i < l1_sz; i++) {
2028 target_ulong base = i << (v_l1_shift + TARGET_PAGE_BITS);
2029 int rc = walk_memory_regions_1(&data, base, v_l2_levels, l1_map + i);
Blue Swirl5b6dd862012-12-02 16:04:43 +00002030 if (rc != 0) {
2031 return rc;
2032 }
2033 }
2034
2035 return walk_memory_regions_end(&data, 0, 0);
2036}
2037
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04002038static int dump_region(void *priv, target_ulong start,
2039 target_ulong end, unsigned long prot)
Blue Swirl5b6dd862012-12-02 16:04:43 +00002040{
2041 FILE *f = (FILE *)priv;
2042
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04002043 (void) fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx
2044 " "TARGET_FMT_lx" %c%c%c\n",
Blue Swirl5b6dd862012-12-02 16:04:43 +00002045 start, end, end - start,
2046 ((prot & PAGE_READ) ? 'r' : '-'),
2047 ((prot & PAGE_WRITE) ? 'w' : '-'),
2048 ((prot & PAGE_EXEC) ? 'x' : '-'));
2049
2050 return 0;
2051}
2052
2053/* dump memory mappings */
2054void page_dump(FILE *f)
2055{
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04002056 const int length = sizeof(target_ulong) * 2;
Stefan Weil227b8172013-09-12 20:09:06 +02002057 (void) fprintf(f, "%-*s %-*s %-*s %s\n",
2058 length, "start", length, "end", length, "size", "prot");
Blue Swirl5b6dd862012-12-02 16:04:43 +00002059 walk_memory_regions(f, dump_region);
2060}
2061
2062int page_get_flags(target_ulong address)
2063{
2064 PageDesc *p;
2065
2066 p = page_find(address >> TARGET_PAGE_BITS);
2067 if (!p) {
2068 return 0;
2069 }
2070 return p->flags;
2071}
2072
2073/* Modify the flags of a page and invalidate the code if necessary.
 2074   The flag PAGE_WRITE_ORG is set automatically depending
2075 on PAGE_WRITE. The mmap_lock should already be held. */
2076void page_set_flags(target_ulong start, target_ulong end, int flags)
2077{
2078 target_ulong addr, len;
2079
2080 /* This function should never be called with addresses outside the
2081 guest address space. If this assert fires, it probably indicates
2082 a missing call to h2g_valid. */
2083#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04002084 assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Blue Swirl5b6dd862012-12-02 16:04:43 +00002085#endif
2086 assert(start < end);
Alex Bennéee505a062016-10-27 16:10:05 +01002087 assert_memory_lock();
Blue Swirl5b6dd862012-12-02 16:04:43 +00002088
2089 start = start & TARGET_PAGE_MASK;
2090 end = TARGET_PAGE_ALIGN(end);
2091
2092 if (flags & PAGE_WRITE) {
2093 flags |= PAGE_WRITE_ORG;
2094 }
2095
2096 for (addr = start, len = end - start;
2097 len != 0;
2098 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2099 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2100
2101 /* If the write protection bit is set, then we invalidate
2102 the code inside. */
2103 if (!(p->flags & PAGE_WRITE) &&
2104 (flags & PAGE_WRITE) &&
2105 p->first_tb) {
Peter Maydell75809222016-05-17 15:18:02 +01002106 tb_invalidate_phys_page(addr, 0);
Blue Swirl5b6dd862012-12-02 16:04:43 +00002107 }
2108 p->flags = flags;
2109 }
2110}
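/*
 * Illustrative sketch, not part of the original file: a typical caller (e.g.
 * the guest mmap emulation) marks a newly mapped region valid and accessible
 * in one call; PAGE_WRITE_ORG is added automatically as described above.
 * Helper name and arguments are made up:
 */
#if 0
static void example_mark_region_rw(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len, PAGE_VALID | PAGE_READ | PAGE_WRITE);
}
#endif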
2111
2112int page_check_range(target_ulong start, target_ulong len, int flags)
2113{
2114 PageDesc *p;
2115 target_ulong end;
2116 target_ulong addr;
2117
2118 /* This function should never be called with addresses outside the
2119 guest address space. If this assert fires, it probably indicates
2120 a missing call to h2g_valid. */
2121#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +04002122 assert(start < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Blue Swirl5b6dd862012-12-02 16:04:43 +00002123#endif
2124
2125 if (len == 0) {
2126 return 0;
2127 }
2128 if (start + len - 1 < start) {
2129 /* We've wrapped around. */
2130 return -1;
2131 }
2132
 2133    /* must do before we lose bits in the next step */
2134 end = TARGET_PAGE_ALIGN(start + len);
2135 start = start & TARGET_PAGE_MASK;
2136
2137 for (addr = start, len = end - start;
2138 len != 0;
2139 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2140 p = page_find(addr >> TARGET_PAGE_BITS);
2141 if (!p) {
2142 return -1;
2143 }
2144 if (!(p->flags & PAGE_VALID)) {
2145 return -1;
2146 }
2147
2148 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
2149 return -1;
2150 }
2151 if (flags & PAGE_WRITE) {
2152 if (!(p->flags & PAGE_WRITE_ORG)) {
2153 return -1;
2154 }
2155 /* unprotect the page if it was put read-only because it
2156 contains translated code */
2157 if (!(p->flags & PAGE_WRITE)) {
Peter Maydellf213e722016-05-17 15:18:03 +01002158 if (!page_unprotect(addr, 0)) {
Blue Swirl5b6dd862012-12-02 16:04:43 +00002159 return -1;
2160 }
2161 }
Blue Swirl5b6dd862012-12-02 16:04:43 +00002162 }
2163 }
2164 return 0;
2165}
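/*
 * Illustrative sketch, not part of the original file: callers typically use
 * page_check_range() to validate a guest buffer before touching it, e.g.
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0) {
 *         return -1;   /* report an access fault to the guest */
 *     }
 *
 * (guest_addr and size are placeholder names.)
 */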
2166
2167/* called from signal handler: invalidate the code and unprotect the
Peter Maydellf213e722016-05-17 15:18:03 +01002168 * page. Return 0 if the fault was not handled, 1 if it was handled,
2169 * and 2 if it was handled but the caller must cause the TB to be
2170 * immediately exited. (We can only return 2 if the 'pc' argument is
2171 * non-zero.)
2172 */
2173int page_unprotect(target_ulong address, uintptr_t pc)
Blue Swirl5b6dd862012-12-02 16:04:43 +00002174{
2175 unsigned int prot;
Stanislav Shmarov7399a332016-07-07 11:33:12 +03002176 bool current_tb_invalidated;
Blue Swirl5b6dd862012-12-02 16:04:43 +00002177 PageDesc *p;
2178 target_ulong host_start, host_end, addr;
2179
2180 /* Technically this isn't safe inside a signal handler. However we
2181 know this only ever happens in a synchronous SEGV handler, so in
2182 practice it seems to be ok. */
2183 mmap_lock();
2184
2185 p = page_find(address >> TARGET_PAGE_BITS);
2186 if (!p) {
2187 mmap_unlock();
2188 return 0;
2189 }
2190
2191 /* if the page was really writable, then we change its
2192 protection back to writable */
2193 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2194 host_start = address & qemu_host_page_mask;
2195 host_end = host_start + qemu_host_page_size;
2196
2197 prot = 0;
Stanislav Shmarov7399a332016-07-07 11:33:12 +03002198 current_tb_invalidated = false;
Blue Swirl5b6dd862012-12-02 16:04:43 +00002199 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2200 p = page_find(addr >> TARGET_PAGE_BITS);
2201 p->flags |= PAGE_WRITE;
2202 prot |= p->flags;
2203
2204 /* and since the content will be modified, we must invalidate
2205 the corresponding translated code. */
Stanislav Shmarov7399a332016-07-07 11:33:12 +03002206 current_tb_invalidated |= tb_invalidate_phys_page(addr, pc);
Blue Swirl5b6dd862012-12-02 16:04:43 +00002207#ifdef DEBUG_TB_CHECK
2208 tb_invalidate_check(addr);
2209#endif
2210 }
2211 mprotect((void *)g2h(host_start), qemu_host_page_size,
2212 prot & PAGE_BITS);
2213
2214 mmap_unlock();
Stanislav Shmarov7399a332016-07-07 11:33:12 +03002215 /* If current TB was invalidated return to main loop */
2216 return current_tb_invalidated ? 2 : 1;
Blue Swirl5b6dd862012-12-02 16:04:43 +00002217 }
2218 mmap_unlock();
2219 return 0;
2220}
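/*
 * Illustrative sketch, not part of the original file: the user-mode SEGV
 * handler is expected to act on the three return values documented above
 * roughly as follows (variable names are made up):
 */
#if 0
    switch (page_unprotect(h2g(fault_host_addr), faulting_host_pc)) {
    case 2:
        /* The write hit the TB currently executing: exit it immediately. */
        cpu_loop_exit_noexc(current_cpu);
        /* never returns */
    case 1:
        return 1;       /* fault handled; restart the faulting store */
    default:
        break;          /* not a page we write-protected: genuine fault */
    }
#endif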
2221#endif /* CONFIG_USER_ONLY */