Avi Kivity | 093bc2c | 2011-07-26 14:26:01 +0300 | [diff] [blame] | 1 | /* |
| 2 | * Physical memory management |
| 3 | * |
| 4 | * Copyright 2011 Red Hat, Inc. and/or its affiliates |
| 5 | * |
| 6 | * Authors: |
| 7 | * Avi Kivity <avi@redhat.com> |
| 8 | * |
| 9 | * This work is licensed under the terms of the GNU GPL, version 2. See |
| 10 | * the COPYING file in the top-level directory. |
| 11 | * |
| 12 | */ |
| 13 | |
| 14 | #include "memory.h" |
Avi Kivity | 1c0ffa5 | 2011-07-26 14:26:04 +0300 | [diff] [blame] | 15 | #include "exec-memory.h" |
Avi Kivity | 093bc2c | 2011-07-26 14:26:01 +0300 | [diff] [blame] | 16 | #include <assert.h> |
| 17 | |
typedef struct AddrRange AddrRange;

/* A [start, start + size) range of addresses.  Absolute (guest-physical)
 * or region-relative depending on the user; see FlatRange and
 * CoalescedMemoryRange below. */
struct AddrRange {
    uint64_t start;
    uint64_t size;
};
| 24 | |
| 25 | static AddrRange addrrange_make(uint64_t start, uint64_t size) |
| 26 | { |
| 27 | return (AddrRange) { start, size }; |
| 28 | } |
| 29 | |
| 30 | static bool addrrange_equal(AddrRange r1, AddrRange r2) |
| 31 | { |
| 32 | return r1.start == r2.start && r1.size == r2.size; |
| 33 | } |
| 34 | |
/* One past the last address of @r.  Note: wraps to 0 for a range that
 * reaches the very top of the 64-bit space. */
static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}
| 39 | |
| 40 | static AddrRange addrrange_shift(AddrRange range, int64_t delta) |
| 41 | { |
| 42 | range.start += delta; |
| 43 | return range; |
| 44 | } |
| 45 | |
| 46 | static bool addrrange_intersects(AddrRange r1, AddrRange r2) |
| 47 | { |
| 48 | return (r1.start >= r2.start && r1.start < r2.start + r2.size) |
| 49 | || (r2.start >= r1.start && r2.start < r1.start + r1.size); |
| 50 | } |
| 51 | |
| 52 | static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2) |
| 53 | { |
| 54 | uint64_t start = MAX(r1.start, r2.start); |
| 55 | /* off-by-one arithmetic to prevent overflow */ |
| 56 | uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1); |
| 57 | return addrrange_make(start, end - start + 1); |
| 58 | } |
| 59 | |
/* A region-relative address range for which MMIO coalescing is enabled;
 * kept on MemoryRegion::coalesced and translated to absolute addresses
 * in memory_region_update_coalesced_range(). */
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};
| 64 | |
typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map. Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;                    /* region backing this range */
    target_phys_addr_t offset_in_region; /* where the range starts within mr */
    AddrRange addr;                      /* absolute guest-physical span */
    uint8_t dirty_log_mask;              /* per-client dirty logging bits */
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;     /* sorted, disjoint ranges */
    unsigned nr;           /* number of valid entries */
    unsigned nr_allocated; /* capacity of the ranges array */
};
| 84 | |
typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops; /* callbacks invoked on topology changes */
    MemoryRegion *root;         /* root of the region hierarchy */
    FlatView current_map;       /* flattened view currently in effect */
};

/* Callbacks through which flat-view differences are pushed to the
 * underlying registration machinery (see address_space_ops_memory). */
struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr);
    void (*range_del)(AddressSpace *as, FlatRange *fr);
    void (*log_start)(AddressSpace *as, FlatRange *fr);
    void (*log_stop)(AddressSpace *as, FlatRange *fr);
};
| 101 | |
/* Iterate @var (a FlatRange *) over all entries of FlatView @view, in
 * ascending address order. */
#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
| 104 | |
/* True iff the two flat ranges map the same region span to the same
 * absolute addresses.  dirty_log_mask is intentionally excluded:
 * logging changes on an otherwise equal range are handled separately
 * in address_space_update_topology(). */
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}
| 111 | |
| 112 | static void flatview_init(FlatView *view) |
| 113 | { |
| 114 | view->ranges = NULL; |
| 115 | view->nr = 0; |
| 116 | view->nr_allocated = 0; |
| 117 | } |
| 118 | |
| 119 | /* Insert a range into a given position. Caller is responsible for maintaining |
| 120 | * sorting order. |
| 121 | */ |
| 122 | static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range) |
| 123 | { |
| 124 | if (view->nr == view->nr_allocated) { |
| 125 | view->nr_allocated = MAX(2 * view->nr, 10); |
| 126 | view->ranges = qemu_realloc(view->ranges, |
| 127 | view->nr_allocated * sizeof(*view->ranges)); |
| 128 | } |
| 129 | memmove(view->ranges + pos + 1, view->ranges + pos, |
| 130 | (view->nr - pos) * sizeof(FlatRange)); |
| 131 | view->ranges[pos] = *range; |
| 132 | ++view->nr; |
| 133 | } |
| 134 | |
/* Release the storage backing @view.  The FlatRange entries themselves
 * own nothing, so freeing the array is sufficient. */
static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}
| 139 | |
Avi Kivity | 3d8e6bf | 2011-07-26 14:26:03 +0300 | [diff] [blame] | 140 | static bool can_merge(FlatRange *r1, FlatRange *r2) |
| 141 | { |
| 142 | return addrrange_end(r1->addr) == r2->addr.start |
| 143 | && r1->mr == r2->mr |
| 144 | && r1->offset_in_region + r1->addr.size == r2->offset_in_region |
| 145 | && r1->dirty_log_mask == r2->dirty_log_mask; |
| 146 | } |
| 147 | |
/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        /* Absorb into range i every immediately following range that is
         * mergeable with its predecessor. */
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        /* Close the gap left by the absorbed ranges [i, j). */
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
| 167 | |
Avi Kivity | 16ef61c | 2011-07-26 14:26:07 +0300 | [diff] [blame^] | 168 | static void memory_region_prepare_ram_addr(MemoryRegion *mr); |
| 169 | |
/* Register a new flat range with the legacy core, carrying the range's
 * dirty-logging mask along. */
static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    /* Make sure the region has a valid ram_addr (lazily registers an
     * I/O backend if none exists yet). */
    memory_region_prepare_ram_addr(fr->mr);

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        phys_offset += region_offset;
        region_offset = 0;
    }

    cpu_register_physical_memory_log(fr->addr.start,
                                     fr->addr.size,
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}
| 192 | |
/* Remove a flat range from the legacy core by re-registering its span
 * as unassigned. */
static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    cpu_register_physical_memory(fr->addr.start, fr->addr.size,
                                 IO_MEM_UNASSIGNED);
}
| 198 | |
/* Begin dirty logging for the span covered by @fr. */
static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(fr->addr.start, fr->addr.size);
}
| 203 | |
/* Stop dirty logging for the span covered by @fr. */
static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(fr->addr.start, fr->addr.size);
}
| 208 | |
/* Glue between the generic address-space diffing and the legacy
 * physical-memory registration calls above. */
static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
};

/* The system memory address space; its root is installed via
 * set_system_memory_map(). */
static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};
| 219 | |
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    /* mr->addr is relative to the parent; make @base absolute. */
    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    /* A region entirely outside the clip contributes nothing. */
    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    /* Aliases render their target instead, shifted so that the target's
     * aliased portion lands where the alias sits. */
    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    /* Only terminating (leaf) regions produce flat ranges of their own. */
    if (!mr->terminates) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        /* Skip existing ranges entirely below the current position. */
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        /* Fill the gap up to the next existing range. */
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        /* Skip the part obscured by the existing range. */
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    /* Whatever is left lies above every existing range. */
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}
| 298 | |
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    /* Render the whole hierarchy against an all-covering clip, then
     * merge adjacent compatible ranges. */
    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));
    flatview_simplify(&view);

    return view;
}
| 311 | |
/* Regenerate the flat view for @as and push only the differences from
 * the previous view through the AddressSpaceOps callbacks. */
static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     * Both views are sorted, so this is a two-pointer merge walk.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            as->ops->range_del(as, frold);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                as->ops->log_stop(as, frnew);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                as->ops->log_start(as, frnew);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            as->ops->range_add(as, frnew);
            ++inew;
        }
    }
    as->current_map = new_view;
    flatview_destroy(&old_view);
}
| 365 | |
/* Re-derive and push the flat view of the system memory address space;
 * called after any change to the region hierarchy. */
static void memory_region_update_topology(void)
{
    address_space_update_topology(&address_space_memory);
}
| 370 | |
/* Common initialization for all region flavours: record @name and @size
 * and reset the remaining fields to neutral values.  Note that opaque,
 * ram_addr, alias_offset and backend_registered are left to the
 * type-specific initializers below. */
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->terminates = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
}
| 390 | |
| 391 | static bool memory_region_access_valid(MemoryRegion *mr, |
| 392 | target_phys_addr_t addr, |
| 393 | unsigned size) |
| 394 | { |
| 395 | if (!mr->ops->valid.unaligned && (addr & (size - 1))) { |
| 396 | return false; |
| 397 | } |
| 398 | |
| 399 | /* Treat zero as compatibility all valid */ |
| 400 | if (!mr->ops->valid.max_access_size) { |
| 401 | return true; |
| 402 | } |
| 403 | |
| 404 | if (size > mr->ops->valid.max_access_size |
| 405 | || size < mr->ops->valid.min_access_size) { |
| 406 | return false; |
| 407 | } |
| 408 | return true; |
| 409 | } |
| 410 | |
| 411 | static uint32_t memory_region_read_thunk_n(void *_mr, |
| 412 | target_phys_addr_t addr, |
| 413 | unsigned size) |
| 414 | { |
| 415 | MemoryRegion *mr = _mr; |
| 416 | unsigned access_size, access_size_min, access_size_max; |
| 417 | uint64_t access_mask; |
| 418 | uint32_t data = 0, tmp; |
| 419 | unsigned i; |
| 420 | |
| 421 | if (!memory_region_access_valid(mr, addr, size)) { |
| 422 | return -1U; /* FIXME: better signalling */ |
| 423 | } |
| 424 | |
| 425 | /* FIXME: support unaligned access */ |
| 426 | |
| 427 | access_size_min = mr->ops->impl.min_access_size; |
| 428 | if (!access_size_min) { |
| 429 | access_size_min = 1; |
| 430 | } |
| 431 | access_size_max = mr->ops->impl.max_access_size; |
| 432 | if (!access_size_max) { |
| 433 | access_size_max = 4; |
| 434 | } |
| 435 | access_size = MAX(MIN(size, access_size_max), access_size_min); |
| 436 | access_mask = -1ULL >> (64 - access_size * 8); |
| 437 | addr += mr->offset; |
| 438 | for (i = 0; i < size; i += access_size) { |
| 439 | /* FIXME: big-endian support */ |
| 440 | tmp = mr->ops->read(mr->opaque, addr + i, access_size); |
| 441 | data |= (tmp & access_mask) << (i * 8); |
| 442 | } |
| 443 | |
| 444 | return data; |
| 445 | } |
| 446 | |
| 447 | static void memory_region_write_thunk_n(void *_mr, |
| 448 | target_phys_addr_t addr, |
| 449 | unsigned size, |
| 450 | uint64_t data) |
| 451 | { |
| 452 | MemoryRegion *mr = _mr; |
| 453 | unsigned access_size, access_size_min, access_size_max; |
| 454 | uint64_t access_mask; |
| 455 | unsigned i; |
| 456 | |
| 457 | if (!memory_region_access_valid(mr, addr, size)) { |
| 458 | return; /* FIXME: better signalling */ |
| 459 | } |
| 460 | |
| 461 | /* FIXME: support unaligned access */ |
| 462 | |
| 463 | access_size_min = mr->ops->impl.min_access_size; |
| 464 | if (!access_size_min) { |
| 465 | access_size_min = 1; |
| 466 | } |
| 467 | access_size_max = mr->ops->impl.max_access_size; |
| 468 | if (!access_size_max) { |
| 469 | access_size_max = 4; |
| 470 | } |
| 471 | access_size = MAX(MIN(size, access_size_max), access_size_min); |
| 472 | access_mask = -1ULL >> (64 - access_size * 8); |
| 473 | addr += mr->offset; |
| 474 | for (i = 0; i < size; i += access_size) { |
| 475 | /* FIXME: big-endian support */ |
| 476 | mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask, |
| 477 | access_size); |
| 478 | } |
| 479 | } |
| 480 | |
/* Fixed-size wrappers so the variable-size thunks above fit the
 * CPUReadMemoryFunc/CPUWriteMemoryFunc signatures expected by
 * cpu_register_io_memory(). */
static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

/* Indexed by log2 of the access size: byte, word, long. */
static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};
| 525 | |
Avi Kivity | 16ef61c | 2011-07-26 14:26:07 +0300 | [diff] [blame^] | 526 | static void memory_region_prepare_ram_addr(MemoryRegion *mr) |
| 527 | { |
| 528 | if (mr->backend_registered) { |
| 529 | return; |
| 530 | } |
| 531 | |
| 532 | mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk, |
| 533 | memory_region_write_thunk, |
| 534 | mr, |
| 535 | mr->ops->endianness); |
| 536 | mr->backend_registered = true; |
| 537 | } |
| 538 | |
/* Initialize a pure I/O region: accesses are routed through @ops with
 * @opaque; the legacy backend is registered lazily (see
 * memory_region_prepare_ram_addr). */
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->backend_registered = false;
}
| 551 | |
/* Initialize a RAM region backed by newly allocated memory. */
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
    mr->backend_registered = true;
}
| 562 | |
/* Initialize a RAM region backed by caller-provided memory at @ptr. */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->terminates = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
    mr->backend_registered = true;
}
| 574 | |
/* Initialize an alias region: it has no contents of its own but maps
 * accesses onto [offset, offset + size) of @orig (see the alias
 * handling in render_memory_region). */
void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
| 585 | |
/* Tear down a region; it must have no subregions left.  Frees the
 * coalescing entries and the name.  NOTE(review): any RAM backing is
 * not released here — confirm callers handle that. */
void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}
| 592 | |
/* Return the size of the region, as passed to its initializer. */
uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}
| 597 | |
/* Set the offset added to access addresses before they are passed to
 * the region's read/write callbacks (see the thunks above). */
void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}
| 602 | |
| 603 | void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) |
| 604 | { |
Avi Kivity | 5a58334 | 2011-07-26 14:26:02 +0300 | [diff] [blame] | 605 | uint8_t mask = 1 << client; |
| 606 | |
| 607 | mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask); |
| 608 | memory_region_update_topology(); |
Avi Kivity | 093bc2c | 2011-07-26 14:26:01 +0300 | [diff] [blame] | 609 | } |
| 610 | |
/* Query @client's dirty flag for @addr (relative to the region start);
 * only valid for terminating regions. */
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->terminates);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}
| 617 | |
| 618 | void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr) |
| 619 | { |
Avi Kivity | 14a3c10 | 2011-07-26 14:26:06 +0300 | [diff] [blame] | 620 | assert(mr->terminates); |
Avi Kivity | 5a58334 | 2011-07-26 14:26:02 +0300 | [diff] [blame] | 621 | return cpu_physical_memory_set_dirty(mr->ram_addr + addr); |
Avi Kivity | 093bc2c | 2011-07-26 14:26:01 +0300 | [diff] [blame] | 622 | } |
| 623 | |
/* Synchronize the dirty bitmap for every flat range backed by @mr. */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}
| 635 | |
/* Not implemented yet: the read-only attribute is currently ignored. */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}
| 640 | |
/* Clear @client's dirty flags for [addr, addr + size) relative to the
 * region start; only valid for terminating regions. */
void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->terminates);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}
| 649 | |
| 650 | void *memory_region_get_ram_ptr(MemoryRegion *mr) |
| 651 | { |
| 652 | if (mr->alias) { |
| 653 | return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset; |
| 654 | } |
| 655 | |
Avi Kivity | 14a3c10 | 2011-07-26 14:26:06 +0300 | [diff] [blame] | 656 | assert(mr->terminates); |
Avi Kivity | 093bc2c | 2011-07-26 14:26:01 +0300 | [diff] [blame] | 657 | |
| 658 | return qemu_get_ram_ptr(mr->ram_addr); |
| 659 | } |
| 660 | |
/* Re-derive the coalesced-MMIO registrations for every flat range
 * backed by @mr from the region's list of coalesced sub-ranges. */
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            /* Drop the old registration covering this range... */
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            /* ...then re-register each coalesced sub-range that falls
             * within it, translated to absolute addresses. */
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}
| 682 | |
/* Enable MMIO coalescing for the whole region, replacing any existing
 * coalesced sub-ranges. */
void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}
| 688 | |
/* Enable MMIO coalescing for the region-relative range
 * [offset, offset + size). */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}
| 699 | |
| 700 | void memory_region_clear_coalescing(MemoryRegion *mr) |
| 701 | { |
| 702 | CoalescedMemoryRange *cmr; |
| 703 | |
| 704 | while (!QTAILQ_EMPTY(&mr->coalesced)) { |
| 705 | cmr = QTAILQ_FIRST(&mr->coalesced); |
| 706 | QTAILQ_REMOVE(&mr->coalesced, cmr, link); |
| 707 | qemu_free(cmr); |
| 708 | } |
| 709 | memory_region_update_coalesced_range(mr); |
| 710 | } |
| 711 | |
| 712 | static void memory_region_add_subregion_common(MemoryRegion *mr, |
| 713 | target_phys_addr_t offset, |
| 714 | MemoryRegion *subregion) |
| 715 | { |
| 716 | MemoryRegion *other; |
| 717 | |
| 718 | assert(!subregion->parent); |
| 719 | subregion->parent = mr; |
| 720 | subregion->addr = offset; |
| 721 | QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { |
| 722 | if (subregion->may_overlap || other->may_overlap) { |
| 723 | continue; |
| 724 | } |
| 725 | if (offset >= other->offset + other->size |
| 726 | || offset + subregion->size <= other->offset) { |
| 727 | continue; |
| 728 | } |
| 729 | printf("warning: subregion collision %llx/%llx vs %llx/%llx\n", |
| 730 | (unsigned long long)offset, |
| 731 | (unsigned long long)subregion->size, |
| 732 | (unsigned long long)other->offset, |
| 733 | (unsigned long long)other->size); |
| 734 | } |
| 735 | QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { |
| 736 | if (subregion->priority >= other->priority) { |
| 737 | QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); |
| 738 | goto done; |
| 739 | } |
| 740 | } |
| 741 | QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); |
| 742 | done: |
| 743 | memory_region_update_topology(); |
| 744 | } |
| 745 | |
| 746 | |
/* Add @subregion to @mr at @offset, with default priority and no
 * permission to overlap siblings. */
void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}
| 755 | |
/* Add @subregion to @mr at @offset, allowing it to overlap siblings;
 * @priority decides which region wins where they overlap (higher
 * priority is rendered first). */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
| 765 | |
/* Detach @subregion from @mr and propagate the topology change. */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}
Avi Kivity | 1c0ffa5 | 2011-07-26 14:26:04 +0300 | [diff] [blame] | 774 | |
/* Install @mr as the root of the system memory address space and
 * regenerate the flat view. */
void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}