Avi Kivity | 093bc2c | 2011-07-26 14:26:01 +0300 | [diff] [blame] | 1 | /* |
| 2 | * Physical memory management |
| 3 | * |
| 4 | * Copyright 2011 Red Hat, Inc. and/or its affiliates |
| 5 | * |
| 6 | * Authors: |
| 7 | * Avi Kivity <avi@redhat.com> |
| 8 | * |
| 9 | * This work is licensed under the terms of the GNU GPL, version 2. See |
| 10 | * the COPYING file in the top-level directory. |
| 11 | * |
| 12 | */ |
| 13 | |
| 14 | #include "memory.h" |
Avi Kivity | 1c0ffa5 | 2011-07-26 14:26:04 +0300 | [diff] [blame] | 15 | #include "exec-memory.h" |
Avi Kivity | 093bc2c | 2011-07-26 14:26:01 +0300 | [diff] [blame] | 16 | #include <assert.h> |
| 17 | |
typedef struct AddrRange AddrRange;

/* A contiguous range of guest physical addresses: [start, start + size).
 * size may be as large as UINT64_MAX (see generate_memory_topology()), so
 * start + size can wrap; helpers below must be careful about that.
 */
struct AddrRange {
    uint64_t start;
    uint64_t size;
};
| 24 | |
| 25 | static AddrRange addrrange_make(uint64_t start, uint64_t size) |
| 26 | { |
| 27 | return (AddrRange) { start, size }; |
| 28 | } |
| 29 | |
| 30 | static bool addrrange_equal(AddrRange r1, AddrRange r2) |
| 31 | { |
| 32 | return r1.start == r2.start && r1.size == r2.size; |
| 33 | } |
| 34 | |
| 35 | static uint64_t addrrange_end(AddrRange r) |
| 36 | { |
| 37 | return r.start + r.size; |
| 38 | } |
| 39 | |
| 40 | static AddrRange addrrange_shift(AddrRange range, int64_t delta) |
| 41 | { |
| 42 | range.start += delta; |
| 43 | return range; |
| 44 | } |
| 45 | |
| 46 | static bool addrrange_intersects(AddrRange r1, AddrRange r2) |
| 47 | { |
| 48 | return (r1.start >= r2.start && r1.start < r2.start + r2.size) |
| 49 | || (r2.start >= r1.start && r2.start < r1.start + r1.size); |
| 50 | } |
| 51 | |
/* Intersection of two ranges.  Caller must ensure they actually intersect
 * (see addrrange_intersects()); otherwise the result is meaningless.
 */
static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}
| 59 | |
/* One coalesced-MMIO sub-range of a memory region.  @addr is relative to
 * the region's origin (see memory_region_add_coalescing()); entries live
 * on the region's ->coalesced list.
 */
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};
| 64 | |
typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;                    /* region backing this range */
    target_phys_addr_t offset_in_region; /* where in @mr the range starts */
    AddrRange addr;                      /* absolute guest physical span */
    uint8_t dirty_log_mask;              /* one bit per dirty-logging client */
};
| 75 | |
/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;     /* disjoint ranges, sorted by address */
    unsigned nr;           /* entries in use */
    unsigned nr_allocated; /* entries allocated; grown by flatview_insert() */
};
| 84 | |
typedef struct AddressSpace AddressSpace;
typedef struct AddressSpaceOps AddressSpaceOps;

/* A system address space - I/O, memory, etc. */
struct AddressSpace {
    const AddressSpaceOps *ops; /* callbacks fired on topology changes */
    MemoryRegion *root;         /* root of this space's region hierarchy */
    FlatView current_map;       /* last rendered flat view of @root */
};
| 94 | |
/* Hooks that push flat-view differences out to the lower-level memory
 * registration machinery (see address_space_update_topology()).
 */
struct AddressSpaceOps {
    void (*range_add)(AddressSpace *as, FlatRange *fr); /* fr became visible */
    void (*range_del)(AddressSpace *as, FlatRange *fr); /* fr disappeared */
    void (*log_start)(AddressSpace *as, FlatRange *fr); /* logging enabled */
    void (*log_stop)(AddressSpace *as, FlatRange *fr);  /* logging disabled */
};
| 101 | |
/* Iterate @var (a FlatRange *) over every range in @view, in address order. */
#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
| 104 | |
Avi Kivity | 093bc2c | 2011-07-26 14:26:01 +0300 | [diff] [blame] | 105 | static bool flatrange_equal(FlatRange *a, FlatRange *b) |
| 106 | { |
| 107 | return a->mr == b->mr |
| 108 | && addrrange_equal(a->addr, b->addr) |
| 109 | && a->offset_in_region == b->offset_in_region; |
| 110 | } |
| 111 | |
| 112 | static void flatview_init(FlatView *view) |
| 113 | { |
| 114 | view->ranges = NULL; |
| 115 | view->nr = 0; |
| 116 | view->nr_allocated = 0; |
| 117 | } |
| 118 | |
/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        /* Grow geometrically, with a minimum initial allocation of 10. */
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    /* Shift the tail up one slot to open a hole at @pos. */
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}
| 134 | |
/* Release the range storage; the FlatView struct itself is not freed. */
static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}
| 139 | |
Avi Kivity | 3d8e6bf | 2011-07-26 14:26:03 +0300 | [diff] [blame] | 140 | static bool can_merge(FlatRange *r1, FlatRange *r2) |
| 141 | { |
| 142 | return addrrange_end(r1->addr) == r2->addr.start |
| 143 | && r1->mr == r2->mr |
| 144 | && r1->offset_in_region + r1->addr.size == r2->offset_in_region |
| 145 | && r1->dirty_log_mask == r2->dirty_log_mask; |
| 146 | } |
| 147 | |
/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        /* Accumulate the run [i+1, j) of mergeable followers into range i. */
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            view->ranges[i].addr.size += view->ranges[j].addr.size;
            ++j;
        }
        ++i;
        /* Close the hole left by the ranges that were folded into i. */
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
| 167 | |
/* Publish a newly visible flat range to the core physical-memory code. */
static void as_memory_range_add(AddressSpace *as, FlatRange *fr)
{
    ram_addr_t phys_offset, region_offset;

    phys_offset = fr->mr->ram_addr;
    region_offset = fr->offset_in_region;
    /* cpu_register_physical_memory_log() wants region_offset for
     * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
     */
    if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* RAM or ROM: fold the region offset into the physical offset. */
        phys_offset += region_offset;
        region_offset = 0;
    }

    cpu_register_physical_memory_log(fr->addr.start,
                                     fr->addr.size,
                                     phys_offset,
                                     region_offset,
                                     fr->dirty_log_mask);
}
| 188 | |
/* Remove a flat range by re-registering its span as unassigned. */
static void as_memory_range_del(AddressSpace *as, FlatRange *fr)
{
    cpu_register_physical_memory(fr->addr.start, fr->addr.size,
                                 IO_MEM_UNASSIGNED);
}
| 194 | |
/* Begin dirty logging for a flat range's span. */
static void as_memory_log_start(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_start(fr->addr.start, fr->addr.size);
}
| 199 | |
/* Stop dirty logging for a flat range's span. */
static void as_memory_log_stop(AddressSpace *as, FlatRange *fr)
{
    cpu_physical_log_stop(fr->addr.start, fr->addr.size);
}
| 204 | |
/* Glue between the flat view and the classic physical-memory API. */
static const AddressSpaceOps address_space_ops_memory = {
    .range_add = as_memory_range_add,
    .range_del = as_memory_range_del,
    .log_start = as_memory_log_start,
    .log_stop = as_memory_log_stop,
};
| 211 | |
/* The global system-memory address space; its root is installed by
 * set_system_memory_map().
 */
static AddressSpace address_space_memory = {
    .ops = &address_space_ops_memory,
};
| 215 | |
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 *
 * @base is the absolute guest address of @mr's container; @clip restricts
 * what may be rendered.  Recurses over aliases and subregions.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    /* Translate container-relative position into an absolute base. */
    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    /* Only the portion of @mr inside @clip may be rendered. */
    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        /* Redirect to the aliased region, adjusting base so that the
         * target's contents appear where the alias sits.
         */
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->has_ram_addr) {
        /* Pure container: nothing of its own to render. */
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            /* Existing range lies entirely below us; skip it. */
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            /* Fill the gap up to the next existing range. */
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            /* An already rendered range obscures us; jump past it. */
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        /* Tail beyond the last existing range. */
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}
| 294 | |
| 295 | /* Render a memory topology into a list of disjoint absolute ranges. */ |
| 296 | static FlatView generate_memory_topology(MemoryRegion *mr) |
| 297 | { |
| 298 | FlatView view; |
| 299 | |
| 300 | flatview_init(&view); |
| 301 | |
| 302 | render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX)); |
Avi Kivity | 3d8e6bf | 2011-07-26 14:26:03 +0300 | [diff] [blame] | 303 | flatview_simplify(&view); |
Avi Kivity | 093bc2c | 2011-07-26 14:26:01 +0300 | [diff] [blame] | 304 | |
| 305 | return view; |
| 306 | } |
| 307 | |
/* Re-render @as->root and publish the differences against the previous
 * flat view through @as->ops.
 */
static void address_space_update_topology(AddressSpace *as)
{
    FlatView old_view = as->current_map;
    FlatView new_view = generate_memory_topology(as->root);
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     * Both views are sorted by address, so this is a linear merge walk.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            as->ops->range_del(as, frold);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                as->ops->log_stop(as, frnew);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                as->ops->log_start(as, frnew);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            as->ops->range_add(as, frnew);
            ++inew;
        }
    }
    /* Retire the old view only after all callbacks have fired. */
    as->current_map = new_view;
    flatview_destroy(&old_view);
}
| 361 | |
/* Republish the (currently single) address space after any change to the
 * region hierarchy.
 */
static void memory_region_update_topology(void)
{
    address_space_update_topology(&address_space_memory);
}
| 366 | |
Avi Kivity | 093bc2c | 2011-07-26 14:26:01 +0300 | [diff] [blame] | 367 | void memory_region_init(MemoryRegion *mr, |
| 368 | const char *name, |
| 369 | uint64_t size) |
| 370 | { |
| 371 | mr->ops = NULL; |
| 372 | mr->parent = NULL; |
| 373 | mr->size = size; |
| 374 | mr->addr = 0; |
| 375 | mr->offset = 0; |
| 376 | mr->has_ram_addr = false; |
| 377 | mr->priority = 0; |
| 378 | mr->may_overlap = false; |
| 379 | mr->alias = NULL; |
| 380 | QTAILQ_INIT(&mr->subregions); |
| 381 | memset(&mr->subregions_link, 0, sizeof mr->subregions_link); |
| 382 | QTAILQ_INIT(&mr->coalesced); |
| 383 | mr->name = qemu_strdup(name); |
Avi Kivity | 5a58334 | 2011-07-26 14:26:02 +0300 | [diff] [blame] | 384 | mr->dirty_log_mask = 0; |
Avi Kivity | 093bc2c | 2011-07-26 14:26:01 +0300 | [diff] [blame] | 385 | } |
| 386 | |
| 387 | static bool memory_region_access_valid(MemoryRegion *mr, |
| 388 | target_phys_addr_t addr, |
| 389 | unsigned size) |
| 390 | { |
| 391 | if (!mr->ops->valid.unaligned && (addr & (size - 1))) { |
| 392 | return false; |
| 393 | } |
| 394 | |
| 395 | /* Treat zero as compatibility all valid */ |
| 396 | if (!mr->ops->valid.max_access_size) { |
| 397 | return true; |
| 398 | } |
| 399 | |
| 400 | if (size > mr->ops->valid.max_access_size |
| 401 | || size < mr->ops->valid.min_access_size) { |
| 402 | return false; |
| 403 | } |
| 404 | return true; |
| 405 | } |
| 406 | |
/* Generic read accessor: split a @size-byte read at @addr into one or more
 * device callbacks honouring the region's implemented access sizes, and
 * assemble the result with lower addresses in lower bits.
 */
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    /* Zero means "unconstrained"; default to 1..4 byte callbacks. */
    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    /* Clamp the request into [min, max]; mask keeps only the bytes read. */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}
| 442 | |
/* Generic write accessor: split a @size-byte write of @data at @addr into
 * one or more device callbacks honouring the region's implemented access
 * sizes.  Lower bits of @data go to lower addresses.
 */
static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    /* Zero means "unconstrained"; default to 1..4 byte callbacks. */
    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    /* Clamp the request into [min, max]; mask selects the bytes written. */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}
| 476 | |
/* Fixed-width adapters between the CPUReadMemoryFunc/CPUWriteMemoryFunc
 * signatures expected by cpu_register_io_memory() and the size-generic
 * thunks above (b = 1 byte, w = 2 bytes, l = 4 bytes).
 */
static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

/* Dispatch tables indexed by log2(access size), as cpu_register_io_memory()
 * expects.
 */
static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};
| 521 | |
/* Initialize a region whose accesses are served by the @ops callbacks,
 * with @opaque passed through to them.
 */
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->has_ram_addr = true;
    /* The registered io-memory slot serves as this region's ram_addr. */
    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
}
| 537 | |
/* Initialize a region backed by freshly allocated guest RAM. */
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
}
| 547 | |
/* Initialize a RAM region backed by a caller-supplied host buffer @ptr.
 * The caller retains ownership of the buffer.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
}
| 558 | |
/* Initialize a region that aliases a window of @orig starting at @offset.
 * Rendering resolves the alias (see render_memory_region()).
 */
void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
| 569 | |
/* Tear down @mr.  All subregions must have been removed first.
 * NOTE(review): io-memory/RAM registered by the initializers is not
 * released here — confirm whether that is intentional at this stage.
 */
void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}
| 576 | |
/* Size of the region in bytes. */
uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}
| 581 | |
/* Set the offset added to access addresses before they reach the region's
 * callbacks (see the read/write thunks).
 */
void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}
| 586 | |
/* Enable or disable dirty logging for @client (a bit index into the dirty
 * log mask), then republish the topology so the change takes effect.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    /* @log is 0 or 1, so (log * mask) sets or clears the client's bit. */
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}
| 594 | |
| 595 | bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr, |
| 596 | unsigned client) |
| 597 | { |
Avi Kivity | 5a58334 | 2011-07-26 14:26:02 +0300 | [diff] [blame] | 598 | assert(mr->has_ram_addr); |
| 599 | return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client); |
Avi Kivity | 093bc2c | 2011-07-26 14:26:01 +0300 | [diff] [blame] | 600 | } |
| 601 | |
| 602 | void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr) |
| 603 | { |
Avi Kivity | 5a58334 | 2011-07-26 14:26:02 +0300 | [diff] [blame] | 604 | assert(mr->has_ram_addr); |
| 605 | return cpu_physical_memory_set_dirty(mr->ram_addr + addr); |
Avi Kivity | 093bc2c | 2011-07-26 14:26:01 +0300 | [diff] [blame] | 606 | } |
| 607 | |
/* Pull dirty state for every flat range backed by @mr into the dirty
 * bitmap.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}
| 619 | |
/* Not yet implemented: intended to toggle read-only status of @mr. */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}
| 624 | |
/* Clear @client's dirty bits for the region-relative span
 * [addr, addr + size).  Only valid for RAM-backed regions.
 */
void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->has_ram_addr);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}
| 633 | |
/* Host pointer to the start of the region's RAM.  Aliases resolve
 * recursively through their target, adjusted by the alias offset.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->has_ram_addr);

    return qemu_get_ram_ptr(mr->ram_addr);
}
| 644 | |
/* Re-register coalesced MMIO for every flat range backed by @mr: drop the
 * old registration for the whole span, then register each coalesced
 * sub-range, translated to absolute addresses and clipped to the range.
 */
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                /* cmr->addr is region-relative; shift to absolute. */
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}
| 666 | |
/* Coalesce MMIO over the entire region, replacing any existing
 * coalesced sub-ranges.
 */
void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}
| 672 | |
/* Add a coalesced-MMIO sub-range at region-relative [offset, offset + size)
 * and push the updated registrations out.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}
| 683 | |
/* Drop every coalesced sub-range from @mr and refresh the registrations. */
void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}
| 695 | |
| 696 | static void memory_region_add_subregion_common(MemoryRegion *mr, |
| 697 | target_phys_addr_t offset, |
| 698 | MemoryRegion *subregion) |
| 699 | { |
| 700 | MemoryRegion *other; |
| 701 | |
| 702 | assert(!subregion->parent); |
| 703 | subregion->parent = mr; |
| 704 | subregion->addr = offset; |
| 705 | QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { |
| 706 | if (subregion->may_overlap || other->may_overlap) { |
| 707 | continue; |
| 708 | } |
| 709 | if (offset >= other->offset + other->size |
| 710 | || offset + subregion->size <= other->offset) { |
| 711 | continue; |
| 712 | } |
| 713 | printf("warning: subregion collision %llx/%llx vs %llx/%llx\n", |
| 714 | (unsigned long long)offset, |
| 715 | (unsigned long long)subregion->size, |
| 716 | (unsigned long long)other->offset, |
| 717 | (unsigned long long)other->size); |
| 718 | } |
| 719 | QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { |
| 720 | if (subregion->priority >= other->priority) { |
| 721 | QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); |
| 722 | goto done; |
| 723 | } |
| 724 | } |
| 725 | QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); |
| 726 | done: |
| 727 | memory_region_update_topology(); |
| 728 | } |
| 729 | |
| 730 | |
/* Add a non-overlapping subregion at default (zero) priority. */
void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}
| 739 | |
/* Add a subregion that may overlap siblings; @priority decides which one
 * wins where they do overlap (higher renders first and obscures lower).
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
| 749 | |
/* Detach @subregion from @mr and republish the topology. */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}
Avi Kivity | 1c0ffa5 | 2011-07-26 14:26:04 +0300 | [diff] [blame] | 758 | |
/* Install @mr as the root of the system memory address space and render
 * it immediately.
 */
void set_system_memory_map(MemoryRegion *mr)
{
    address_space_memory.root = mr;
    memory_region_update_topology();
}