memory.c 76.3 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11
/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

Peter Maydell's avatar
Peter Maydell committed
16
#include "qemu/osdep.h"
17
#include "qapi/error.h"
18 19
#include "qemu-common.h"
#include "cpu.h"
20 21 22
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/ioport.h"
23
#include "qapi/visitor.h"
24
#include "qemu/bitops.h"
Pavel Fedin's avatar
Pavel Fedin committed
25
#include "qemu/error-report.h"
26
#include "qom/object.h"
27
#include "trace.h"
28

29
#include "exec/memory-internal.h"
30
#include "exec/ram_addr.h"
Pavel Fedin's avatar
Pavel Fedin committed
31
#include "sysemu/kvm.h"
32
#include "sysemu/sysemu.h"
33

34 35
//#define DEBUG_UNASSIGNED

/* Nesting depth of memory_region_transaction_begin/commit sections. */
static unsigned memory_region_transaction_depth;
/* Set when a memory-map update is pending until the transaction commits. */
static bool memory_region_update_pending;
/* Set when an ioeventfd update is pending until the transaction commits. */
static bool ioeventfd_update_pending;
/* True while global dirty logging is enabled. */
static bool global_dirty_log = false;

/* All registered MemoryListeners; walked forward or in reverse by the
 * MEMORY_LISTENER_CALL* macros below. */
static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

/* Every live AddressSpace in the system. */
static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

47 48
typedef struct AddrRange AddrRange;

/*
 * A [start, start + size) range of addresses.
 *
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;   /* first address covered by the range */
    Int128 size;    /* length of the range */
};

58
static AddrRange addrrange_make(Int128 start, Int128 size)
59 60 61 62 63 64
{
    return (AddrRange) { start, size };
}

/* True iff both ranges have identical start and size. */
static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    if (!int128_eq(r1.start, r2.start)) {
        return false;
    }
    return int128_eq(r1.size, r2.size);
}

68
static Int128 addrrange_end(AddrRange r)
69
{
70
    return int128_add(r.start, r.size);
71 72
}

73
static AddrRange addrrange_shift(AddrRange range, Int128 delta)
74
{
75
    int128_addto(&range.start, delta);
76 77 78
    return range;
}

79 80 81 82 83 84
static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

85 86
static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
87 88
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
89 90 91 92
}

/* Overlap of @r1 and @r2; callers must ensure the ranges intersect. */
static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 lo = int128_max(r2.start, r1.start);
    Int128 hi = int128_min(addrrange_end(r2), addrrange_end(r1));
    Int128 len = int128_sub(hi, lo);

    return addrrange_make(lo, len);
}

98 99
/* Direction in which the memory_listeners list is walked. */
enum ListenerDirection { Forward, Reverse };

100 101 102 103 104 105 106 107
/* Does @listener want events for @section's address space?  A listener
 * with no filter sees every address space. */
static bool memory_listener_match(MemoryListener *listener,
                                  MemoryRegionSection *section)
{
    if (!listener->address_space_filter) {
        return true;
    }
    return listener->address_space_filter == section->address_space;
}

/* Invoke the listener callback @_callback (with optional extra arguments)
 * on every registered listener that implements it, walking the global
 * memory_listeners list Forward or Reverse per @_direction. */
#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

132 133 134 135 136 137 138
/* Like MEMORY_LISTENER_CALL_GLOBAL, but passes @_section to the callback
 * and skips listeners whose address-space filter does not match the
 * section (see memory_listener_match). */
#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback                                \
                    && memory_listener_match(_listener, _section)) {    \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

159
/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
/* Build a MemoryRegionSection on the fly from a FlatRange and hand it to
 * MEMORY_LISTENER_CALL for address space @as. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
        .mr = (fr)->mr,                                                 \
        .address_space = (as),                                          \
        .offset_within_region = (fr)->offset_in_region,                 \
        .size = (fr)->addr.size,                                        \
        .offset_within_address_space = int128_get64((fr)->addr.start),  \
        .readonly = (fr)->readonly,                                     \
              }), ##_args)
169

170 171 172 173 174
/* One coalesced-MMIO range attached to a MemoryRegion. */
struct CoalescedMemoryRange {
    AddrRange addr;                             /* region-relative extent */
    QTAILQ_ENTRY(CoalescedMemoryRange) link;    /* list linkage */
};

175 176 177 178
/* An eventfd bound to an address range, optionally gated on a data value. */
struct MemoryRegionIoeventfd {
    AddrRange addr;     /* range that triggers the notifier */
    bool match_data;    /* if set, only writes of @data fire the notifier */
    uint64_t data;      /* value compared when match_data is set */
    EventNotifier *e;   /* notifier signalled on a matching access */
};

/* Strict ordering on ioeventfds: by start address, then size, then the
 * match_data flag, then (when both match data) the data value, and finally
 * the notifier pointer.  memory_region_ioeventfd_equal() derives equality
 * from this ordering. */
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
                                           MemoryRegionIoeventfd b)
{
    if (int128_lt(a.addr.start, b.addr.start)) {
        return true;
    } else if (int128_gt(a.addr.start, b.addr.start)) {
        return false;
    } else if (int128_lt(a.addr.size, b.addr.size)) {
        return true;
    } else if (int128_gt(a.addr.size, b.addr.size)) {
        return false;
    } else if (a.match_data < b.match_data) {
        return true;
    } else  if (a.match_data > b.match_data) {
        return false;
    } else if (a.match_data) {
        if (a.data < b.data) {
            return true;
        } else if (a.data > b.data) {
            return false;
        }
    }
    /* NOTE(review): relational comparison of unrelated EventNotifier
     * pointers is technically unspecified in ISO C, though fine on the
     * flat-address-space hosts QEMU targets. */
    if (a.e < b.e) {
        return true;
    } else if (a.e > b.e) {
        return false;
    }
    return false;
}

/* Two ioeventfds are equal iff neither sorts strictly before the other. */
static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
                                          MemoryRegionIoeventfd b)
{
    if (memory_region_ioeventfd_before(a, b)) {
        return false;
    }
    return !memory_region_ioeventfd_before(b, a);
}

219 220 221 222 223 224
typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;           /* backing region; the view holds a ref */
    hwaddr offset_in_region;    /* where this range begins inside @mr */
    AddrRange addr;             /* absolute extent of the range */
    uint8_t dirty_log_mask;     /* dirty-logging clients for this range */
    bool romd_mode;             /* region's ROM-device mode at render time */
    bool readonly;              /* accumulated readonly flag */
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;        /* for RCU-deferred reclamation */
    unsigned ref;               /* refcount; destroyed when it hits zero */
    FlatRange *ranges;          /* sorted array of disjoint ranges */
    unsigned nr;                /* number of ranges in use */
    unsigned nr_allocated;      /* capacity of @ranges */
};

243 244
typedef struct AddressSpaceOps AddressSpaceOps;

245 246 247 248 249 250 251
/* Iterate @var over every FlatRange of @view, in array (address) order. */
#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

/* True iff @a and @b describe the same slice of the same region with the
 * same attributes (dirty_log_mask excluded, matching the original). */
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    if (a->mr != b->mr || !addrrange_equal(a->addr, b->addr)) {
        return false;
    }
    return a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}

/* Initialize an empty view holding one reference for the caller. */
static void flatview_init(FlatView *view)
{
    view->nr = 0;
    view->nr_allocated = 0;
    view->ranges = NULL;
    view->ref = 1;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        /* Grow geometrically with a floor of 10 slots. */
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    /* Open a hole at @pos by shifting the tail up one slot. */
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    /* The view holds its own reference on the region (dropped in
     * flatview_destroy). */
    memory_region_ref(range->mr);
    ++view->nr;
}

/* Free @view: drop the reference each range holds on its MemoryRegion
 * (taken in flatview_insert), then release the range array and the view
 * itself.  Called once the refcount reaches zero (see flatview_unref). */
static void flatview_destroy(FlatView *view)
{
    unsigned i;  /* unsigned to match view->nr; avoids -Wsign-compare */

    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    g_free(view);
}

293 294 295 296 297 298 299 300 301 302 303 304
/* Take an additional reference on @view (atomically; views are shared
 * across threads via RCU). */
static void flatview_ref(FlatView *view)
{
    atomic_inc(&view->ref);
}

/* Drop a reference on @view; the thread that decrements it from 1 to 0
 * destroys the view. */
static void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        flatview_destroy(view);
    }
}

305 306
/* Two flat ranges can be merged iff @r2 begins exactly where @r1 ends in
 * guest-physical space, they are backed by the same region at consecutive
 * in-region offsets, and all attributes (dirty logging, ROM-device mode,
 * readonly) agree. */
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

317
/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        /* Extend range @i over every immediately-following range that
         * can_merge() with its predecessor. */
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        /* Close the gap left by the absorbed ranges [i, j). */
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

337 338 339 340 341 342 343 344 345
/* True if the region's data is big-endian: explicitly DEVICE_BIG_ENDIAN,
 * or DEVICE_NATIVE_ENDIAN when the target itself is big-endian. */
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375
/* True if the region's declared endianness is the opposite of the target's,
 * i.e. accessed data must be byte-swapped (see adjust_endianness). */
static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

/* Byte-swap *@data in place when the region's endianness differs from the
 * target's.  @size must be 1, 2, 4 or 8 bytes. */
static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (!memory_region_wrong_endianness(mr)) {
        return;
    }

    switch (size) {
    case 1:
        /* a single byte has no endianness */
        break;
    case 2:
        *data = bswap16(*data);
        break;
    case 4:
        *data = bswap32(*data);
        break;
    case 8:
        *data = bswap64(*data);
        break;
    default:
        abort();
    }
}

376 377 378 379 380 381 382 383 384 385 386 387 388 389
/* Translate @offset within @mr into an absolute address by accumulating
 * the offsets of @mr and every enclosing container up to the root. */
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    hwaddr abs_addr = offset + mr->addr;
    MemoryRegion *parent;

    for (parent = mr->container; parent; parent = parent->container) {
        abs_addr += parent->addr;
    }

    return abs_addr;
}

390 391 392 393 394 395 396 397
/* Index of the currently executing CPU, or -1 outside CPU context. */
static int get_cpu_index(void)
{
    return current_cpu ? current_cpu->cpu_index : -1;
}

398 399 400 401 402 403 404 405 406 407 408
/* Perform one @size-byte sub-read via the legacy old_mmio callback table
 * (indexed by log2 of the size) and merge the masked result into *@value
 * at bit offset @shift.  Always returns MEMTX_OK; @attrs is unused. */
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       unsigned shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

/* Perform one @size-byte sub-read via the region's ->read callback and
 * merge the masked result into *@value at bit offset @shift.  Always
 * returns MEMTX_OK; @attrs is unused by this accessor. */
static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               unsigned shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return MEMTX_OK;
}

450 451 452 453 454 455 456
/* Perform one @size-byte sub-read via the region's ->read_with_attrs
 * callback, merge the masked result into *@value at bit offset @shift,
 * and propagate the callback's MemTxResult. */
static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          unsigned shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    *value |= (tmp & mask) << shift;
    return r;
}

477 478 479 480 481 482 483
/* Extract one @size-byte sub-value from *@value (bit offset @shift, masked)
 * and write it via the legacy old_mmio callback table.  Always returns
 * MEMTX_OK; @attrs is unused. */
static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        unsigned shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

503 504 505 506 507 508 509
/* Extract one @size-byte sub-value from *@value (bit offset @shift, masked)
 * and write it via the region's ->write callback.  Always returns MEMTX_OK;
 * @attrs is unused by this accessor. */
static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                unsigned shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

529 530 531 532 533 534 535 536 537 538 539
/* Extract one @size-byte sub-value from *@value (bit offset @shift, masked)
 * and write it via the region's ->write_with_attrs callback, returning the
 * callback's MemTxResult. */
static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           unsigned shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = (*value >> shift) & mask;
    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

/* Split a @size-byte access at @addr into one or more sub-accesses bounded
 * by the region's declared minimum/maximum access sizes, invoking @access
 * for each piece and OR-ing the MemTxResult codes together.
 *
 * Each sub-access is placed at the correct bit position of *@value via the
 * @shift argument, which depends on the device endianness.  Returns
 * MEMTX_OK only if every sub-access succeeded. */
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access)(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            uint64_t *value,
                                                            unsigned size,
                                                            unsigned shift,
                                                            uint64_t mask,
                                                            MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    /* A zero min/max means the region did not specify one; default to
     * 1-byte minimum, 4-byte maximum sub-accesses. */
    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            /* Big-endian device: the first sub-access supplies the most
             * significant bits of *value. */
            r |= access(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}

598 599
/* Find the AddressSpace whose root is the top-level ancestor of @mr, or
 * NULL if no address space uses that hierarchy. */
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    /* Climb to the root of the region hierarchy. */
    while (mr->container != NULL) {
        mr = mr->container;
    }

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (as->root == mr) {
            return as;
        }
    }

    return NULL;
}

613 614 615 616 617
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 *
 * @base is the absolute address at which @mr's parent places it; @clip
 * limits rendering to the part of the address space still of interest.
 * Recurses through aliases and subregions; only terminating regions
 * produce FlatRanges.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    /* readonly accumulates down the hierarchy. */
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        /* Rebase onto the alias target's coordinate system and render the
         * target instead. */
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            /* Existing range ends before us; look at the next one. */
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            /* Fill the gap up to the start of the existing range. */
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        /* Skip the part obscured by the existing range. */
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        /* Whatever is left goes after all existing ranges. */
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    FlatView *view;

    view = g_new(FlatView, 1);
    flatview_init(view);

    if (mr) {
        /* Render the full 2^64-byte address space, starting unclipped. */
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()), false);
    }
    /* Merge adjacent compatible ranges. */
    flatview_simplify(view);

    return view;
}

717 718 719 720 721 722 723
/* Notify listeners of the differences between the sorted arrays @fds_old
 * and @fds_new: eventfd_del for entries only in the old set, eventfd_add
 * for entries only in the new set.  Both arrays must be sorted per
 * memory_region_ioeventfd_before(). */
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(fds_old[iold],
                                                  fds_new[inew]))) {
            /* Present only in the old set: deleted. */
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(fds_new[inew],
                                                         fds_old[iold]))) {
            /* Present only in the new set: added. */
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .address_space = as,
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            /* Present in both sets: unchanged, advance both cursors. */
            ++iold;
            ++inew;
        }
    }
}

766 767 768 769
/* Return @as's current FlatView with an extra reference held for the
 * caller (drop it with flatview_unref()).  The RCU read-side critical
 * section keeps the map alive between reading the pointer and taking
 * the reference. */
static FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    view = atomic_rcu_read(&as->current_map);
    flatview_ref(view);
    rcu_read_unlock();
    return view;
}

777 778
/* Rebuild as->ioeventfds from the current flat view: collect every region
 * ioeventfd whose range is visible through a FlatRange, translated to
 * absolute addresses, then diff against the previous array so listeners
 * see only the actual additions and removals. */
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            /* Translate the region-relative ioeventfd range into the
             * address space's coordinates. */
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                          ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    /* Replace the cached array with the freshly collected one. */
    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}

811
/* Walk @old_view and @new_view in parallel (both sorted by start address)
 * and emit listener callbacks for their symmetric difference.  The caller
 * runs this twice: once with @adding == false to delete vanished ranges,
 * then with @adding == true to add new ranges, so deletions always
 * precede additions.
 */
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        /* Next unconsumed range from each view, NULL when exhausted. */
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                /* Fire log_start/log_stop for bits that flipped on/off in
                 * the dirty-logging mask.
                 */
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}


/* Regenerate the flat view of @as from its root region and publish it,
 * emitting region_del/region_add listener callbacks for the delta.
 */
static void address_space_update_topology(AddressSpace *as)
{
    FlatView *prev_view = address_space_get_flatview(as);
    FlatView *next_view = generate_memory_topology(as->root);

    /* Pass 1 tears down ranges that are gone, pass 2 adds new ones. */
    address_space_update_topology_pass(as, prev_view, next_view, false);
    address_space_update_topology_pass(as, prev_view, next_view, true);

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, next_view);
    call_rcu(prev_view, flatview_unref, rcu);

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    flatview_unref(prev_view);

    address_space_update_ioeventfds(as);
}

Avi Kivity's avatar
Avi Kivity committed
902 903
void memory_region_transaction_begin(void)
{
904
    qemu_flush_coalesced_mmio_buffer();
Avi Kivity's avatar
Avi Kivity committed
905 906 907
    ++memory_region_transaction_depth;
}

908 909 910 911 912 913
/* Reset both deferred-update flags after a commit has processed them. */
static void memory_region_clear_pending(void)
{
    ioeventfd_update_pending = false;
    memory_region_update_pending = false;
}

Avi Kivity's avatar
Avi Kivity committed
914 915
void memory_region_transaction_commit(void)
{
916 917
    AddressSpace *as;

Avi Kivity's avatar
Avi Kivity committed
918 919
    assert(memory_region_transaction_depth);
    --memory_region_transaction_depth;
920 921 922
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
923

924 925 926
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_topology(as);
            }
927

928 929 930 931 932 933 934 935
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
        }
        memory_region_clear_pending();
   }
Avi Kivity's avatar
Avi Kivity committed
936 937
}

938 939 940 941 942 943
/* Destructor for regions that own no backing storage: nothing to free. */
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

/* Destructor for RAM regions: release the backing RAM block. */
static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

947 948
/* Destructor for ROM devices; they are RAM-backed, so free the block
 * just like memory_region_destructor_ram does.
 */
static void memory_region_destructor_rom_device(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985
/* Return true for characters that are special in QOM path names and
 * must therefore be escaped when embedded in a region name.
 */
static bool memory_region_need_escape(char c)
{
    switch (c) {
    case '/':
    case '[':
    case '\\':
    case ']':
        return true;
    default:
        return false;
    }
}

/* Return a newly allocated copy of @name with QOM-special characters
 * replaced by "\xNN" escapes.  Caller frees the result.
 */
static char *memory_region_escape_name(const char *name)
{
    const char *src;
    char *out, *dst;
    size_t needed = 0;

    /* First pass: each special character expands to 4 bytes ("\xNN"). */
    for (src = name; *src; src++) {
        needed += memory_region_need_escape(*src) ? 4 : 1;
    }
    if (needed == src - name) {
        /* Nothing to escape: hand back a plain copy. */
        return g_memdup(name, needed + 1);
    }

    /* Second pass: copy, expanding the special characters. */
    out = g_malloc(needed + 1);
    for (src = name, dst = out; *src; src++) {
        uint8_t c = *src;
        if (unlikely(memory_region_need_escape(c))) {
            *dst++ = '\\';
            *dst++ = 'x';
            *dst++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *dst++ = c;
    }
    *dst = 0;
    return out;
}

986
/* Initialize @mr as a QOM object with the given @name and @size.
 * If @owner is NULL the region is parented under the machine's
 * "/unattached" container.
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    mr->size = int128_make64(size);
    /* UINT64_MAX is shorthand for the full 2^64-byte address space. */
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        /* Escape QOM-special characters; the "[*]" suffix lets QOM
         * auto-number same-named siblings.
         */
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        /* The child property now holds a reference; drop the initial one
         * so the owner alone keeps the region alive.
         */
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

1015 1016
/* QOM getter for the read-only "addr" property. */
static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t addr = mr->addr;

    visit_type_uint64(v, name, &addr, errp);
}

1024 1025 1026
/* QOM getter for the "container" link property: visits the canonical
 * path of the containing region, or the empty string when unparented.
 */
static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    if (mr->container) {
        /* We own this string and must free it after the visit. */
        gchar *path = object_get_canonical_path(OBJECT(mr->container));
        visit_type_str(v, name, &path, errp);
        g_free(path);
    } else {
        gchar *path = (gchar *)"";
        visit_type_str(v, name, &path, errp);
    }
}

/* Resolve hook for the "container" link: it always points at the
 * region's containing MemoryRegion (possibly NULL).
 */
static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    return OBJECT(MEMORY_REGION(obj)->container);
}

1048 1049 1050
/* QOM getter for the read-only "priority" property. */
static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t prio = mr->priority;

    visit_type_int32(v, name, &prio, errp);
}

1058 1059
/* QOM getter for the read-only "size" property. */
static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t sz = memory_region_size(mr);

    visit_type_uint64(v, name, &sz, errp);
}

1067 1068 1069
/* QOM instance_init for TYPE_MEMORY_REGION: set field defaults and
 * register the read-only "container", "addr", "priority" and "size"
 * properties (all setters are NULL).
 */
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    /* Until configured otherwise, every access is rejected. */
    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    /* Links normally resolve through a pointer property; this one is
     * computed from mr->container instead.
     */
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

1101 1102 1103 1104 1105 1106
/* Read handler for accesses that hit no mapped region: report the
 * access to the current CPU (which may fault) and return zero.
 */
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    /* current_cpu is NULL e.g. for device-originated accesses. */
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
1119 1120
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
1121
    }
1122 1123
}

1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134
/* No access to unassigned memory is ever valid. */
static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write)
{
    return false;
}

/* Default ops installed by memory_region_initfn: reject every access. */
const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

1135 1136 1137 1138
/* Check whether an access of @size bytes at @addr is acceptable to @mr,
 * honoring the region's alignment rules and its valid.accepts hook.
 */
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write)
{
    int min_access, max_access;
    int chunk, i;

    /* Misaligned accesses are rejected unless the region allows them. */
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Without an accepts hook, every (aligned) access is valid. */
    if (!mr->ops->valid.accepts) {
        return true;
    }

    /* Unset bounds default to 1..4 bytes. */
    min_access = mr->ops->valid.min_access_size ?
                 mr->ops->valid.min_access_size : 1;
    max_access = mr->ops->valid.max_access_size ?
                 mr->ops->valid.max_access_size : 4;

    /* Clamp the access into chunks the region supports and query the
     * hook for each chunk.
     */
    chunk = MAX(MIN(size, max_access), min_access);
    for (i = 0; i < size; i += chunk) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, chunk, is_write)) {
            return false;
        }
    }

    return true;
}

1172 1173 1174 1175 1176
/* Perform the raw read via the region's callbacks, without endianness
 * adjustment.  Preference order: ->read, then ->read_with_attrs, then
 * the legacy old_mmio accessors (fixed 1..4 byte sizes).
 */
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    /* Cleared up front so partial accessor fills compose correctly. */
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else if (mr->ops->read_with_attrs) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size, 1, 4,
                                         memory_region_oldmmio_read_accessor,
                                         mr, attrs);
    }
}

1199 1200 1201 1202 1203
/* Dispatch a read to @mr, validating the access and applying the
 * region's declared endianness to the result.
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    /* Invalid accesses behave like reads of unassigned memory. */
    if (!memory_region_access_valid(mr, addr, size, false)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    /* Swap after the raw read; the accessors work in region byte order. */
    adjust_endianness(mr, pval, size);
    return r;
}
1216

Pavel Fedin's avatar
Pavel Fedin committed
1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                    hwaddr addr,
                                                    uint64_t data,
                                                    unsigned size,
                                                    MemTxAttrs attrs)
{
    /* Candidate built from the write itself; match_data/e are filled in
     * per entry before the comparison.
     */
    MemoryRegionIoeventfd key = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned idx;

    for (idx = 0; idx < mr->ioeventfd_nb; idx++) {
        key.match_data = mr->ioeventfds[idx].match_data;
        key.e = mr->ioeventfds[idx].e;

        if (memory_region_ioeventfd_equal(key, mr->ioeventfds[idx])) {
            event_notifier_set(key.e);
            return true;
        }
    }

    return false;
}

1243 1244 1245 1246 1247
/* Dispatch a write to @mr: validate, byte-swap to region order, try to
 * satisfy it via an ioeventfd, and otherwise invoke the region's write
 * callbacks (->write, ->write_with_attrs, or legacy old_mmio).
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    /* Invalid accesses behave like writes to unassigned memory. */
    if (!memory_region_access_valid(mr, addr, size, true)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    /* Swap before eventfd matching and the callbacks, both of which see
     * the data in region byte order.
     */
    adjust_endianness(mr, &data, size);

    /* When KVM does not handle eventfds in-kernel, match and signal
     * them here and skip the device callback entirely.
     */
    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else if (mr->ops->write_with_attrs) {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    } else {
        /* Legacy old_mmio path: fixed 1..4 byte access sizes. */
        return access_with_adjusted_size(addr, &data, size, 1, 4,
                                         memory_region_oldmmio_write_accessor,
                                         mr, attrs);
    }
}

void memory_region_init_io(MemoryRegion *mr,
1282
                           Object *owner,
1283 1284 1285 1286 1287
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
1288
    memory_region_init(mr, owner, name, size);
1289
    mr->ops = ops ? ops : &unassigned_mem_ops;
1290
    mr->opaque = opaque;
1291
    mr->terminates = true;
1292 1293 1294
}

/* Initialize @mr as RAM backed by a freshly allocated RAM block of
 * @size bytes; allocation errors are reported through @errp.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, mr, errp);
    /* TCG must trace code writes; otherwise start with no dirty logging. */
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321
/* Initialize @mr as RAM that can later grow up to @max_size; @resized
 * is invoked on resize with the region name, new length and host
 * pointer.  Allocation errors are reported through @errp.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, errp);
    /* TCG must trace code writes; otherwise start with no dirty logging. */
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}

1327 1328 1329 1330 1331
#ifdef __linux__
/* Initialize @mr as RAM backed by the file at @path (Linux only).
 * NOTE(review): @share presumably selects a shared vs private mapping —
 * confirm against qemu_ram_alloc_from_file.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp);
    /* TCG must trace code writes; otherwise start with no dirty logging. */
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
#endif
1344 1345

void memory_region_init_ram_ptr(MemoryRegion *mr,
Paolo Bonzini's avatar