/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)
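/* SHIFT is defined by the file that includes this template; DATA_SIZE of
   1, 2, 4 or 8 then selects the byte/word/long/quad variant generated
   below.  */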

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif

/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
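/* For example: in the usual data-access build on a 64-bit host, an
   instantiation with DATA_SIZE == 4 gives DATA_TYPE uint32_t, USUFFIX ul,
   SSUFFIX sl and WORD_TYPE tcg_target_ulong, so the 32-bit result is
   returned widened to the 64-bit host register size.  */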

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif
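/* For example, assuming the usual data-access instantiation where
   MMUSUFFIX is _mmu: with DATA_SIZE == 4 the names above expand to
   helper_le_ldul_mmu, helper_be_ldul_mmu, helper_le_ldsl_mmu,
   helper_be_ldsl_mmu, helper_le_stl_mmu and helper_be_stl_mmu, and
   helper_te_ld_name/helper_te_st_name pick the target-endian pair.  */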

/* macro to check the victim tlb */
#define VICTIM_TLB_HIT(ty)                                                    \
({                                                                            \
    /* we are about to do a page table walk. our last hope is the             \
     * victim tlb. try to refill from the victim tlb before walking the       \
     * page table. */                                                         \
    int vidx;                                                                 \
    CPUIOTLBEntry tmpiotlb;                                                   \
    CPUTLBEntry tmptlb;                                                       \
    for (vidx = CPU_VTLB_SIZE-1; vidx >= 0; --vidx) {                         \
        if (env->tlb_v_table[mmu_idx][vidx].ty == (addr & TARGET_PAGE_MASK)) {\
            /* found entry in victim tlb, swap tlb and iotlb */               \
            tmptlb = env->tlb_table[mmu_idx][index];                          \
            env->tlb_table[mmu_idx][index] = env->tlb_v_table[mmu_idx][vidx]; \
            env->tlb_v_table[mmu_idx][vidx] = tmptlb;                         \
            tmpiotlb = env->iotlb[mmu_idx][index];                            \
            env->iotlb[mmu_idx][index] = env->iotlb_v[mmu_idx][vidx];         \
            env->iotlb_v[mmu_idx][vidx] = tmpiotlb;                           \
            break;                                                            \
        }                                                                     \
    }                                                                         \
    /* return true when there is a vtlb hit, i.e. vidx >=0 */                 \
    vidx >= 0;                                                                \
})
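/* VICTIM_TLB_HIT is a GNU statement expression: it evaluates to nonzero on
   a victim-TLB hit, and as a side effect it has already swapped the matching
   victim entry (and its iotlb entry) into tlb_table[mmu_idx][index].  It
   relies on addr, mmu_idx and index being in scope at the call site, e.g.:

       if (!VICTIM_TLB_HIT(ADDR_READ)) {
           tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
       }
 */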

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              CPUIOTLBEntry *iotlbentry,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    memory_region_dispatch_read(mr, physaddr, &val, 1 << SHIFT,
                                iotlbentry->attrs);
    return val;
}
#endif

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
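        /* For instance (illustrative values): with DATA_SIZE == 4 and
           addr & 3 == 2, shift is 16, so the two high bytes of res1 (the
           bytes at addr and addr + 1) become the low half of the result
           and the two low bytes of res2 become the high half.  */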
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

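    /* RAM case: addend is the offset that turns this guest virtual address
       into a host virtual address within the page's backing memory.  */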
    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(ADDR_READ)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, iotlbentry, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
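        /* For instance (illustrative values): with DATA_SIZE == 4 and
           addr & 3 == 2, shift is 16, so the two low bytes of res1 (the
           bytes at addr and addr + 1) become the high half of the result
           and the two high bytes of res2 become the low half.  */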
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
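/* For example: with DATA_SIZE == 2 on a 64-bit host, the unsigned helper's
   uint16_t result is cast through int16_t here, so the value comes back
   sign-extended in the returned tcg_target_ulong.  */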
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          CPUIOTLBEntry *iotlbentry,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr physaddr = iotlbentry->addr;
    MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    memory_region_dispatch_write(mr, physaddr, val, 1 << SHIFT,
                                 iotlbentry->attrs);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always handle data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Little-endian extract.  */
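            /* For instance (illustrative values): with DATA_SIZE == 4 the
               loop stores val >> 24 to addr + 3 first, then val >> 16 to
               addr + 2, and so on down to the low byte at addr.  */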
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if ((addr & (DATA_SIZE - 1)) != 0
            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        iotlbentry = &env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always handle data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, iotlbentry, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                 mmu_idx, retaddr);
        }
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Big-endian extract.  */
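            /* For instance (illustrative values): with DATA_SIZE == 4 the
               byte written to addr + i is val >> ((3 - i) * 8), so the most
               significant byte of val ends up at addr.  */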
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
    if ((addr & (DATA_SIZE - 1)) != 0
        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */

#if DATA_SIZE == 1
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;

    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
    }
}
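/* Illustrative use (an assumption, not shown in this file): a target helper
   can call probe_write(env, addr, mmu_idx, GETPC()) before a multi-byte
   store so that any fault is raised before the first byte is written.  */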
#endif
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name