diff --git a/hw/s390x/css.c b/hw/s390x/css.c
index 7074d2b3d5..122cc7e66f 100644
--- a/hw/s390x/css.c
+++ b/hw/s390x/css.c
@@ -140,7 +140,6 @@ static void sch_handle_clear_func(SubchDev *sch)
     s->flags &= ~SCSW_FLAGS_MASK_PNO;
 
     /* We always 'attempt to issue the clear signal', and we always succeed. */
-    sch->orb = NULL;
     sch->channel_prog = 0x0;
     sch->last_cmd_valid = false;
     s->ctrl &= ~SCSW_ACTL_CLEAR_PEND;
@@ -163,7 +162,6 @@ static void sch_handle_halt_func(SubchDev *sch)
     path = 0x80;
 
     /* We always 'attempt to issue the halt signal', and we always succeed. */
-    sch->orb = NULL;
     sch->channel_prog = 0x0;
     sch->last_cmd_valid = false;
     s->ctrl &= ~SCSW_ACTL_HALT_PEND;
@@ -317,12 +315,11 @@ static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr)
     return ret;
 }
 
-static void sch_handle_start_func(SubchDev *sch)
+static void sch_handle_start_func(SubchDev *sch, ORB *orb)
 {
 
     PMCW *p = &sch->curr_status.pmcw;
     SCSW *s = &sch->curr_status.scsw;
-    ORB *orb = sch->orb;
     int path;
     int ret;
 
@@ -331,6 +328,7 @@ static void sch_handle_start_func(SubchDev *sch)
 
     if (!(s->ctrl & SCSW_ACTL_SUSP)) {
         /* Look at the orb and try to execute the channel program. */
+        assert(orb != NULL); /* resume does not pass an orb */
         p->intparm = orb->intparm;
         if (!(orb->lpm & path)) {
             /* Generate a deferred cc 3 condition. */
@@ -406,7 +404,7 @@ static void sch_handle_start_func(SubchDev *sch)
  * read/writes) asynchronous later on if we start supporting more than
  * our current very simple devices.
  */
-static void do_subchannel_work(SubchDev *sch)
+static void do_subchannel_work(SubchDev *sch, ORB *orb)
 {
 
     SCSW *s = &sch->curr_status.scsw;
@@ -416,7 +414,7 @@ static void do_subchannel_work(SubchDev *sch)
     } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
         sch_handle_halt_func(sch);
     } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
-        sch_handle_start_func(sch);
+        sch_handle_start_func(sch, orb);
     } else {
         /* Cannot happen. */
         return;
@@ -594,7 +592,6 @@
                  SCSW_ACTL_SUSP);
     sch->channel_prog = 0x0;
     sch->last_cmd_valid = false;
-    sch->orb = NULL;
     s->dstat = 0;
     s->cstat = 0;
     ret = 0;
@@ -618,7 +615,7 @@ int css_do_csch(SubchDev *sch)
     s->ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL);
     s->ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_CLEAR_FUNC;
 
-    do_subchannel_work(sch);
+    do_subchannel_work(sch, NULL);
     ret = 0;
 
 out:
@@ -659,7 +656,7 @@ int css_do_hsch(SubchDev *sch)
     }
     s->ctrl |= SCSW_ACTL_HALT_PEND;
 
-    do_subchannel_work(sch);
+    do_subchannel_work(sch, NULL);
     ret = 0;
 
 out:
@@ -721,13 +718,12 @@ int css_do_ssch(SubchDev *sch, ORB *orb)
     if (channel_subsys->chnmon_active) {
         css_update_chnmon(sch);
     }
-    sch->orb = orb;
     sch->channel_prog = orb->cpa;
     /* Trigger the start function. */
     s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND);
     s->flags &= ~SCSW_FLAGS_MASK_PNO;
 
-    do_subchannel_work(sch);
+    do_subchannel_work(sch, orb);
     ret = 0;
 
 out:
@@ -957,7 +953,7 @@ int css_do_rsch(SubchDev *sch)
     }
 
     s->ctrl |= SCSW_ACTL_RESUME_PEND;
-    do_subchannel_work(sch);
+    do_subchannel_work(sch, NULL);
     ret = 0;
 
 out:
@@ -1267,7 +1263,6 @@ void css_reset_sch(SubchDev *sch)
 
     sch->channel_prog = 0x0;
     sch->last_cmd_valid = false;
-    sch->orb = NULL;
     sch->thinint_active = false;
 }
 
diff --git a/hw/s390x/css.h b/hw/s390x/css.h
index e9b440540d..220169e7c3 100644
--- a/hw/s390x/css.h
+++ b/hw/s390x/css.h
@@ -76,7 +76,6 @@ struct SubchDev {
     hwaddr channel_prog;
     CCW1 last_cmd;
     bool last_cmd_valid;
-    ORB *orb;
     bool thinint_active;
     /* transport-provided data: */
     int (*ccw_cb) (SubchDev *, CCW1);
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 2bf0af8f0a..1cb4e2c2f8 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -559,7 +559,6 @@ static int virtio_ccw_device_init(VirtioCcwDevice *dev, VirtIODevice *vdev)
     /* Initialize subchannel structure. */
     sch->channel_prog = 0x0;
     sch->last_cmd_valid = false;
-    sch->orb = NULL;
     sch->thinint_active = false;
     /*
      * Use a device number if provided. Otherwise, fall back to subchannel
diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h
index 41903a93fb..aad277af49 100644
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -270,6 +270,9 @@ typedef struct CPUS390XState {
 #define FLAG_MASK_64 (PSW_MASK_64 >> 32)
 #define FLAG_MASK_32 0x00001000
 
+/* Control register 0 bits */
+#define CR0_EDAT 0x0000000000800000ULL
+
 static inline int cpu_mmu_index (CPUS390XState *env)
 {
     if (env->psw.mask & PSW_MASK_PSTATE) {
@@ -927,6 +930,7 @@ struct sysib_322 {
 #define _REGION_ENTRY_LENGTH 0x03 /* region third length */
 
 #define _SEGMENT_ENTRY_ORIGIN ~0x7ffULL /* segment table origin */
+#define _SEGMENT_ENTRY_FC 0x400 /* format control */
 #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
 #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
 
diff --git a/target-s390x/helper.c b/target-s390x/helper.c
index aa628b8fe2..7c76fc149b 100644
--- a/target-s390x/helper.c
+++ b/target-s390x/helper.c
@@ -170,6 +170,64 @@ static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
     trigger_pgm_exception(env, type, ilen);
 }
 
+/**
+ * Translate real address to absolute (= physical)
+ * address by taking care of the prefix mapping.
+ */
+static target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
+{
+    if (raddr < 0x2000) {
+        return raddr + env->psa; /* Map the lowcore. */
+    } else if (raddr >= env->psa && raddr < env->psa + 0x2000) {
+        return raddr - env->psa; /* Map the 0 page. */
+    }
+    return raddr;
+}
+
+/* Decode page table entry (normal 4KB page) */
+static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
+                             uint64_t asc, uint64_t asce,
+                             target_ulong *raddr, int *flags, int rw)
+{
+    if (asce & _PAGE_INVALID) {
+        DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, asce);
+        trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw);
+        return -1;
+    }
+
+    if (asce & _PAGE_RO) {
+        *flags &= ~PAGE_WRITE;
+    }
+
+    *raddr = asce & _ASCE_ORIGIN;
+
+    PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, asce);
+
+    return 0;
+}
+
+/* Decode EDAT1 segment frame absolute address (1MB page) */
+static int mmu_translate_sfaa(CPUS390XState *env, target_ulong vaddr,
+                              uint64_t asc, uint64_t asce, target_ulong *raddr,
+                              int *flags, int rw)
+{
+    if (asce & _SEGMENT_ENTRY_INV) {
+        DPRINTF("%s: SEG=0x%" PRIx64 " invalid\n", __func__, asce);
+        trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw);
+        return -1;
+    }
+
+    if (asce & _SEGMENT_ENTRY_RO) {
+        *flags &= ~PAGE_WRITE;
+    }
+
+    *raddr = (asce & 0xfffffffffff00000ULL) | (vaddr & 0xfffff);
+
+    PTE_DPRINTF("%s: SEG=0x%" PRIx64 "\n", __func__, asce);
+
+    return 0;
+}
+
 static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
                               uint64_t asc, uint64_t asce, int level,
                               target_ulong *raddr, int *flags, int rw)
@@ -229,28 +287,18 @@ static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
     PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
                 __func__, origin, offs, new_asce);
 
-    if (level != _ASCE_TYPE_SEGMENT) {
+    if (level == _ASCE_TYPE_SEGMENT) {
+        /* 4KB page */
+        return mmu_translate_pte(env, vaddr, asc, new_asce, raddr, flags, rw);
+    } else if (level - 4 == _ASCE_TYPE_SEGMENT &&
+               (new_asce & _SEGMENT_ENTRY_FC) && (env->cregs[0] & CR0_EDAT)) {
+        /* 1MB page */
+        return mmu_translate_sfaa(env, vaddr, asc, new_asce, raddr, flags, rw);
+    } else {
         /* yet another region */
         return mmu_translate_asce(env, vaddr, asc, new_asce, level - 4, raddr,
                                   flags, rw);
     }
-
-    /* PTE */
-    if (new_asce & _PAGE_INVALID) {
-        DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, new_asce);
-        trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw);
-        return -1;
-    }
-
-    if (new_asce & _PAGE_RO) {
-        *flags &= ~PAGE_WRITE;
-    }
-
-    *raddr = new_asce & _ASCE_ORIGIN;
-
-    PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, new_asce);
-
-    return 0;
 }
 
 static int mmu_translate_asc(CPUS390XState *env, target_ulong vaddr,
@@ -363,9 +411,7 @@ int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
 
 out:
     /* Convert real address -> absolute address */
-    if (*raddr < 0x2000) {
-        *raddr = *raddr + env->psa;
-    }
+    *raddr = mmu_real2abs(env, *raddr);
 
     if (*raddr <= ram_size) {
         sk = &env->storage_keys[*raddr / TARGET_PAGE_SIZE];