Merge remote-tracking branch 'pmaydell/tags/pull-target-arm-20140108' into staging

target-arm queue:
 * further A64 decoder patches, including enabling the aarch64-linux-user
   target; this includes full floating point support. Neon is not yet
   supported.
 * cadence UART model fixes.
 * some minor bug fixes and cleanups.
 * all the softfloat fixes required by the new A64 instructions;
   several of these will also be used by PPC.

# gpg: Signature made Wed 08 Jan 2014 11:25:12 AM PST using RSA key ID 14360CDE
# gpg: Can't check signature: public key not found

* pmaydell/tags/pull-target-arm-20140108: (76 commits)
  target-arm: A64: Add support for FCVT between half, single and double
  target-arm: A64: Add 1-source 32-to-32 and 64-to-64 FP instructions
  target-arm: A64: Add floating-point<->integer conversion instructions
  target-arm: A64: Add floating-point<->fixed-point instructions
  target-arm: A64: Add extra VFP fixed point conversion helpers
  target-arm: Ignore most exceptions from scalbn when doing fixpoint conversion
  target-arm: Rename A32 VFP conversion helpers
  target-arm: Prepare VFP_CONV_FIX helpers for A64 uses
  softfloat: Add support for ties-away rounding
  softfloat: Refactor code handling various rounding modes
  softfloat: Add float16 <=> float64 conversion functions
  softfloat: Factor out RoundAndPackFloat16 and NormalizeFloat16Subnormal
  softfloat: Provide complete set of accessors for fp state
  softfloat: Fix float64_to_uint32_round_to_zero
  softfloat: Fix float64_to_uint32
  softfloat: Fix float64_to_uint64_round_to_zero
  softfloat: Add float32_to_uint64()
  softfloat: Fix factor 2 error for scalbn on denormal inputs
  softfloat: Only raise Invalid when conversions to int are out of range
  softfloat: Fix float64_to_uint64
  ...

Message-id: 1389209439-25448-1-git-send-email-peter.maydell@linaro.org
Signed-off-by: Anthony Liguori <aliguori@amazon.com>
Commit b61740dbef by Anthony Liguori, 2014-01-09 11:23:49 -08:00
27 changed files with 4506 additions and 669 deletions

View file

@ -16,6 +16,7 @@ env:
matrix:
- TARGETS=alpha-softmmu,alpha-linux-user
- TARGETS=arm-softmmu,arm-linux-user
- TARGETS=aarch64-softmmu,aarch64-linux-user
- TARGETS=cris-softmmu
- TARGETS=i386-softmmu,x86_64-softmmu
- TARGETS=lm32-softmmu

View file

@ -0,0 +1,3 @@
# Default configuration for aarch64-linux-user
CONFIG_GDBSTUB_XML=y

File diff suppressed because it is too large

View file

@ -49,9 +49,11 @@ static void gem_init(NICInfo *nd, uint32_t base, qemu_irq irq)
DeviceState *dev;
SysBusDevice *s;
qemu_check_nic_model(nd, "cadence_gem");
dev = qdev_create(NULL, "cadence_gem");
qdev_set_nic_properties(dev, nd);
if (nd->used) {
qemu_check_nic_model(nd, "cadence_gem");
qdev_set_nic_properties(dev, nd);
}
qdev_init_nofail(dev);
s = SYS_BUS_DEVICE(dev);
sysbus_mmio_map(s, 0, base);
@ -113,7 +115,6 @@ static void zynq_init(QEMUMachineInitArgs *args)
DeviceState *dev;
SysBusDevice *busdev;
qemu_irq pic[64];
NICInfo *nd;
Error *err = NULL;
int n;
@ -190,14 +191,8 @@ static void zynq_init(QEMUMachineInitArgs *args)
sysbus_create_varargs("cadence_ttc", 0xF8002000,
pic[69-IRQ_OFFSET], pic[70-IRQ_OFFSET], pic[71-IRQ_OFFSET], NULL);
for (n = 0; n < nb_nics; n++) {
nd = &nd_table[n];
if (n == 0) {
gem_init(nd, 0xE000B000, pic[54-IRQ_OFFSET]);
} else if (n == 1) {
gem_init(nd, 0xE000C000, pic[77-IRQ_OFFSET]);
}
}
gem_init(&nd_table[0], 0xE000B000, pic[54-IRQ_OFFSET]);
gem_init(&nd_table[1], 0xE000C000, pic[77-IRQ_OFFSET]);
dev = qdev_create(NULL, "generic-sdhci");
qdev_init_nofail(dev);

View file

@ -34,6 +34,9 @@
#define UART_SR_INTR_RFUL 0x00000004
#define UART_SR_INTR_TEMPTY 0x00000008
#define UART_SR_INTR_TFUL 0x00000010
/* somewhat awkwardly, TTRIG is misaligned between SR and ISR */
#define UART_SR_TTRIG 0x00002000
#define UART_INTR_TTRIG 0x00000400
/* bit fields in SR that correlate to CISR. If any of these bits are set in
* SR, then the same bit in CISR is set high too */
#define UART_SR_TO_CISR_MASK 0x0000001F
@ -43,6 +46,7 @@
#define UART_INTR_PARE 0x00000080
#define UART_INTR_TIMEOUT 0x00000100
#define UART_INTR_DMSI 0x00000200
#define UART_INTR_TOVR 0x00001000
#define UART_SR_RACTIVE 0x00000400
#define UART_SR_TACTIVE 0x00000800
@ -110,23 +114,37 @@
#define CADENCE_UART(obj) OBJECT_CHECK(UartState, (obj), TYPE_CADENCE_UART)
typedef struct {
/*< private >*/
SysBusDevice parent_obj;
/*< public >*/
MemoryRegion iomem;
uint32_t r[R_MAX];
uint8_t r_fifo[RX_FIFO_SIZE];
uint8_t rx_fifo[RX_FIFO_SIZE];
uint8_t tx_fifo[TX_FIFO_SIZE];
uint32_t rx_wpos;
uint32_t rx_count;
uint32_t tx_count;
uint64_t char_tx_time;
CharDriverState *chr;
qemu_irq irq;
QEMUTimer *fifo_trigger_handle;
QEMUTimer *tx_time_handle;
} UartState;
static void uart_update_status(UartState *s)
{
s->r[R_SR] = 0;
s->r[R_SR] |= s->rx_count == RX_FIFO_SIZE ? UART_SR_INTR_RFUL : 0;
s->r[R_SR] |= !s->rx_count ? UART_SR_INTR_REMPTY : 0;
s->r[R_SR] |= s->rx_count >= s->r[R_RTRIG] ? UART_SR_INTR_RTRIG : 0;
s->r[R_SR] |= s->tx_count == TX_FIFO_SIZE ? UART_SR_INTR_TFUL : 0;
s->r[R_SR] |= !s->tx_count ? UART_SR_INTR_TEMPTY : 0;
s->r[R_SR] |= s->tx_count >= s->r[R_TTRIG] ? UART_SR_TTRIG : 0;
s->r[R_CISR] |= s->r[R_SR] & UART_SR_TO_CISR_MASK;
s->r[R_CISR] |= s->r[R_SR] & UART_SR_TTRIG ? UART_INTR_TTRIG : 0;
qemu_set_irq(s->irq, !!(s->r[R_IMR] & s->r[R_CISR]));
}
@ -139,24 +157,6 @@ static void fifo_trigger_update(void *opaque)
uart_update_status(s);
}
static void uart_tx_redo(UartState *s)
{
uint64_t new_tx_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
timer_mod(s->tx_time_handle, new_tx_time + s->char_tx_time);
s->r[R_SR] |= UART_SR_INTR_TEMPTY;
uart_update_status(s);
}
static void uart_tx_write(void *opaque)
{
UartState *s = (UartState *)opaque;
uart_tx_redo(s);
}
static void uart_rx_reset(UartState *s)
{
s->rx_wpos = 0;
@ -164,15 +164,11 @@ static void uart_rx_reset(UartState *s)
if (s->chr) {
qemu_chr_accept_input(s->chr);
}
s->r[R_SR] |= UART_SR_INTR_REMPTY;
s->r[R_SR] &= ~UART_SR_INTR_RFUL;
}
static void uart_tx_reset(UartState *s)
{
s->r[R_SR] |= UART_SR_INTR_TEMPTY;
s->r[R_SR] &= ~UART_SR_INTR_TFUL;
s->tx_count = 0;
}
static void uart_send_breaks(UartState *s)
@ -237,8 +233,16 @@ static void uart_parameters_setup(UartState *s)
static int uart_can_receive(void *opaque)
{
UartState *s = (UartState *)opaque;
int ret = MAX(RX_FIFO_SIZE, TX_FIFO_SIZE);
uint32_t ch_mode = s->r[R_MR] & UART_MR_CHMODE;
return RX_FIFO_SIZE - s->rx_count;
if (ch_mode == NORMAL_MODE || ch_mode == ECHO_MODE) {
ret = MIN(ret, RX_FIFO_SIZE - s->rx_count);
}
if (ch_mode == REMOTE_LOOPBACK || ch_mode == ECHO_MODE) {
ret = MIN(ret, TX_FIFO_SIZE - s->tx_count);
}
return ret;
}
static void uart_ctrl_update(UartState *s)
@ -253,10 +257,6 @@ static void uart_ctrl_update(UartState *s)
s->r[R_CR] &= ~(UART_CR_TXRST | UART_CR_RXRST);
if ((s->r[R_CR] & UART_CR_TX_EN) && !(s->r[R_CR] & UART_CR_TX_DIS)) {
uart_tx_redo(s);
}
if (s->r[R_CR] & UART_CR_STARTBRK && !(s->r[R_CR] & UART_CR_STOPBRK)) {
uart_send_breaks(s);
}
@ -272,24 +272,13 @@ static void uart_write_rx_fifo(void *opaque, const uint8_t *buf, int size)
return;
}
s->r[R_SR] &= ~UART_SR_INTR_REMPTY;
if (s->rx_count == RX_FIFO_SIZE) {
s->r[R_CISR] |= UART_INTR_ROVR;
} else {
for (i = 0; i < size; i++) {
s->r_fifo[s->rx_wpos] = buf[i];
s->rx_fifo[s->rx_wpos] = buf[i];
s->rx_wpos = (s->rx_wpos + 1) % RX_FIFO_SIZE;
s->rx_count++;
if (s->rx_count == RX_FIFO_SIZE) {
s->r[R_SR] |= UART_SR_INTR_RFUL;
break;
}
if (s->rx_count >= s->r[R_RTRIG]) {
s->r[R_SR] |= UART_SR_INTR_RTRIG;
}
}
timer_mod(s->fifo_trigger_handle, new_rx_time +
(s->char_tx_time * 4));
@ -297,13 +286,55 @@ static void uart_write_rx_fifo(void *opaque, const uint8_t *buf, int size)
uart_update_status(s);
}
static gboolean cadence_uart_xmit(GIOChannel *chan, GIOCondition cond,
void *opaque)
{
UartState *s = opaque;
int ret;
/* instantly drain the fifo when there's no back-end */
if (!s->chr) {
s->tx_count = 0;
}
if (!s->tx_count) {
return FALSE;
}
ret = qemu_chr_fe_write(s->chr, s->tx_fifo, s->tx_count);
s->tx_count -= ret;
memmove(s->tx_fifo, s->tx_fifo + ret, s->tx_count);
if (s->tx_count) {
int r = qemu_chr_fe_add_watch(s->chr, G_IO_OUT, cadence_uart_xmit, s);
assert(r);
}
uart_update_status(s);
return FALSE;
}
static void uart_write_tx_fifo(UartState *s, const uint8_t *buf, int size)
{
if ((s->r[R_CR] & UART_CR_TX_DIS) || !(s->r[R_CR] & UART_CR_TX_EN)) {
return;
}
qemu_chr_fe_write_all(s->chr, buf, size);
if (size > TX_FIFO_SIZE - s->tx_count) {
size = TX_FIFO_SIZE - s->tx_count;
/*
* This can only be a guest error via a bad tx fifo register push,
* as can_receive() should stop remote loopback and echo modes from
* ever getting us here.
*/
qemu_log_mask(LOG_GUEST_ERROR, "cadence_uart: TxFIFO overflow");
s->r[R_CISR] |= UART_INTR_ROVR;
}
memcpy(s->tx_fifo + s->tx_count, buf, size);
s->tx_count += size;
cadence_uart_xmit(NULL, G_IO_OUT, s);
}
static void uart_receive(void *opaque, const uint8_t *buf, int size)
@ -337,26 +368,17 @@ static void uart_read_rx_fifo(UartState *s, uint32_t *c)
return;
}
s->r[R_SR] &= ~UART_SR_INTR_RFUL;
if (s->rx_count) {
uint32_t rx_rpos =
(RX_FIFO_SIZE + s->rx_wpos - s->rx_count) % RX_FIFO_SIZE;
*c = s->r_fifo[rx_rpos];
*c = s->rx_fifo[rx_rpos];
s->rx_count--;
if (!s->rx_count) {
s->r[R_SR] |= UART_SR_INTR_REMPTY;
}
qemu_chr_accept_input(s->chr);
} else {
*c = 0;
s->r[R_SR] |= UART_SR_INTR_REMPTY;
}
if (s->rx_count < s->r[R_RTRIG]) {
s->r[R_SR] &= ~UART_SR_INTR_RTRIG;
}
uart_update_status(s);
}
@ -401,6 +423,7 @@ static void uart_write(void *opaque, hwaddr offset,
uart_parameters_setup(s);
break;
}
uart_update_status(s);
}
static uint64_t uart_read(void *opaque, hwaddr offset,
@ -428,8 +451,10 @@ static const MemoryRegionOps uart_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
static void cadence_uart_reset(UartState *s)
static void cadence_uart_reset(DeviceState *dev)
{
UartState *s = CADENCE_UART(dev);
s->r[R_CR] = 0x00000128;
s->r[R_IMR] = 0;
s->r[R_CISR] = 0;
@ -440,8 +465,7 @@ static void cadence_uart_reset(UartState *s)
uart_rx_reset(s);
uart_tx_reset(s);
s->rx_count = 0;
s->rx_wpos = 0;
uart_update_status(s);
}
static int cadence_uart_init(SysBusDevice *dev)
@ -455,15 +479,10 @@ static int cadence_uart_init(SysBusDevice *dev)
s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
(QEMUTimerCB *)fifo_trigger_update, s);
s->tx_time_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
(QEMUTimerCB *)uart_tx_write, s);
s->char_tx_time = (get_ticks_per_sec() / 9600) * 10;
s->chr = qemu_char_get_next_serial();
cadence_uart_reset(s);
if (s->chr) {
qemu_chr_add_handlers(s->chr, uart_can_receive, uart_receive,
uart_event, s);
@ -483,17 +502,18 @@ static int cadence_uart_post_load(void *opaque, int version_id)
static const VMStateDescription vmstate_cadence_uart = {
.name = "cadence_uart",
.version_id = 1,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
.version_id = 2,
.minimum_version_id = 2,
.minimum_version_id_old = 2,
.post_load = cadence_uart_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT32_ARRAY(r, UartState, R_MAX),
VMSTATE_UINT8_ARRAY(r_fifo, UartState, RX_FIFO_SIZE),
VMSTATE_UINT8_ARRAY(rx_fifo, UartState, RX_FIFO_SIZE),
VMSTATE_UINT8_ARRAY(tx_fifo, UartState, RX_FIFO_SIZE),
VMSTATE_UINT32(rx_count, UartState),
VMSTATE_UINT32(tx_count, UartState),
VMSTATE_UINT32(rx_wpos, UartState),
VMSTATE_TIMER(fifo_trigger_handle, UartState),
VMSTATE_TIMER(tx_time_handle, UartState),
VMSTATE_END_OF_LIST()
}
};
@ -505,6 +525,7 @@ static void cadence_uart_class_init(ObjectClass *klass, void *data)
sdc->init = cadence_uart_init;
dc->vmsd = &vmstate_cadence_uart;
dc->reset = cadence_uart_reset;
}
static const TypeInfo cadence_uart_info = {

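The new transmit path above follows a common pattern for chardev back-ends: write as much of the TX FIFO as the back-end accepts, keep the unsent tail queued, and re-arm a G_IO_OUT watch to retry once the channel drains. A minimal standalone sketch of the same pattern, using plain glib rather than QEMU's chardev layer (Fifo and flush_tx() are illustrative stand-ins for the UART state and qemu_chr_fe_write()):

#include <glib.h>
#include <stdint.h>
#include <string.h>

typedef struct { uint8_t data[64]; int count; } Fifo;

int flush_tx(uint8_t *buf, int len);  /* stand-in: returns bytes accepted */

static gboolean tx_drain(GIOChannel *chan, GIOCondition cond, void *opaque)
{
    Fifo *f = opaque;
    int n = flush_tx(f->data, f->count);      /* may accept less than count */

    f->count -= n;
    memmove(f->data, f->data + n, f->count);  /* keep the unsent tail */
    if (f->count) {
        /* back-end is full: run again when it signals writability */
        g_io_add_watch(chan, G_IO_OUT, tx_drain, f);
    }
    return FALSE;                             /* one-shot watch */
}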
View file

@ -128,7 +128,7 @@ static void gic_set_irq(void *opaque, int irq, int level)
if (level) {
GIC_SET_LEVEL(irq, cm);
if (GIC_TEST_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
DPRINTF("Set %d pending mask %x\n", irq, target);
GIC_SET_PENDING(irq, target);
}
@ -168,6 +168,15 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu)
return new_irq;
}
void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val)
{
if (irq < GIC_INTERNAL) {
s->priority1[irq][cpu] = val;
} else {
s->priority2[(irq) - GIC_INTERNAL] = val;
}
}
void gic_complete_irq(GICState *s, int cpu, int irq)
{
int update = 0;
@ -188,7 +197,7 @@ void gic_complete_irq(GICState *s, int cpu, int irq)
return; /* No active IRQ. */
/* Mark level triggered interrupts as pending if they are still
raised. */
if (!GIC_TEST_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
&& GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
DPRINTF("Set %d pending mask %x\n", irq, cm);
GIC_SET_PENDING(irq, cm);
@ -311,7 +320,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset)
for (i = 0; i < 4; i++) {
if (GIC_TEST_MODEL(irq + i))
res |= (1 << (i * 2));
if (GIC_TEST_TRIGGER(irq + i))
if (GIC_TEST_EDGE_TRIGGER(irq + i))
res |= (2 << (i * 2));
}
} else if (offset < 0xfe0) {
@ -386,7 +395,7 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
/* If a raised level-triggered IRQ is enabled then mark
it as pending. */
if (GIC_TEST_LEVEL(irq + i, mask)
&& !GIC_TEST_TRIGGER(irq + i)) {
&& !GIC_TEST_EDGE_TRIGGER(irq + i)) {
DPRINTF("Set %d pending mask %x\n", irq + i, mask);
GIC_SET_PENDING(irq + i, mask);
}
@ -443,11 +452,7 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
irq = (offset - 0x400) + GIC_BASE_IRQ;
if (irq >= s->num_irq)
goto bad_reg;
if (irq < GIC_INTERNAL) {
s->priority1[irq][cpu] = value;
} else {
s->priority2[irq - GIC_INTERNAL] = value;
}
gic_set_priority(s, cpu, irq, value);
} else if (offset < 0xc00) {
/* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
* annoying exception of the 11MPCore's GIC.
@ -478,9 +483,9 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
GIC_CLEAR_MODEL(irq + i);
}
if (value & (2 << (i * 2))) {
GIC_SET_TRIGGER(irq + i);
GIC_SET_EDGE_TRIGGER(irq + i);
} else {
GIC_CLEAR_TRIGGER(irq + i);
GIC_CLEAR_EDGE_TRIGGER(irq + i);
}
}
} else {

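The renamed edge_trigger macros make the pend rules in the hunks above easier to state: a rising input latches the interrupt pending if it is edge-triggered or currently enabled, and at completion time a level-triggered interrupt whose line is still high goes straight back to pending. A tiny model of just those two rules (an illustrative paraphrase that ignores the CPU target masks; not the QEMU code):

#include <stdbool.h>

typedef struct { bool edge_trigger, enabled, level, pending; } MiniIrq;

static void mini_rising_edge(MiniIrq *q)   /* mirrors gic_set_irq() */
{
    q->level = true;
    if (q->edge_trigger || q->enabled) {
        q->pending = true;
    }
}

static void mini_complete(MiniIrq *q)      /* mirrors gic_complete_irq() */
{
    if (!q->edge_trigger && q->enabled && q->level) {
        q->pending = true;   /* still asserted: immediately pending again */
    }
}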
View file

@ -51,7 +51,7 @@ static const VMStateDescription vmstate_gic_irq_state = {
VMSTATE_UINT8(active, gic_irq_state),
VMSTATE_UINT8(level, gic_irq_state),
VMSTATE_BOOL(model, gic_irq_state),
VMSTATE_BOOL(trigger, gic_irq_state),
VMSTATE_BOOL(edge_trigger, gic_irq_state),
VMSTATE_END_OF_LIST()
}
};
@ -126,7 +126,7 @@ static void arm_gic_common_reset(DeviceState *dev)
}
for (i = 0; i < 16; i++) {
GIC_SET_ENABLED(i, ALL_CPU_MASK);
GIC_SET_TRIGGER(i);
GIC_SET_EDGE_TRIGGER(i);
}
if (s->num_cpu == 1) {
/* For uniprocessor GICs all interrupts always target the sole CPU */

View file

@ -44,9 +44,9 @@
#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level = (cm)
#define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm)
#define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
#define GIC_SET_TRIGGER(irq) s->irq_state[irq].trigger = true
#define GIC_CLEAR_TRIGGER(irq) s->irq_state[irq].trigger = false
#define GIC_TEST_TRIGGER(irq) s->irq_state[irq].trigger
#define GIC_SET_EDGE_TRIGGER(irq) s->irq_state[irq].edge_trigger = true
#define GIC_CLEAR_EDGE_TRIGGER(irq) s->irq_state[irq].edge_trigger = false
#define GIC_TEST_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger)
#define GIC_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ? \
s->priority1[irq][cpu] : \
s->priority2[(irq) - GIC_INTERNAL])
@ -61,5 +61,6 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu);
void gic_complete_irq(GICState *s, int cpu, int irq);
void gic_update(GICState *s);
void gic_init_irqs_and_distributor(GICState *s, int num_irq);
void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val);
#endif /* !QEMU_ARM_GIC_INTERNAL_H */

View file

@ -152,7 +152,8 @@ enum {
float_round_nearest_even = 0,
float_round_down = 1,
float_round_up = 2,
float_round_to_zero = 3
float_round_to_zero = 3,
float_round_ties_away = 4,
};
/*----------------------------------------------------------------------------
@ -180,12 +181,22 @@ typedef struct float_status {
flag default_nan_mode;
} float_status;
void set_float_rounding_mode(int val STATUS_PARAM);
void set_float_exception_flags(int val STATUS_PARAM);
INLINE void set_float_detect_tininess(int val STATUS_PARAM)
{
STATUS(float_detect_tininess) = val;
}
INLINE void set_float_rounding_mode(int val STATUS_PARAM)
{
STATUS(float_rounding_mode) = val;
}
INLINE void set_float_exception_flags(int val STATUS_PARAM)
{
STATUS(float_exception_flags) = val;
}
INLINE void set_floatx80_rounding_precision(int val STATUS_PARAM)
{
STATUS(floatx80_rounding_precision) = val;
}
INLINE void set_flush_to_zero(flag val STATUS_PARAM)
{
STATUS(flush_to_zero) = val;
@ -198,11 +209,34 @@ INLINE void set_default_nan_mode(flag val STATUS_PARAM)
{
STATUS(default_nan_mode) = val;
}
INLINE int get_float_detect_tininess(float_status *status)
{
return STATUS(float_detect_tininess);
}
INLINE int get_float_rounding_mode(float_status *status)
{
return STATUS(float_rounding_mode);
}
INLINE int get_float_exception_flags(float_status *status)
{
return STATUS(float_exception_flags);
}
void set_floatx80_rounding_precision(int val STATUS_PARAM);
INLINE int get_floatx80_rounding_precision(float_status *status)
{
return STATUS(floatx80_rounding_precision);
}
INLINE flag get_flush_to_zero(float_status *status)
{
return STATUS(flush_to_zero);
}
INLINE flag get_flush_inputs_to_zero(float_status *status)
{
return STATUS(flush_inputs_to_zero);
}
INLINE flag get_default_nan_mode(float_status *status)
{
return STATUS(default_nan_mode);
}
/*----------------------------------------------------------------------------
| Routine to raise any or all of the software IEC/IEEE floating-point
@ -225,25 +259,48 @@ enum {
/*----------------------------------------------------------------------------
| Software IEC/IEEE integer-to-floating-point conversion routines.
*----------------------------------------------------------------------------*/
float32 int32_to_float32( int32 STATUS_PARAM );
float64 int32_to_float64( int32 STATUS_PARAM );
float32 uint32_to_float32( uint32 STATUS_PARAM );
float64 uint32_to_float64( uint32 STATUS_PARAM );
floatx80 int32_to_floatx80( int32 STATUS_PARAM );
float128 int32_to_float128( int32 STATUS_PARAM );
float32 int64_to_float32( int64 STATUS_PARAM );
float32 uint64_to_float32( uint64 STATUS_PARAM );
float64 int64_to_float64( int64 STATUS_PARAM );
float64 uint64_to_float64( uint64 STATUS_PARAM );
floatx80 int64_to_floatx80( int64 STATUS_PARAM );
float128 int64_to_float128( int64 STATUS_PARAM );
float128 uint64_to_float128( uint64 STATUS_PARAM );
float32 int32_to_float32(int32_t STATUS_PARAM);
float64 int32_to_float64(int32_t STATUS_PARAM);
float32 uint32_to_float32(uint32_t STATUS_PARAM);
float64 uint32_to_float64(uint32_t STATUS_PARAM);
floatx80 int32_to_floatx80(int32_t STATUS_PARAM);
float128 int32_to_float128(int32_t STATUS_PARAM);
float32 int64_to_float32(int64_t STATUS_PARAM);
float32 uint64_to_float32(uint64_t STATUS_PARAM);
float64 int64_to_float64(int64_t STATUS_PARAM);
float64 uint64_to_float64(uint64_t STATUS_PARAM);
floatx80 int64_to_floatx80(int64_t STATUS_PARAM);
float128 int64_to_float128(int64_t STATUS_PARAM);
float128 uint64_to_float128(uint64_t STATUS_PARAM);
/* We provide the int16 versions for symmetry of API with float-to-int */
INLINE float32 int16_to_float32(int16_t v STATUS_PARAM)
{
return int32_to_float32(v STATUS_VAR);
}
INLINE float32 uint16_to_float32(uint16_t v STATUS_PARAM)
{
return uint32_to_float32(v STATUS_VAR);
}
INLINE float64 int16_to_float64(int16_t v STATUS_PARAM)
{
return int32_to_float64(v STATUS_VAR);
}
INLINE float64 uint16_to_float64(uint16_t v STATUS_PARAM)
{
return uint32_to_float64(v STATUS_VAR);
}
/*----------------------------------------------------------------------------
| Software half-precision conversion routines.
*----------------------------------------------------------------------------*/
float16 float32_to_float16( float32, flag STATUS_PARAM );
float32 float16_to_float32( float16, flag STATUS_PARAM );
float16 float64_to_float16(float64 a, flag ieee STATUS_PARAM);
float64 float16_to_float64(float16 a, flag ieee STATUS_PARAM);
/*----------------------------------------------------------------------------
| Software half-precision operations.
@ -265,6 +322,8 @@ extern const float16 float16_default_nan;
/*----------------------------------------------------------------------------
| Software IEC/IEEE single-precision conversion routines.
*----------------------------------------------------------------------------*/
int_fast16_t float32_to_int16(float32 STATUS_PARAM);
uint_fast16_t float32_to_uint16(float32 STATUS_PARAM);
int_fast16_t float32_to_int16_round_to_zero(float32 STATUS_PARAM);
uint_fast16_t float32_to_uint16_round_to_zero(float32 STATUS_PARAM);
int32 float32_to_int32( float32 STATUS_PARAM );
@ -272,6 +331,7 @@ int32 float32_to_int32_round_to_zero( float32 STATUS_PARAM );
uint32 float32_to_uint32( float32 STATUS_PARAM );
uint32 float32_to_uint32_round_to_zero( float32 STATUS_PARAM );
int64 float32_to_int64( float32 STATUS_PARAM );
uint64 float32_to_uint64(float32 STATUS_PARAM);
int64 float32_to_int64_round_to_zero( float32 STATUS_PARAM );
float64 float32_to_float64( float32 STATUS_PARAM );
floatx80 float32_to_floatx80( float32 STATUS_PARAM );
@ -371,6 +431,8 @@ extern const float32 float32_default_nan;
/*----------------------------------------------------------------------------
| Software IEC/IEEE double-precision conversion routines.
*----------------------------------------------------------------------------*/
int_fast16_t float64_to_int16(float64 STATUS_PARAM);
uint_fast16_t float64_to_uint16(float64 STATUS_PARAM);
int_fast16_t float64_to_int16_round_to_zero(float64 STATUS_PARAM);
uint_fast16_t float64_to_uint16_round_to_zero(float64 STATUS_PARAM);
int32 float64_to_int32( float64 STATUS_PARAM );

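A sketch of how a target might use the new accessor pairs together with float_round_ties_away. It assumes the usual non-inline softfloat build, where STATUS_PARAM expands to a trailing float_status * argument; 2.5f (0x40200000) is chosen because it is a halfway case, so the two modes visibly differ:

#include "fpu/softfloat.h"

static int32 ties_away_demo(void)
{
    float_status fpst = { 0 };
    int old = get_float_rounding_mode(&fpst);
    int32 away, even;

    set_float_rounding_mode(float_round_ties_away, &fpst);
    away = float32_to_int32(make_float32(0x40200000), &fpst);  /* 2.5f -> 3 */

    set_float_rounding_mode(float_round_nearest_even, &fpst);
    even = float32_to_int32(make_float32(0x40200000), &fpst);  /* 2.5f -> 2 */

    set_float_rounding_mode(old, &fpst);  /* restore the caller's mode */
    return away - even;                   /* 1 */
}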
View file

@ -37,7 +37,7 @@ typedef struct gic_irq_state {
uint8_t active;
uint8_t level;
bool model; /* 0 = N:N, 1 = 1:N */
bool trigger; /* nonzero = edge triggered. */
bool edge_trigger; /* true: edge-triggered, false: level-triggered */
} gic_irq_state;
typedef struct GICState {

View file

@ -7,3 +7,4 @@ struct target_pt_regs {
#define UNAME_MACHINE "aarch64"
#define UNAME_MINIMUM_RELEASE "3.8.0"
#define TARGET_CLONE_BACKWARDS

View file

@ -29,7 +29,10 @@ static inline void cpu_clone_regs(CPUARMState *env, target_ulong newsp)
static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls)
{
env->sr.tpidr_el0 = newtls;
/* Note that AArch64 Linux keeps the TLS pointer in TPIDR; this is
* different from AArch32 Linux, which uses TPIDRRO.
*/
env->cp15.tpidr_el0 = newtls;
}
#endif

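For reference, the guest-visible side of the comment above: AArch64 Linux user code reads its thread pointer directly from TPIDR_EL0, which is why cpu_set_tls() only has to update that one register. A hypothetical user-space sketch (not QEMU code):

static inline void *aarch64_thread_pointer(void)
{
    void *tp;
    __asm__ ("mrs %0, tpidr_el0" : "=r" (tp));
    return tp;
}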
View file

@ -29,7 +29,7 @@ static inline void cpu_clone_regs(CPUARMState *env, target_ulong newsp)
static inline void cpu_set_tls(CPUARMState *env, target_ulong newtls)
{
env->cp15.c13_tls2 = newtls;
env->cp15.tpidrro_el0 = newtls;
}
#endif

View file

@ -566,7 +566,7 @@ do_kernel_trap(CPUARMState *env)
end_exclusive();
break;
case 0xffff0fe0: /* __kernel_get_tls */
env->regs[0] = env->cp15.c13_tls2;
env->regs[0] = env->cp15.tpidrro_el0;
break;
case 0xffff0f60: /* __kernel_cmpxchg64 */
arm_kernel_cmpxchg64_helper(env);
@ -585,20 +585,25 @@ do_kernel_trap(CPUARMState *env)
return 0;
}
#endif
/* Store exclusive handling for AArch32 */
static int do_strex(CPUARMState *env)
{
uint32_t val;
uint64_t val;
int size;
int rc = 1;
int segv = 0;
uint32_t addr;
start_exclusive();
addr = env->exclusive_addr;
if (addr != env->exclusive_test) {
if (env->exclusive_addr != env->exclusive_test) {
goto fail;
}
/* We know we're always AArch32 so the address is in uint32_t range
* unless it was the -1 exclusive-monitor-lost value (which won't
* match exclusive_test above).
*/
assert(extract64(env->exclusive_addr, 32, 32) == 0);
addr = env->exclusive_addr;
size = env->exclusive_info & 0xf;
switch (size) {
case 0:
@ -618,19 +623,19 @@ static int do_strex(CPUARMState *env)
env->cp15.c6_data = addr;
goto done;
}
if (val != env->exclusive_val) {
goto fail;
}
if (size == 3) {
segv = get_user_u32(val, addr + 4);
uint32_t valhi;
segv = get_user_u32(valhi, addr + 4);
if (segv) {
env->cp15.c6_data = addr + 4;
goto done;
}
if (val != env->exclusive_high) {
goto fail;
}
val = deposit64(val, 32, 32, valhi);
}
if (val != env->exclusive_val) {
goto fail;
}
val = env->regs[(env->exclusive_info >> 8) & 0xf];
switch (size) {
case 0:
@ -665,7 +670,6 @@ done:
return segv;
}
#ifdef TARGET_ABI32
void cpu_loop(CPUARMState *env)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
@ -880,6 +884,122 @@ void cpu_loop(CPUARMState *env)
#else
/*
* Handle AArch64 store-release exclusive
*
* rs = the register in which to return the status result
* rt = the register that is stored
* rt2 = the second register stored (for the pair forms, STXP/STLXP)
*
*/
static int do_strex_a64(CPUARMState *env)
{
uint64_t val;
int size;
bool is_pair;
int rc = 1;
int segv = 0;
uint64_t addr;
int rs, rt, rt2;
start_exclusive();
/* size | (is_pair << 2) | (rs << 4) | (rt << 9) | (rt2 << 14) */
size = extract32(env->exclusive_info, 0, 2);
is_pair = extract32(env->exclusive_info, 2, 1);
rs = extract32(env->exclusive_info, 4, 5);
rt = extract32(env->exclusive_info, 9, 5);
rt2 = extract32(env->exclusive_info, 14, 5);
addr = env->exclusive_addr;
if (addr != env->exclusive_test) {
goto finish;
}
switch (size) {
case 0:
segv = get_user_u8(val, addr);
break;
case 1:
segv = get_user_u16(val, addr);
break;
case 2:
segv = get_user_u32(val, addr);
break;
case 3:
segv = get_user_u64(val, addr);
break;
default:
abort();
}
if (segv) {
env->cp15.c6_data = addr;
goto error;
}
if (val != env->exclusive_val) {
goto finish;
}
if (is_pair) {
if (size == 2) {
segv = get_user_u32(val, addr + 4);
} else {
segv = get_user_u64(val, addr + 8);
}
if (segv) {
env->cp15.c6_data = addr + (size == 2 ? 4 : 8);
goto error;
}
if (val != env->exclusive_high) {
goto finish;
}
}
val = env->xregs[rt];
switch (size) {
case 0:
segv = put_user_u8(val, addr);
break;
case 1:
segv = put_user_u16(val, addr);
break;
case 2:
segv = put_user_u32(val, addr);
break;
case 3:
segv = put_user_u64(val, addr);
break;
}
if (segv) {
goto error;
}
if (is_pair) {
val = env->xregs[rt2];
if (size == 2) {
segv = put_user_u32(val, addr + 4);
} else {
segv = put_user_u64(val, addr + 8);
}
if (segv) {
env->cp15.c6_data = addr + (size == 2 ? 4 : 8);
goto error;
}
}
rc = 0;
finish:
env->pc += 4;
/* rs == 31 encodes a write to the ZR, thus throwing away
* the status return. This is rather silly but valid.
*/
if (rs < 31) {
env->xregs[rs] = rc;
}
error:
/* instruction faulted, PC does not advance */
/* either way a strex releases any exclusive lock we have */
env->exclusive_addr = -1;
end_exclusive();
return segv;
}
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
@ -939,7 +1059,7 @@ void cpu_loop(CPUARMState *env)
}
break;
case EXCP_STREX:
if (do_strex(env)) {
if (do_strex_a64(env)) {
addr = env->cp15.c6_data;
goto do_segv;
}
@ -951,6 +1071,12 @@ void cpu_loop(CPUARMState *env)
abort();
}
process_pending_signals(env);
/* Exception return on AArch64 always clears the exclusive monitor,
* so any return to running guest code implies this.
* A strex (successful or otherwise) also clears the monitor, so
* we don't need to specialcase EXCP_STREX.
*/
env->exclusive_addr = -1;
}
}
#endif /* ndef TARGET_ABI32 */

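The bit-packing comment in do_strex_a64() can be checked with a worked value. For a hypothetical stxp w1, x2, x3, [x0] (size=3, is_pair=1, rs=1, rt=2, rt2=3) the packed word is 0xC417, and the same extract32() calls recover every field. extract32() is redefined locally here so the sketch stands alone; QEMU's own version lives in bitops.h:

#include <assert.h>
#include <stdint.h>

static uint32_t extract32(uint32_t v, int start, int len)
{
    return (v >> start) & (~0u >> (32 - len));
}

int main(void)
{
    uint32_t info = 3 | (1 << 2) | (1 << 4) | (2 << 9) | (3 << 14);

    assert(info == 0xC417);
    assert(extract32(info, 0, 2) == 3);   /* size    */
    assert(extract32(info, 2, 1) == 1);   /* is_pair */
    assert(extract32(info, 4, 5) == 1);   /* rs      */
    assert(extract32(info, 9, 5) == 2);   /* rt      */
    assert(extract32(info, 14, 5) == 3);  /* rt2     */
    return 0;
}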
View file

@ -1189,8 +1189,8 @@ static int target_setup_sigframe(struct target_rt_sigframe *sf,
__put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
#endif
}
__put_user(/*env->fpsr*/0, &aux->fpsimd.fpsr);
__put_user(/*env->fpcr*/0, &aux->fpsimd.fpcr);
__put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
__put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
__put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
__put_user(sizeof(struct target_fpsimd_context),
&aux->fpsimd.head.size);
@ -1209,7 +1209,7 @@ static int target_restore_sigframe(CPUARMState *env,
int i;
struct target_aux_context *aux =
(struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
uint32_t magic, size;
uint32_t magic, size, fpsr, fpcr;
uint64_t pstate;
target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
@ -1235,6 +1235,10 @@ static int target_restore_sigframe(CPUARMState *env,
for (i = 0; i < 32 * 2; i++) {
__get_user(env->vfp.regs[i], &aux->fpsimd.vregs[i]);
}
__get_user(fpsr, &aux->fpsimd.fpsr);
vfp_set_fpsr(env, fpsr);
__get_user(fpcr, &aux->fpsimd.fpcr);
vfp_set_fpcr(env, fpcr);
return 0;
}

View file

@ -66,6 +66,18 @@
/* ARM-specific interrupt pending bits. */
#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
/* The usual mapping for an AArch64 system register to its AArch32
* counterpart is for the 32 bit world to have access to the lower
* half only (with writes leaving the upper half untouched). It's
* therefore useful to be able to pass TCG the offset of the least
* significant half of a uint64_t struct member.
*/
#ifdef HOST_WORDS_BIGENDIAN
#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#else
#define offsetoflow32(S, M) offsetof(S, M)
#endif
/* Meanings of the ARMCPU object's two inbound GPIO lines */
#define ARM_CPU_IRQ 0
#define ARM_CPU_FIQ 1
@ -188,9 +200,9 @@ typedef struct CPUARMState {
uint32_t c12_vbar; /* vector base address register */
uint32_t c13_fcse; /* FCSE PID. */
uint32_t c13_context; /* Context ID. */
uint32_t c13_tls1; /* User RW Thread register. */
uint32_t c13_tls2; /* User RO Thread register. */
uint32_t c13_tls3; /* Privileged Thread register. */
uint64_t tpidr_el0; /* User RW Thread register. */
uint64_t tpidrro_el0; /* User RO Thread register. */
uint64_t tpidr_el1; /* Privileged Thread register. */
uint32_t c14_cntfrq; /* Counter Frequency register */
uint32_t c14_cntkctl; /* Timer Control register */
ARMGenericTimer c14_timer[NUM_GTIMERS];
@ -266,11 +278,11 @@ typedef struct CPUARMState {
float_status fp_status;
float_status standard_fp_status;
} vfp;
uint32_t exclusive_addr;
uint32_t exclusive_val;
uint32_t exclusive_high;
uint64_t exclusive_addr;
uint64_t exclusive_val;
uint64_t exclusive_high;
#if defined(CONFIG_USER_ONLY)
uint32_t exclusive_test;
uint64_t exclusive_test;
uint32_t exclusive_info;
#endif
@ -475,6 +487,15 @@ static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
vfp_set_fpscr(env, new_fpscr);
}
enum arm_fprounding {
FPROUNDING_TIEEVEN,
FPROUNDING_POSINF,
FPROUNDING_NEGINF,
FPROUNDING_ZERO,
FPROUNDING_TIEAWAY,
FPROUNDING_ODD
};
enum arm_cpu_mode {
ARM_CPU_MODE_USR = 0x10,
ARM_CPU_MODE_FIQ = 0x11,
@ -572,18 +593,43 @@ void armv7m_nvic_complete_irq(void *opaque, int irq);
* or via MRRC/MCRR?)
* We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
* (In this case crn and opc2 should be zero.)
* For AArch64, there is no 32/64 bit size distinction;
* instead all registers have a 2 bit op0, 3 bit op1 and op2,
* and 4 bit CRn and CRm. The encoding patterns are chosen
* to be easy to convert to and from the KVM encodings, and also
* so that the hashtable can contain both AArch32 and AArch64
* registers (to allow for interprocessing where we might run
* 32 bit code on a 64 bit core).
*/
/* This bit is private to our hashtable cpreg; in KVM register
* IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
* in the upper bits of the 64 bit ID.
*/
#define CP_REG_AA64_SHIFT 28
#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)
#define ENCODE_CP_REG(cp, is64, crn, crm, opc1, opc2) \
(((cp) << 16) | ((is64) << 15) | ((crn) << 11) | \
((crm) << 7) | ((opc1) << 3) | (opc2))
#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
(CP_REG_AA64_MASK | \
((cp) << CP_REG_ARM_COPROC_SHIFT) | \
((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \
((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \
((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \
((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \
((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))
/* Convert a full 64 bit KVM register ID to the truncated 32 bit
* version used as a key for the coprocessor register hashtable
*/
static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
{
uint32_t cpregid = kvmid;
if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
cpregid |= CP_REG_AA64_MASK;
} else if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
cpregid |= (1 << 15);
}
return cpregid;
@ -594,11 +640,18 @@ static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
*/
static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
{
uint64_t kvmid = cpregid & ~(1 << 15);
if (cpregid & (1 << 15)) {
kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
uint64_t kvmid;
if (cpregid & CP_REG_AA64_MASK) {
kvmid = cpregid & ~CP_REG_AA64_MASK;
kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
} else {
kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
kvmid = cpregid & ~(1 << 15);
if (cpregid & (1 << 15)) {
kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
} else {
kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
}
}
return kvmid;
}
@ -628,12 +681,28 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
#define ARM_CP_IO 64
#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8))
#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8))
#define ARM_LAST_SPECIAL ARM_CP_WFI
#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8))
#define ARM_LAST_SPECIAL ARM_CP_NZCV
/* Used only as a terminator for ARMCPRegInfo lists */
#define ARM_CP_SENTINEL 0xffff
/* Mask of only the flag bits in a type field */
#define ARM_CP_FLAG_MASK 0x7f
/* Valid values for ARMCPRegInfo state field, indicating which of
* the AArch32 and AArch64 execution states this register is visible in.
* If the reginfo doesn't explicitly specify then it is AArch32 only.
* If the reginfo is declared to be visible in both states then a second
* reginfo is synthesised for the AArch32 view of the AArch64 register,
* such that the AArch32 view is the lower 32 bits of the AArch64 one.
* Note that we rely on the values of these enums as we iterate through
* the various states in some places.
*/
enum {
ARM_CP_STATE_AA32 = 0,
ARM_CP_STATE_AA64 = 1,
ARM_CP_STATE_BOTH = 2,
};
/* Return true if cptype is a valid type field. This is used to try to
* catch errors where the sentinel has been accidentally left off the end
* of a list of registers.
@ -655,6 +724,8 @@ static inline bool cptype_valid(int cptype)
* (ie anything visible in PL2 is visible in S-PL1, some things are only
* visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
* terminology a little and call this PL3.
* In AArch64 things are somewhat simpler as the PLx bits line up exactly
* with the ELx exception levels.
*
* If access permissions for a register are more complex than can be
* described with these bits, then use a laxer set of restrictions, and
@ -676,6 +747,10 @@ static inline bool cptype_valid(int cptype)
static inline int arm_current_pl(CPUARMState *env)
{
if (env->aarch64) {
return extract32(env->pstate, 2, 2);
}
if ((env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR) {
return 0;
}
@ -713,12 +788,22 @@ struct ARMCPRegInfo {
* then behave differently on read/write if necessary.
* For 64 bit registers, only crm and opc1 are relevant; crn and opc2
* must both be zero.
* For AArch64-visible registers, opc0 is also used.
* Since there are no "coprocessors" in AArch64, cp is purely used as a
* way to distinguish (for KVM's benefit) guest-visible system registers
* from demuxed ones provided to preserve the "no side effects on
* KVM register read/write from QEMU" semantics. cp==0x13 is guest
* visible (to match KVM's encoding); cp==0 will be converted to
* cp==0x13 when the ARMCPRegInfo is registered, for convenience.
*/
uint8_t cp;
uint8_t crn;
uint8_t crm;
uint8_t opc0;
uint8_t opc1;
uint8_t opc2;
/* Execution state in which this register is visible: ARM_CP_STATE_* */
int state;
/* Register type: ARM_CP_* bits/values */
int type;
/* Access rights: PL*_[RW] */
@ -790,7 +875,7 @@ static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
define_one_arm_cp_reg_with_opaque(cpu, regs, 0);
}
const ARMCPRegInfo *get_arm_cp_reginfo(ARMCPU *cpu, uint32_t encoded_cp);
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);
/* CPWriteFn that can be used to implement writes-ignored behaviour */
int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
@ -798,10 +883,15 @@ int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
/* CPReadFn that can be used for read-as-zero behaviour */
int arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value);
static inline bool cp_access_ok(CPUARMState *env,
/* CPResetFn that does nothing, for use if no reset is required even
* if fieldoffset is non zero.
*/
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);
static inline bool cp_access_ok(int current_pl,
const ARMCPRegInfo *ri, int isread)
{
return (ri->access >> ((arm_current_pl(env) * 2) + isread)) & 1;
return (ri->access >> ((current_pl * 2) + isread)) & 1;
}
/**

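The offsetoflow32() comment above is easy to sanity-check: for a 64-bit backing field, the least-significant 32 bits start at the field's own offset on a little-endian host and four bytes further in on a big-endian one. A standalone demo (BIG_ENDIAN_DEMO is a stand-in for HOST_WORDS_BIGENDIAN, and struct regs is hypothetical):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct regs { uint64_t tpidr; };

int main(void)
{
    struct regs r = { .tpidr = 0x1122334455667788ull };
    uint32_t lo;

#ifdef BIG_ENDIAN_DEMO
    memcpy(&lo, (char *)&r + offsetof(struct regs, tpidr) + sizeof(uint32_t), 4);
#else
    memcpy(&lo, (char *)&r + offsetof(struct regs, tpidr), 4);
#endif
    assert(lo == 0x55667788);  /* either way: the low half of the field */
    return 0;
}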
View file

@ -58,6 +58,7 @@ static const ARMCPUInfo aarch64_cpus[] = {
#ifdef CONFIG_USER_ONLY
{ .name = "any", .initfn = aarch64_any_initfn },
#endif
{ .name = NULL } /* TODO: drop when we support more CPUs */
};
static void aarch64_cpu_initfn(Object *obj)
@ -100,6 +101,11 @@ static void aarch64_cpu_register(const ARMCPUInfo *info)
.class_init = info->class_init,
};
/* TODO: drop when we support more CPUs - all entries will have name set */
if (!info->name) {
return;
}
type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
type_register(&type_info);
g_free((void *)type_info.name);

View file

@ -77,3 +77,48 @@ uint64_t HELPER(rbit64)(uint64_t x)
return x;
}
/* Convert a softfloat float_relation_ (as returned by
* the float*_compare functions) to the correct ARM
* NZCV flag state.
*/
static inline uint32_t float_rel_to_flags(int res)
{
uint64_t flags;
switch (res) {
case float_relation_equal:
flags = PSTATE_Z | PSTATE_C;
break;
case float_relation_less:
flags = PSTATE_N;
break;
case float_relation_greater:
flags = PSTATE_C;
break;
case float_relation_unordered:
default:
flags = PSTATE_C | PSTATE_V;
break;
}
return flags;
}
uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
{
return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
}
uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
{
return float_rel_to_flags(float32_compare(x, y, fp_status));
}
uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
{
return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
}
uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
{
return float_rel_to_flags(float64_compare(x, y, fp_status));
}

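For reference, the switch in float_rel_to_flags() above reproduces the architected NZCV result patterns for floating-point compares:

    result                     N Z C V
    float_relation_equal       0 1 1 0   (PSTATE_Z | PSTATE_C)
    float_relation_less        1 0 0 0   (PSTATE_N)
    float_relation_greater     0 0 1 0   (PSTATE_C)
    float_relation_unordered   0 0 1 1   (PSTATE_C | PSTATE_V)

So, for example, comparing two equal values sets Z and C, while a NaN operand produces the "unordered" pattern C and V.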
View file

@ -22,3 +22,7 @@ DEF_HELPER_FLAGS_1(clz64, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(cls64, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(cls32, TCG_CALL_NO_RWG_SE, i32, i32)
DEF_HELPER_FLAGS_1(rbit64, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr)
DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr)
DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, ptr)
DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, ptr)

View file

@ -142,11 +142,7 @@ static bool read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
} else if (ri->readfn) {
return (ri->readfn(env, ri, v) == 0);
} else {
if (ri->type & ARM_CP_64BIT) {
*v = CPREG_FIELD64(env, ri);
} else {
*v = CPREG_FIELD32(env, ri);
}
raw_read(env, ri, v);
}
return true;
}
@ -167,11 +163,7 @@ static bool write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
} else if (ri->writefn) {
return (ri->writefn(env, ri, v) == 0);
} else {
if (ri->type & ARM_CP_64BIT) {
CPREG_FIELD64(env, ri) = v;
} else {
CPREG_FIELD32(env, ri) = v;
}
raw_write(env, ri, v);
}
return true;
}
@ -186,7 +178,7 @@ bool write_cpustate_to_list(ARMCPU *cpu)
uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
const ARMCPRegInfo *ri;
uint64_t v;
ri = get_arm_cp_reginfo(cpu, regidx);
ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
if (!ri) {
ok = false;
continue;
@ -214,7 +206,7 @@ bool write_list_to_cpustate(ARMCPU *cpu)
uint64_t readback;
const ARMCPRegInfo *ri;
ri = get_arm_cp_reginfo(cpu, regidx);
ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
if (!ri) {
ok = false;
continue;
@ -242,7 +234,7 @@ static void add_cpreg_to_list(gpointer key, gpointer opaque)
const ARMCPRegInfo *ri;
regidx = *(uint32_t *)key;
ri = get_arm_cp_reginfo(cpu, regidx);
ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
if (!(ri->type & ARM_CP_NO_MIGRATE)) {
cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
@ -258,7 +250,7 @@ static void count_cpreg(gpointer key, gpointer opaque)
const ARMCPRegInfo *ri;
regidx = *(uint32_t *)key;
ri = get_arm_cp_reginfo(cpu, regidx);
ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
if (!(ri->type & ARM_CP_NO_MIGRATE)) {
cpu->cpreg_array_len++;
@ -397,7 +389,7 @@ static const ARMCPRegInfo cp_reginfo[] = {
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
.resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
{ .name = "CONTEXTIDR", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 1,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_fcse),
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c13_context),
.resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
/* ??? This covers not just the impdef TLB lockdown registers but also
* some v7VMSA registers relating to TEX remap, so it is overly broad.
@ -740,18 +732,26 @@ static const ARMCPRegInfo t2ee_cp_reginfo[] = {
};
static const ARMCPRegInfo v6k_cp_reginfo[] = {
{ .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
.access = PL0_RW,
.fieldoffset = offsetof(CPUARMState, cp15.tpidr_el0), .resetvalue = 0 },
{ .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL0_RW,
.fieldoffset = offsetof(CPUARMState, cp15.c13_tls1),
.resetvalue = 0 },
.fieldoffset = offsetoflow32(CPUARMState, cp15.tpidr_el0),
.resetfn = arm_cp_reset_ignore },
{ .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
.access = PL0_R|PL1_W,
.fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el0), .resetvalue = 0 },
{ .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
.access = PL0_R|PL1_W,
.fieldoffset = offsetof(CPUARMState, cp15.c13_tls2),
.resetvalue = 0 },
{ .name = "TPIDRPRW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 4,
.fieldoffset = offsetoflow32(CPUARMState, cp15.tpidrro_el0),
.resetfn = arm_cp_reset_ignore },
{ .name = "TPIDR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
.access = PL1_RW,
.fieldoffset = offsetof(CPUARMState, cp15.c13_tls3),
.resetvalue = 0 },
.fieldoffset = offsetof(CPUARMState, cp15.tpidr_el1), .resetvalue = 0 },
REGINFO_SENTINEL
};
@ -1560,6 +1560,64 @@ static const ARMCPRegInfo lpae_cp_reginfo[] = {
REGINFO_SENTINEL
};
static int aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t *value)
{
*value = vfp_get_fpcr(env);
return 0;
}
static int aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
vfp_set_fpcr(env, value);
return 0;
}
static int aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t *value)
{
*value = vfp_get_fpsr(env);
return 0;
}
static int aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
vfp_set_fpsr(env, value);
return 0;
}
static const ARMCPRegInfo v8_cp_reginfo[] = {
/* Minimal set of EL0-visible registers. This will need to be expanded
* significantly for system emulation of AArch64 CPUs.
*/
{ .name = "NZCV", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
.access = PL0_RW, .type = ARM_CP_NZCV },
{ .name = "FPCR", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
.access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
{ .name = "FPSR", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
.access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
/* This claims a 32 byte cacheline size for icache and dcache, VIPT icache.
* It will eventually need to have a CPU-specified reset value.
*/
{ .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
.access = PL0_R, .type = ARM_CP_CONST,
.resetvalue = 0x80030003 },
/* Prohibit use of DC ZVA. OPTME: implement DC ZVA and allow its use.
* For system mode the DZP bit here will need to be computed, not constant.
*/
{ .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
.access = PL0_R, .type = ARM_CP_CONST,
.resetvalue = 0x10 },
REGINFO_SENTINEL
};
static int sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
env->cp15.c1_sys = value;
@ -1662,6 +1720,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
} else {
define_arm_cp_regs(cpu, not_v7_cp_reginfo);
}
if (arm_feature(env, ARM_FEATURE_V8)) {
define_arm_cp_regs(cpu, v8_cp_reginfo);
}
if (arm_feature(env, ARM_FEATURE_MPU)) {
/* These are the MPU registers prior to PMSAv6. Any new
* PMSA core later than the ARM946 will require that we
@ -1937,6 +1998,85 @@ CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
return cpu_list;
}
static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
void *opaque, int state,
int crm, int opc1, int opc2)
{
/* Private utility function for define_one_arm_cp_reg_with_opaque():
* add a single reginfo struct to the hash table.
*/
uint32_t *key = g_new(uint32_t, 1);
ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
if (r->state == ARM_CP_STATE_BOTH && state == ARM_CP_STATE_AA32) {
/* The AArch32 view of a shared register sees the lower 32 bits
* of a 64 bit backing field. It is not migratable as the AArch64
* view handles that. AArch64 also handles reset.
* We assume it is a cp15 register.
*/
r2->cp = 15;
r2->type |= ARM_CP_NO_MIGRATE;
r2->resetfn = arm_cp_reset_ignore;
#ifdef HOST_WORDS_BIGENDIAN
if (r2->fieldoffset) {
r2->fieldoffset += sizeof(uint32_t);
}
#endif
}
if (state == ARM_CP_STATE_AA64) {
/* To allow abbreviation of ARMCPRegInfo
* definitions, we treat cp == 0 as equivalent to
* the value for "standard guest-visible sysreg".
*/
if (r->cp == 0) {
r2->cp = CP_REG_ARM64_SYSREG_CP;
}
*key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
r2->opc0, opc1, opc2);
} else {
*key = ENCODE_CP_REG(r2->cp, is64, r2->crn, crm, opc1, opc2);
}
if (opaque) {
r2->opaque = opaque;
}
/* Make sure reginfo passed to helpers for wildcarded regs
* has the correct crm/opc1/opc2 for this reg, not CP_ANY:
*/
r2->crm = crm;
r2->opc1 = opc1;
r2->opc2 = opc2;
/* By convention, for wildcarded registers only the first
* entry is used for migration; the others are marked as
* NO_MIGRATE so we don't try to transfer the register
* multiple times. Special registers (ie NOP/WFI) are
* never migratable.
*/
if ((r->type & ARM_CP_SPECIAL) ||
((r->crm == CP_ANY) && crm != 0) ||
((r->opc1 == CP_ANY) && opc1 != 0) ||
((r->opc2 == CP_ANY) && opc2 != 0)) {
r2->type |= ARM_CP_NO_MIGRATE;
}
/* Overriding of an existing definition must be explicitly
* requested.
*/
if (!(r->type & ARM_CP_OVERRIDE)) {
ARMCPRegInfo *oldreg;
oldreg = g_hash_table_lookup(cpu->cp_regs, key);
if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
fprintf(stderr, "Register redefined: cp=%d %d bit "
"crn=%d crm=%d opc1=%d opc2=%d, "
"was %s, now %s\n", r2->cp, 32 + 32 * is64,
r2->crn, r2->crm, r2->opc1, r2->opc2,
oldreg->name, r2->name);
g_assert_not_reached();
}
}
g_hash_table_insert(cpu->cp_regs, key, r2);
}
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
const ARMCPRegInfo *r, void *opaque)
{
@ -1951,8 +2091,19 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
* At least one of the original and the second definition should
* include ARM_CP_OVERRIDE in its type bits -- this is just a guard
* against accidental use.
*
* The state field defines whether the register is to be
* visible in the AArch32 or AArch64 execution state. If the
* state is set to ARM_CP_STATE_BOTH then we synthesise a
* reginfo structure for the AArch32 view, which sees the lower
* 32 bits of the 64 bit register.
*
* Only registers visible in AArch64 may set r->opc0; opc0 cannot
* be wildcarded. AArch64 registers are always considered to be 64
* bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
* the register, if any.
*/
int crm, opc1, opc2;
int crm, opc1, opc2, state;
int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
@ -1961,6 +2112,52 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
/* 64 bit registers have only CRm and Opc1 fields */
assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
/* op0 only exists in the AArch64 encodings */
assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
/* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
/* The AArch64 pseudocode CheckSystemAccess() specifies that op1
* encodes a minimum access level for the register. We roll this
* runtime check into our general permission check code, so check
* here that the reginfo's specified permissions are strict enough
* to encompass the generic architectural permission check.
*/
if (r->state != ARM_CP_STATE_AA32) {
int mask = 0;
switch (r->opc1) {
case 0: case 1: case 2:
/* min_EL EL1 */
mask = PL1_RW;
break;
case 3:
/* min_EL EL0 */
mask = PL0_RW;
break;
case 4:
/* min_EL EL2 */
mask = PL2_RW;
break;
case 5:
/* unallocated encoding, so not possible */
assert(false);
break;
case 6:
/* min_EL EL3 */
mask = PL3_RW;
break;
case 7:
/* min_EL EL1, secure mode only (we don't check the latter) */
mask = PL1_RW;
break;
default:
/* broken reginfo with out-of-range opc1 */
assert(false);
break;
}
/* assert our permissions are not too lax (stricter is fine) */
assert((r->access & ~mask) == 0);
}
/* Check that the register definition has enough info to handle
* reads and writes if they are permitted.
*/
@ -1977,48 +2174,14 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
for (crm = crmmin; crm <= crmmax; crm++) {
for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
uint32_t *key = g_new(uint32_t, 1);
ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
*key = ENCODE_CP_REG(r->cp, is64, r->crn, crm, opc1, opc2);
if (opaque) {
r2->opaque = opaque;
}
/* Make sure reginfo passed to helpers for wildcarded regs
* has the correct crm/opc1/opc2 for this reg, not CP_ANY:
*/
r2->crm = crm;
r2->opc1 = opc1;
r2->opc2 = opc2;
/* By convention, for wildcarded registers only the first
* entry is used for migration; the others are marked as
* NO_MIGRATE so we don't try to transfer the register
* multiple times. Special registers (ie NOP/WFI) are
* never migratable.
*/
if ((r->type & ARM_CP_SPECIAL) ||
((r->crm == CP_ANY) && crm != 0) ||
((r->opc1 == CP_ANY) && opc1 != 0) ||
((r->opc2 == CP_ANY) && opc2 != 0)) {
r2->type |= ARM_CP_NO_MIGRATE;
}
/* Overriding of an existing definition must be explicitly
* requested.
*/
if (!(r->type & ARM_CP_OVERRIDE)) {
ARMCPRegInfo *oldreg;
oldreg = g_hash_table_lookup(cpu->cp_regs, key);
if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
fprintf(stderr, "Register redefined: cp=%d %d bit "
"crn=%d crm=%d opc1=%d opc2=%d, "
"was %s, now %s\n", r2->cp, 32 + 32 * is64,
r2->crn, r2->crm, r2->opc1, r2->opc2,
oldreg->name, r2->name);
g_assert_not_reached();
for (state = ARM_CP_STATE_AA32;
state <= ARM_CP_STATE_AA64; state++) {
if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
continue;
}
add_cpreg_to_hashtable(cpu, r, opaque, state,
crm, opc1, opc2);
}
g_hash_table_insert(cpu->cp_regs, key, r2);
}
}
}
@ -2034,9 +2197,9 @@ void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
}
}
const ARMCPRegInfo *get_arm_cp_reginfo(ARMCPU *cpu, uint32_t encoded_cp)
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
{
return g_hash_table_lookup(cpu->cp_regs, &encoded_cp);
return g_hash_table_lookup(cpregs, &encoded_cp);
}
int arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
@ -2053,6 +2216,11 @@ int arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value)
return 0;
}
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
{
/* Helper coprocessor reset function for do-nothing-on-reset registers */
}
static int bad_mode_switch(CPUARMState *env, int mode)
{
/* Return true if it is not valid for us to switch to
@ -3639,16 +3807,16 @@ void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
if (changed & (3 << 22)) {
i = (val >> 22) & 3;
switch (i) {
case 0:
case FPROUNDING_TIEEVEN:
i = float_round_nearest_even;
break;
case 1:
case FPROUNDING_POSINF:
i = float_round_up;
break;
case 2:
case FPROUNDING_NEGINF:
i = float_round_down;
break;
case 3:
case FPROUNDING_ZERO:
i = float_round_to_zero;
break;
}
@ -3688,6 +3856,10 @@ VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
VFP_BINOP(min)
VFP_BINOP(max)
VFP_BINOP(minnum)
VFP_BINOP(maxnum)
#undef VFP_BINOP
float32 VFP_HELPER(neg, s)(float32 a)
@ -3804,37 +3976,77 @@ float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
}
/* VFP3 fixed point conversion. */
#define VFP_CONV_FIX(name, p, fsz, itype, sign) \
float##fsz HELPER(vfp_##name##to##p)(uint##fsz##_t x, uint32_t shift, \
void *fpstp) \
#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
void *fpstp) \
{ \
float_status *fpst = fpstp; \
float##fsz tmp; \
tmp = sign##int32_to_##float##fsz((itype##_t)x, fpst); \
tmp = itype##_to_##float##fsz(x, fpst); \
return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
} \
uint##fsz##_t HELPER(vfp_to##name##p)(float##fsz x, uint32_t shift, \
void *fpstp) \
}
/* Notice that we want only input-denormal exception flags from the
* scalbn operation: the other possible flags (overflow+inexact if
* we overflow to infinity, output-denormal) aren't correct for the
* complete scale-and-convert operation.
*/
#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
uint32_t shift, \
void *fpstp) \
{ \
float_status *fpst = fpstp; \
int old_exc_flags = get_float_exception_flags(fpst); \
float##fsz tmp; \
if (float##fsz##_is_any_nan(x)) { \
float_raise(float_flag_invalid, fpst); \
return 0; \
} \
tmp = float##fsz##_scalbn(x, shift, fpst); \
return float##fsz##_to_##itype##_round_to_zero(tmp, fpst); \
old_exc_flags |= get_float_exception_flags(fpst) \
& float_flag_input_denormal; \
set_float_exception_flags(old_exc_flags, fpst); \
return float##fsz##_to_##itype##round(tmp, fpst); \
}
VFP_CONV_FIX(sh, d, 64, int16, )
VFP_CONV_FIX(sl, d, 64, int32, )
VFP_CONV_FIX(uh, d, 64, uint16, u)
VFP_CONV_FIX(ul, d, 64, uint32, u)
VFP_CONV_FIX(sh, s, 32, int16, )
VFP_CONV_FIX(sl, s, 32, int32, )
VFP_CONV_FIX(uh, s, 32, uint16, u)
VFP_CONV_FIX(ul, s, 32, uint32, u)
#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
VFP_CONV_FIX(sh, d, 64, 64, int16)
VFP_CONV_FIX(sl, d, 64, 64, int32)
VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
VFP_CONV_FIX(uh, d, 64, 64, uint16)
VFP_CONV_FIX(ul, d, 64, 64, uint32)
VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
VFP_CONV_FIX(sh, s, 32, 32, int16)
VFP_CONV_FIX(sl, s, 32, 32, int32)
VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
VFP_CONV_FIX(uh, s, 32, 32, uint16)
VFP_CONV_FIX(ul, s, 32, 32, uint32)
VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
#undef VFP_CONV_FIX
#undef VFP_CONV_FIX_FLOAT
#undef VFP_CONV_FLOAT_FIX_ROUND
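To make the macro plumbing concrete, this is roughly what the single-precision signed-32-bit float-to-fixed instantiation expands to. A hand-expanded sketch of VFP_CONV_FLOAT_FIX_ROUND(sl, s, 32, 32, int32, _round_to_zero), not compiler output:

uint32_t HELPER(vfp_tosls_round_to_zero)(float32 x, uint32_t shift,
                                         void *fpstp)
{
    float_status *fpst = fpstp;
    int old_exc_flags = get_float_exception_flags(fpst);
    float32 tmp;

    if (float32_is_any_nan(x)) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    /* Scale by 2^shift, then convert; keep only the input-denormal flag
     * that the scalbn step may have raised. */
    tmp = float32_scalbn(x, shift, fpst);
    old_exc_flags |= get_float_exception_flags(fpst)
        & float_flag_input_denormal;
    set_float_exception_flags(old_exc_flags, fpst);
    return float32_to_int32_round_to_zero(tmp, fpst);
}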
/* Set the current fp rounding mode and return the old one.
* The argument is a softfloat float_round_ value.
*/
uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
{
float_status *fp_status = &env->vfp.fp_status;
uint32_t prev_rmode = get_float_rounding_mode(fp_status);
set_float_rounding_mode(rmode, fp_status);
return prev_rmode;
}
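Callers are expected to bracket an instruction with a set/restore pair. A hypothetical TCG usage sketch (the helper itself is part of this patch; the surrounding code is illustrative):

/* Force round-to-zero for one operation, then restore the old mode. */
TCGv_i32 tcg_rmode = tcg_const_i32(float_round_to_zero);
gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env); /* tcg_rmode <- old */
/* ... emit the FP operation that needs the temporary rounding mode ... */
gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env); /* put old mode back */
tcg_temp_free_i32(tcg_rmode);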
/* Half precision conversions. */
static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
@@ -3877,6 +4089,26 @@ uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
}
float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
{
int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
if (ieee) {
return float64_maybe_silence_nan(r);
}
return r;
}
uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
{
int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
if (ieee) {
r = float16_maybe_silence_nan(r);
}
return float16_val(r);
}
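The ieee flag above tests FPSCR bit 26, the AHP (Alternative Half Precision) control: when it is clear the conversion follows IEEE 754, where a signalling NaN input must yield a quiet NaN (hence the maybe_silence_nan calls); when it is set, the ARM alternative format applies and has no NaNs to silence. A standalone sketch of the check (the helper name is illustrative, not from the patch):

#include <stdbool.h>
#include <stdint.h>

static inline bool fpscr_ieee_half(uint32_t fpscr)
{
    return (fpscr & (1u << 26)) == 0;  /* AHP clear -> IEEE half precision */
}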
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)
@@ -4142,27 +4374,47 @@ float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
return float64_muladd(a, b, c, 0, fpst);
}
/* ARMv8 VMAXNM/VMINNM */
float32 VFP_HELPER(maxnm, s)(float32 a, float32 b, void *fpstp)
/* ARMv8 round to integral */
float32 HELPER(rints_exact)(float32 x, void *fp_status)
{
float_status *fpst = fpstp;
return float32_maxnum(a, b, fpst);
return float32_round_to_int(x, fp_status);
}
float64 VFP_HELPER(maxnm, d)(float64 a, float64 b, void *fpstp)
float64 HELPER(rintd_exact)(float64 x, void *fp_status)
{
float_status *fpst = fpstp;
return float64_maxnum(a, b, fpst);
return float64_round_to_int(x, fp_status);
}
float32 VFP_HELPER(minnm, s)(float32 a, float32 b, void *fpstp)
float32 HELPER(rints)(float32 x, void *fp_status)
{
float_status *fpst = fpstp;
return float32_minnum(a, b, fpst);
int old_flags = get_float_exception_flags(fp_status), new_flags;
float32 ret;
ret = float32_round_to_int(x, fp_status);
/* Suppress any inexact exceptions the conversion produced */
if (!(old_flags & float_flag_inexact)) {
new_flags = get_float_exception_flags(fp_status);
set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
}
return ret;
}
float64 VFP_HELPER(minnm, d)(float64 a, float64 b, void *fpstp)
float64 HELPER(rintd)(float64 x, void *fp_status)
{
float_status *fpst = fpstp;
return float64_minnum(a, b, fpst);
int old_flags = get_float_exception_flags(fp_status), new_flags;
float64 ret;
ret = float64_round_to_int(x, fp_status);
/* Suppress any inexact exceptions the conversion produced */
if (!(old_flags & float_flag_inexact)) {
new_flags = get_float_exception_flags(fp_status);
set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
}
return ret;
}
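The non-exact variants must round without raising Inexact, even when the result differs from the input; only flags that were already set survive. The same save/mask/restore pattern, illustrated standalone with C99 fenv in place of softfloat state (illustrative only):

#include <fenv.h>
#include <math.h>
#include <stdio.h>

#pragma STDC FENV_ACCESS ON  /* required for well-defined flag access */

static double rint_no_inexact(double x)
{
    fexcept_t saved;
    fegetexceptflag(&saved, FE_INEXACT); /* capture prior INEXACT state */
    double r = rint(x);                  /* may raise FE_INEXACT */
    fesetexceptflag(&saved, FE_INEXACT); /* restore, discarding the new one */
    return r;
}

int main(void)
{
    feclearexcept(FE_ALL_EXCEPT);
    printf("rounded: %g\n", rint_no_inexact(1.5));          /* 2 */
    printf("inexact: %d\n", fetestexcept(FE_INEXACT) != 0); /* 0 */
    return 0;
}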


@@ -79,6 +79,14 @@ DEF_HELPER_3(vfp_muls, f32, f32, f32, ptr)
DEF_HELPER_3(vfp_muld, f64, f64, f64, ptr)
DEF_HELPER_3(vfp_divs, f32, f32, f32, ptr)
DEF_HELPER_3(vfp_divd, f64, f64, f64, ptr)
DEF_HELPER_3(vfp_maxs, f32, f32, f32, ptr)
DEF_HELPER_3(vfp_maxd, f64, f64, f64, ptr)
DEF_HELPER_3(vfp_mins, f32, f32, f32, ptr)
DEF_HELPER_3(vfp_mind, f64, f64, f64, ptr)
DEF_HELPER_3(vfp_maxnums, f32, f32, f32, ptr)
DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, ptr)
DEF_HELPER_3(vfp_minnums, f32, f32, f32, ptr)
DEF_HELPER_3(vfp_minnumd, f64, f64, f64, ptr)
DEF_HELPER_1(vfp_negs, f32, f32)
DEF_HELPER_1(vfp_negd, f64, f64)
DEF_HELPER_1(vfp_abss, f32, f32)
@@ -107,36 +115,51 @@ DEF_HELPER_2(vfp_tosid, i32, f64, ptr)
DEF_HELPER_2(vfp_tosizs, i32, f32, ptr)
DEF_HELPER_2(vfp_tosizd, i32, f64, ptr)
DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, ptr)
DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, ptr)
DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, ptr)
DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, ptr)
DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_toshs, i32, f32, i32, ptr)
DEF_HELPER_3(vfp_tosls, i32, f32, i32, ptr)
DEF_HELPER_3(vfp_tosqs, i64, f32, i32, ptr)
DEF_HELPER_3(vfp_touhs, i32, f32, i32, ptr)
DEF_HELPER_3(vfp_touls, i32, f32, i32, ptr)
DEF_HELPER_3(vfp_touqs, i64, f32, i32, ptr)
DEF_HELPER_3(vfp_toshd, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_tosld, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_tosqd, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_touhd, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_tould, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_touqd, i64, f64, i32, ptr)
DEF_HELPER_3(vfp_shtos, f32, i32, i32, ptr)
DEF_HELPER_3(vfp_sltos, f32, i32, i32, ptr)
DEF_HELPER_3(vfp_sqtos, f32, i64, i32, ptr)
DEF_HELPER_3(vfp_uhtos, f32, i32, i32, ptr)
DEF_HELPER_3(vfp_ultos, f32, i32, i32, ptr)
DEF_HELPER_3(vfp_uqtos, f32, i64, i32, ptr)
DEF_HELPER_3(vfp_shtod, f64, i64, i32, ptr)
DEF_HELPER_3(vfp_sltod, f64, i64, i32, ptr)
DEF_HELPER_3(vfp_sqtod, f64, i64, i32, ptr)
DEF_HELPER_3(vfp_uhtod, f64, i64, i32, ptr)
DEF_HELPER_3(vfp_ultod, f64, i64, i32, ptr)
DEF_HELPER_3(vfp_uqtod, f64, i64, i32, ptr)
DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, env)
DEF_HELPER_2(vfp_fcvt_f16_to_f32, f32, i32, env)
DEF_HELPER_2(vfp_fcvt_f32_to_f16, i32, f32, env)
DEF_HELPER_2(neon_fcvt_f16_to_f32, f32, i32, env)
DEF_HELPER_2(neon_fcvt_f32_to_f16, i32, f32, env)
DEF_HELPER_FLAGS_2(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, i32, env)
DEF_HELPER_FLAGS_2(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, i32, f64, env)
DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr)
DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr)
DEF_HELPER_3(vfp_maxnmd, f64, f64, f64, ptr)
DEF_HELPER_3(vfp_maxnms, f32, f32, f32, ptr)
DEF_HELPER_3(vfp_minnmd, f64, f64, f64, ptr)
DEF_HELPER_3(vfp_minnms, f32, f32, f32, ptr)
DEF_HELPER_3(recps_f32, f32, f32, f32, env)
DEF_HELPER_3(rsqrts_f32, f32, f32, f32, env)
DEF_HELPER_2(recpe_f32, f32, f32, env)
@@ -150,6 +173,11 @@ DEF_HELPER_3(shr_cc, i32, env, i32, i32)
DEF_HELPER_3(sar_cc, i32, env, i32, i32)
DEF_HELPER_3(ror_cc, i32, env, i32, i32)
DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, ptr)
DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, ptr)
DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, ptr)
DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, ptr)
/* neon_helper.c */
DEF_HELPER_3(neon_qadd_u8, i32, env, i32, i32)
DEF_HELPER_3(neon_qadd_s8, i32, env, i32, i32)
@@ -346,8 +374,6 @@ DEF_HELPER_2(neon_qneg_s8, i32, env, i32)
DEF_HELPER_2(neon_qneg_s16, i32, env, i32)
DEF_HELPER_2(neon_qneg_s32, i32, env, i32)
DEF_HELPER_3(neon_min_f32, i32, i32, i32, ptr)
DEF_HELPER_3(neon_max_f32, i32, i32, i32, ptr)
DEF_HELPER_3(neon_abd_f32, i32, i32, i32, ptr)
DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, ptr)
DEF_HELPER_3(neon_cge_f32, i32, i32, i32, ptr)


@@ -29,12 +29,14 @@
#define CP_REG_SIZE_U32 0x0020000000000000ULL
#define CP_REG_SIZE_U64 0x0030000000000000ULL
#define CP_REG_ARM 0x4000000000000000ULL
#define CP_REG_ARCH_MASK 0xff00000000000000ULL
MISMATCH_CHECK(CP_REG_SIZE_SHIFT, KVM_REG_SIZE_SHIFT)
MISMATCH_CHECK(CP_REG_SIZE_MASK, KVM_REG_SIZE_MASK)
MISMATCH_CHECK(CP_REG_SIZE_U32, KVM_REG_SIZE_U32)
MISMATCH_CHECK(CP_REG_SIZE_U64, KVM_REG_SIZE_U64)
MISMATCH_CHECK(CP_REG_ARM, KVM_REG_ARM)
MISMATCH_CHECK(CP_REG_ARCH_MASK, KVM_REG_ARCH_MASK)
#define PSCI_FN_BASE 0x95c1ba5e
#define PSCI_FN(n) (PSCI_FN_BASE + (n))
@@ -59,6 +61,41 @@ MISMATCH_CHECK(PSCI_FN_MIGRATE, KVM_PSCI_FN_MIGRATE)
MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A15, KVM_ARM_TARGET_CORTEX_A15)
#endif
#define CP_REG_ARM64 0x6000000000000000ULL
#define CP_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define CP_REG_ARM_COPROC_SHIFT 16
#define CP_REG_ARM64_SYSREG (0x0013 << CP_REG_ARM_COPROC_SHIFT)
#define CP_REG_ARM64_SYSREG_OP0_MASK 0x000000000000c000
#define CP_REG_ARM64_SYSREG_OP0_SHIFT 14
#define CP_REG_ARM64_SYSREG_OP1_MASK 0x0000000000003800
#define CP_REG_ARM64_SYSREG_OP1_SHIFT 11
#define CP_REG_ARM64_SYSREG_CRN_MASK 0x0000000000000780
#define CP_REG_ARM64_SYSREG_CRN_SHIFT 7
#define CP_REG_ARM64_SYSREG_CRM_MASK 0x0000000000000078
#define CP_REG_ARM64_SYSREG_CRM_SHIFT 3
#define CP_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007
#define CP_REG_ARM64_SYSREG_OP2_SHIFT 0
/* No kernel define but it's useful to QEMU */
#define CP_REG_ARM64_SYSREG_CP (CP_REG_ARM64_SYSREG >> CP_REG_ARM_COPROC_SHIFT)
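Taken together, these fields encode one AArch64 system register as a single 64-bit KVM register ID. A hypothetical packing helper to show how the masks and shifts combine (the name is illustrative; real IDs also carry size bits such as CP_REG_SIZE_U64, omitted here):

#include <stdint.h>

static uint64_t arm64_sysreg_id(unsigned op0, unsigned op1, unsigned crn,
                                unsigned crm, unsigned op2)
{
    return CP_REG_ARM64 | CP_REG_ARM64_SYSREG
         | ((uint64_t)op0 << CP_REG_ARM64_SYSREG_OP0_SHIFT)
         | ((uint64_t)op1 << CP_REG_ARM64_SYSREG_OP1_SHIFT)
         | ((uint64_t)crn << CP_REG_ARM64_SYSREG_CRN_SHIFT)
         | ((uint64_t)crm << CP_REG_ARM64_SYSREG_CRM_SHIFT)
         | ((uint64_t)op2 << CP_REG_ARM64_SYSREG_OP2_SHIFT);
}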
#ifdef TARGET_AARCH64
MISMATCH_CHECK(CP_REG_ARM64, KVM_REG_ARM64)
MISMATCH_CHECK(CP_REG_ARM_COPROC_MASK, KVM_REG_ARM_COPROC_MASK)
MISMATCH_CHECK(CP_REG_ARM_COPROC_SHIFT, KVM_REG_ARM_COPROC_SHIFT)
MISMATCH_CHECK(CP_REG_ARM64_SYSREG, KVM_REG_ARM64_SYSREG)
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_MASK, KVM_REG_ARM64_SYSREG_OP0_MASK)
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_SHIFT, KVM_REG_ARM64_SYSREG_OP0_SHIFT)
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_MASK, KVM_REG_ARM64_SYSREG_OP1_MASK)
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_SHIFT, KVM_REG_ARM64_SYSREG_OP1_SHIFT)
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_MASK, KVM_REG_ARM64_SYSREG_CRN_MASK)
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_SHIFT, KVM_REG_ARM64_SYSREG_CRN_SHIFT)
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_MASK, KVM_REG_ARM64_SYSREG_CRM_MASK)
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_SHIFT, KVM_REG_ARM64_SYSREG_CRM_SHIFT)
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_MASK, KVM_REG_ARM64_SYSREG_OP2_MASK)
MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_SHIFT, KVM_REG_ARM64_SYSREG_OP2_SHIFT)
#endif
#undef MISMATCH_CHECK
#endif


@@ -222,9 +222,9 @@ static int cpu_post_load(void *opaque, int version_id)
const VMStateDescription vmstate_arm_cpu = {
.name = "cpu",
.version_id = 13,
.minimum_version_id = 13,
.minimum_version_id_old = 13,
.version_id = 14,
.minimum_version_id = 14,
.minimum_version_id_old = 14,
.pre_save = cpu_pre_save,
.post_load = cpu_post_load,
.fields = (VMStateField[]) {
@@ -253,9 +253,9 @@ const VMStateDescription vmstate_arm_cpu = {
VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
cpreg_vmstate_array_len,
0, vmstate_info_uint64, uint64_t),
VMSTATE_UINT32(env.exclusive_addr, ARMCPU),
VMSTATE_UINT32(env.exclusive_val, ARMCPU),
VMSTATE_UINT32(env.exclusive_high, ARMCPU),
VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
VMSTATE_UINT64(env.exclusive_val, ARMCPU),
VMSTATE_UINT64(env.exclusive_high, ARMCPU),
VMSTATE_UINT64(env.features, ARMCPU),
VMSTATE_TIMER(gt_timer[GTIMER_PHYS], ARMCPU),
VMSTATE_TIMER(gt_timer[GTIMER_VIRT], ARMCPU),


@@ -1765,18 +1765,6 @@ uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x)
}
/* NEON Float helpers. */
uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b, void *fpstp)
{
float_status *fpst = fpstp;
return float32_val(float32_min(make_float32(a), make_float32(b), fpst));
}
uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b, void *fpstp)
{
float_status *fpst = fpstp;
return float32_val(float32_max(make_float32(a), make_float32(b), fpst));
}
uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp)
{
float_status *fpst = fpstp;

File diff suppressed because it is too large


@@ -61,11 +61,10 @@ TCGv_ptr cpu_env;
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
static TCGv_i64 cpu_exclusive_addr;
static TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif
@@ -96,14 +95,12 @@ void arm_translate_init(void)
cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_val), "exclusive_val");
cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_test), "exclusive_test");
cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUARMState, exclusive_info), "exclusive_info");
@@ -1101,27 +1098,29 @@ VFP_GEN_FTOI(tosi)
VFP_GEN_FTOI(tosiz)
#undef VFP_GEN_FTOI
#define VFP_GEN_FIX(name) \
#define VFP_GEN_FIX(name, round) \
static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
TCGv_i32 tmp_shift = tcg_const_i32(shift); \
TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
if (dp) { \
gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
statusptr); \
} else { \
gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
statusptr); \
} \
tcg_temp_free_i32(tmp_shift); \
tcg_temp_free_ptr(statusptr); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
VFP_GEN_FIX(tosh, _round_to_zero)
VFP_GEN_FIX(tosl, _round_to_zero)
VFP_GEN_FIX(touh, _round_to_zero)
VFP_GEN_FIX(toul, _round_to_zero)
VFP_GEN_FIX(shto, )
VFP_GEN_FIX(slto, )
VFP_GEN_FIX(uhto, )
VFP_GEN_FIX(ulto, )
#undef VFP_GEN_FIX
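For clarity, the float-to-fixed instantiations now expand to the _round_to_zero helpers while the fixed-to-float ones keep the plain names. A hand-expanded sketch of VFP_GEN_FIX(tosh, _round_to_zero):

static inline void gen_vfp_tosh(int dp, int shift, int neon)
{
    TCGv_i32 tmp_shift = tcg_const_i32(shift);
    TCGv_ptr statusptr = get_fpstatus_ptr(neon);
    if (dp) {
        gen_helper_vfp_toshd_round_to_zero(cpu_F0d, cpu_F0d, tmp_shift,
                                           statusptr);
    } else {
        gen_helper_vfp_toshs_round_to_zero(cpu_F0s, cpu_F0s, tmp_shift,
                                           statusptr);
    }
    tcg_temp_free_i32(tmp_shift);
    tcg_temp_free_ptr(statusptr);
}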
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
@@ -2728,9 +2727,9 @@ static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
if (vmin) {
gen_helper_vfp_minnmd(dest, frn, frm, fpst);
gen_helper_vfp_minnumd(dest, frn, frm, fpst);
} else {
gen_helper_vfp_maxnmd(dest, frn, frm, fpst);
gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
}
tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
tcg_temp_free_i64(frn);
@@ -2746,9 +2745,9 @@ static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
if (vmin) {
gen_helper_vfp_minnms(dest, frn, frm, fpst);
gen_helper_vfp_minnums(dest, frn, frm, fpst);
} else {
gen_helper_vfp_maxnms(dest, frn, frm, fpst);
gen_helper_vfp_maxnums(dest, frn, frm, fpst);
}
tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
tcg_temp_free_i32(frn);
@@ -5124,9 +5123,9 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
{
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
if (size == 0) {
gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
} else {
gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
}
tcg_temp_free_ptr(fpstatus);
break;
@@ -5136,9 +5135,9 @@ static int disas_neon_data_insn(CPUARMState * env, DisasContext *s, uint32_t ins
/* VMAXNM/VMINNM */
TCGv_ptr fpstatus = get_fpstatus_ptr(1);
if (size == 0) {
gen_helper_vfp_maxnms(tmp, tmp, tmp2, fpstatus);
gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
} else {
gen_helper_vfp_minnms(tmp, tmp, tmp2, fpstatus);
gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
}
tcg_temp_free_ptr(fpstatus);
} else {
@@ -6498,7 +6497,6 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
{
int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
const ARMCPRegInfo *ri;
ARMCPU *cpu = arm_env_get_cpu(env);
cpnum = (insn >> 8) & 0xf;
if (arm_feature(env, ARM_FEATURE_XSCALE)
@@ -6541,11 +6539,11 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
isread = (insn >> 20) & 1;
rt = (insn >> 12) & 0xf;
ri = get_arm_cp_reginfo(cpu,
ri = get_arm_cp_reginfo(s->cp_regs,
ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
if (ri) {
/* Check access permissions */
if (!cp_access_ok(env, ri, isread)) {
if (!cp_access_ok(s->current_pl, ri, isread)) {
return 1;
}
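With cp_regs and current_pl captured in the DisasContext (see the translate.h change below), the permission check no longer needs CPUARMState at translate time. A hypothetical sketch of a PL-based check matching the two-argument cp_access_ok() call above (the access-bit layout is assumed, not quoted from the patch):

#include <stdbool.h>

static bool cp_access_ok_sketch(int current_pl, int access_bits, int isread)
{
    /* Assumed layout: one read/write bit pair per privilege level. */
    int perms = (access_bits >> (current_pl * 2)) & 3;
    return (perms & (isread ? 1 : 2)) != 0;  /* bit 0: read, bit 1: write */
}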
@@ -6759,30 +6757,34 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
default:
abort();
}
tcg_gen_mov_i32(cpu_exclusive_val, tmp);
store_reg(s, rt, tmp);
if (size == 3) {
TCGv_i32 tmp2 = tcg_temp_new_i32();
TCGv_i32 tmp3 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
tmp = tcg_temp_new_i32();
gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
tcg_temp_free_i32(tmp2);
tcg_gen_mov_i32(cpu_exclusive_high, tmp);
store_reg(s, rt2, tmp);
tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
store_reg(s, rt2, tmp3);
} else {
tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
}
tcg_gen_mov_i32(cpu_exclusive_addr, addr);
store_reg(s, rt, tmp);
tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
static void gen_clrex(DisasContext *s)
{
tcg_gen_movi_i32(cpu_exclusive_addr, -1);
tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i32 addr, int size)
{
tcg_gen_mov_i32(cpu_exclusive_test, addr);
tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
tcg_gen_movi_i32(cpu_exclusive_info,
size | (rd << 4) | (rt << 8) | (rt2 << 12));
gen_exception_insn(s, 4, EXCP_STREX);
@@ -6792,6 +6794,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i32 addr, int size)
{
TCGv_i32 tmp;
TCGv_i64 val64, extaddr;
int done_label;
int fail_label;
@@ -6803,7 +6806,11 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
} */
fail_label = gen_new_label();
done_label = gen_new_label();
tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
extaddr = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(extaddr, addr);
tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
tcg_temp_free_i64(extaddr);
tmp = tcg_temp_new_i32();
switch (size) {
case 0:
@@ -6819,17 +6826,24 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
default:
abort();
}
tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
tcg_temp_free_i32(tmp);
val64 = tcg_temp_new_i64();
if (size == 3) {
TCGv_i32 tmp2 = tcg_temp_new_i32();
TCGv_i32 tmp3 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
tmp = tcg_temp_new_i32();
gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
tcg_temp_free_i32(tmp2);
tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
tcg_temp_free_i32(tmp);
tcg_gen_concat_i32_i64(val64, tmp, tmp3);
tcg_temp_free_i32(tmp3);
} else {
tcg_gen_extu_i32_i64(val64, tmp);
}
tcg_temp_free_i32(tmp);
tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
tcg_temp_free_i64(val64);
tmp = load_reg(s, rt);
switch (size) {
case 0:
@@ -6857,7 +6871,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
gen_set_label(fail_label);
tcg_gen_movi_i32(cpu_R[rd], 1);
gen_set_label(done_label);
tcg_gen_movi_i32(cpu_exclusive_addr, -1);
tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#endif
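Widening the monitor state to 64 bits lets LDREXD/STREXD record both words in one value, so a single 64-bit comparison covers every access size. A plain-C sketch of the resulting check (conceptual, not the TCG code above):

#include <stdbool.h>
#include <stdint.h>

struct excl_monitor {
    uint64_t addr;  /* address marked exclusive, -1 when clear */
    uint64_t val;   /* value observed by the exclusive load */
};

static bool strexd_would_succeed(const struct excl_monitor *m,
                                 uint64_t addr, uint32_t lo, uint32_t hi)
{
    /* Matches tcg_gen_concat_i32_i64(val, lo, hi): low word first. */
    uint64_t cur = ((uint64_t)hi << 32) | lo;
    return addr == m->addr && cur == m->val;
}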
@@ -10269,6 +10283,8 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
dc->cp_regs = cpu->cp_regs;
dc->current_pl = arm_current_pl(env);
cpu_F0s = tcg_temp_new_i32();
cpu_F1s = tcg_temp_new_i32();


@@ -24,6 +24,8 @@ typedef struct DisasContext {
int vec_len;
int vec_stride;
int aarch64;
int current_pl;
GHashTable *cp_regs;
#define TMP_A64_MAX 16
int tmp_a64_count;
TCGv_i64 tmp_a64[TMP_A64_MAX];