Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-2.8-20160923' into staging

ppc patch queue 2016-09-23

This pull request supersedes ppc-for-2.8-20160922: that one had a clang
build error, and I've also added one extra patch in this new pull.

Included in this set of ppc and spapr patches are:
    * TCG implementations for more POWER9 instructions
    * Some preliminary XICS fixes in preparation for the pnv machine type
    * A significant ADB (Macintosh kbd/mouse) cleanup
    * Some conversions to use trace instead of debug macros
    * Fixes to correctly handle global TLB flush synchronization in
      TCG.  This is already a bug today, but it will have a much bigger
      impact once we get MTTCG (a rough sketch of the idea follows this
      list)
    * Add more qtest testcases for Power
    * Some MAINTAINERS updates
    * Assorted bugfixes
    * Add the basics of NUMA associativity to the spapr PCI host bridge
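
As a rough sketch of the TLB point above (not the literal patch; the
TLB_NEED_GLOBAL_FLUSH name and the exact tlb_flush() calls are assumed
from the patch titles), the idea is that broadcast invalidations such as
tlbie record a global pending flush which check_tlb_flush() then applies
to every vCPU, not just the local one:

    static inline void check_tlb_flush(CPUPPCState *env, bool global)
    {
        CPUState *cs = CPU(ppc_env_get_cpu(env));

        if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
            tlb_flush(cs, 1);                    /* flush this vCPU only */
            env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
        }
        if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
            env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
            CPU_FOREACH(cs) {                    /* tlbie is a broadcast op */
                tlb_flush(cs, 1);
            }
        }
    }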

This touches some test files and monitor.c, which are technically
outside the ppc code, but they come through this tree because the
changes are primarily of interest to ppc.

# gpg: Signature made Fri 23 Sep 2016 08:14:47 BST
# gpg:                using RSA key 0x6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>"
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>"
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>"
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>"
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E  87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-2.8-20160923: (45 commits)
  spapr_pci: Add numa node id
  monitor: fix crash for platforms without a CPU 0
  linux-user: ppc64: fix ARCH_206 bit in AT_HWCAP
  ppc/kvm: Mark 64kB page size support as disabled if not available
  ppc/xics: An ICS with offset 0 is assumed to be uninitialized
  ppc/xics: account correct irq status
  Enable H_CLEAR_MOD and H_CLEAR_REF hypercalls on KVM/PPC64.
  target-ppc: tlbie/tlbivax should have global effect
  target-ppc: add flag in check_tlb_flush()
  target-ppc: add TLB_NEED_LOCAL_FLUSH flag
  spapr: Introduce sPAPRCPUCoreClass
  target-ppc: implement darn instruction
  target-ppc: add stxsi[bh]x instruction
  target-ppc: add lxsi[bw]zx instruction
  target-ppc: add xxspltib instruction
  target-ppc: consolidate store conditional
  target-ppc: move out stqcx impementation
  target-ppc: consolidate load with reservation
  target-ppc: convert st[16,32,64]r to use new macro
  target-ppc: convert st64 to use new macro
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell, 2016-09-23 14:26:12 +01:00, commit c229472af0
55 changed files with 1638 additions and 662 deletions

@ -172,6 +172,7 @@ L: qemu-ppc@nongnu.org
S: Maintained
F: target-ppc/
F: hw/ppc/
F: include/hw/ppc/
F: disas/ppc.c
S390
@ -574,6 +575,9 @@ L: qemu-ppc@nongnu.org
S: Supported
F: hw/ppc/e500.[hc]
F: hw/ppc/e500plat.c
F: include/hw/ppc/ppc_e500.h
F: include/hw/pci-host/ppce500.h
F: pc-bios/u-boot.e500
mpc8544ds
M: Alexander Graf <agraf@suse.de>
@ -591,6 +595,8 @@ F: hw/ppc/mac_newworld.c
F: hw/pci-host/uninorth.c
F: hw/pci-bridge/dec.[hc]
F: hw/misc/macio/
F: include/hw/ppc/mac_dbdma.h
F: hw/nvram/mac_nvram.c
Old World
M: Alexander Graf <agraf@suse.de>
@ -618,6 +624,14 @@ F: include/hw/*/spapr*
F: hw/*/xics*
F: include/hw/*/xics*
F: pc-bios/spapr-rtas/*
F: pc-bios/spapr-rtas.bin
F: pc-bios/slof.bin
F: docs/specs/ppc-spapr-hcalls.txt
F: docs/specs/ppc-spapr-hotplug.txt
F: tests/spapr*
F: tests/libqos/*spapr*
F: tests/rtas*
F: tests/libqos/rtas*
virtex_ml507
M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
@ -815,6 +829,7 @@ M: Alexander Graf <agraf@suse.de>
L: qemu-ppc@nongnu.org
S: Odd Fixes
F: hw/ppc/ppc4*.c
F: include/hw/ppc/ppc4xx.h
ppce500
M: Alexander Graf <agraf@suse.de>


@ -25,6 +25,9 @@
#include "hw/hw.h"
#include "hw/input/adb.h"
#include "ui/console.h"
#include "include/hw/input/adb-keys.h"
#include "ui/input.h"
#include "sysemu/sysemu.h"
/* debug ADB */
//#define DEBUG_ADB
@ -59,6 +62,9 @@ do { printf("ADB: " fmt , ## __VA_ARGS__); } while (0)
/* error codes */
#define ADB_RET_NOTPRESENT (-2)
/* The adb keyboard doesn't have every key imaginable */
#define NO_KEY 0xff
static void adb_device_reset(ADBDevice *d)
{
qdev_reset_all(DEVICE(d));
@ -187,23 +193,125 @@ typedef struct ADBKeyboardClass {
DeviceRealize parent_realize;
} ADBKeyboardClass;
static const uint8_t pc_to_adb_keycode[256] = {
0, 53, 18, 19, 20, 21, 23, 22, 26, 28, 25, 29, 27, 24, 51, 48,
12, 13, 14, 15, 17, 16, 32, 34, 31, 35, 33, 30, 36, 54, 0, 1,
2, 3, 5, 4, 38, 40, 37, 41, 39, 50, 56, 42, 6, 7, 8, 9,
11, 45, 46, 43, 47, 44,123, 67, 58, 49, 57,122,120, 99,118, 96,
97, 98,100,101,109, 71,107, 89, 91, 92, 78, 86, 87, 88, 69, 83,
84, 85, 82, 65, 0, 0, 10,103,111, 0, 0,110, 81, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 94, 0, 93, 0, 0, 0, 0, 0, 0,104,102, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 76,125, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,105, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 75, 0, 0,124, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,115, 62,116, 0, 59, 0, 60, 0,119,
61,121,114,117, 0, 0, 0, 0, 0, 0, 0, 55,126, 0,127, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
int qcode_to_adb_keycode[] = {
/* Make sure future additions are automatically set to NO_KEY */
[0 ... 0xff] = NO_KEY,
[Q_KEY_CODE_SHIFT] = ADB_KEY_LEFT_SHIFT,
[Q_KEY_CODE_SHIFT_R] = ADB_KEY_RIGHT_SHIFT,
[Q_KEY_CODE_ALT] = ADB_KEY_LEFT_OPTION,
[Q_KEY_CODE_ALT_R] = ADB_KEY_RIGHT_OPTION,
[Q_KEY_CODE_ALTGR] = ADB_KEY_RIGHT_OPTION,
[Q_KEY_CODE_CTRL] = ADB_KEY_LEFT_CONTROL,
[Q_KEY_CODE_CTRL_R] = ADB_KEY_RIGHT_CONTROL,
[Q_KEY_CODE_META_L] = ADB_KEY_COMMAND,
[Q_KEY_CODE_META_R] = ADB_KEY_COMMAND,
[Q_KEY_CODE_SPC] = ADB_KEY_SPACEBAR,
[Q_KEY_CODE_ESC] = ADB_KEY_ESC,
[Q_KEY_CODE_1] = ADB_KEY_1,
[Q_KEY_CODE_2] = ADB_KEY_2,
[Q_KEY_CODE_3] = ADB_KEY_3,
[Q_KEY_CODE_4] = ADB_KEY_4,
[Q_KEY_CODE_5] = ADB_KEY_5,
[Q_KEY_CODE_6] = ADB_KEY_6,
[Q_KEY_CODE_7] = ADB_KEY_7,
[Q_KEY_CODE_8] = ADB_KEY_8,
[Q_KEY_CODE_9] = ADB_KEY_9,
[Q_KEY_CODE_0] = ADB_KEY_0,
[Q_KEY_CODE_MINUS] = ADB_KEY_MINUS,
[Q_KEY_CODE_EQUAL] = ADB_KEY_EQUAL,
[Q_KEY_CODE_BACKSPACE] = ADB_KEY_DELETE,
[Q_KEY_CODE_TAB] = ADB_KEY_TAB,
[Q_KEY_CODE_Q] = ADB_KEY_Q,
[Q_KEY_CODE_W] = ADB_KEY_W,
[Q_KEY_CODE_E] = ADB_KEY_E,
[Q_KEY_CODE_R] = ADB_KEY_R,
[Q_KEY_CODE_T] = ADB_KEY_T,
[Q_KEY_CODE_Y] = ADB_KEY_Y,
[Q_KEY_CODE_U] = ADB_KEY_U,
[Q_KEY_CODE_I] = ADB_KEY_I,
[Q_KEY_CODE_O] = ADB_KEY_O,
[Q_KEY_CODE_P] = ADB_KEY_P,
[Q_KEY_CODE_BRACKET_LEFT] = ADB_KEY_LEFT_BRACKET,
[Q_KEY_CODE_BRACKET_RIGHT] = ADB_KEY_RIGHT_BRACKET,
[Q_KEY_CODE_RET] = ADB_KEY_RETURN,
[Q_KEY_CODE_A] = ADB_KEY_A,
[Q_KEY_CODE_S] = ADB_KEY_S,
[Q_KEY_CODE_D] = ADB_KEY_D,
[Q_KEY_CODE_F] = ADB_KEY_F,
[Q_KEY_CODE_G] = ADB_KEY_G,
[Q_KEY_CODE_H] = ADB_KEY_H,
[Q_KEY_CODE_J] = ADB_KEY_J,
[Q_KEY_CODE_K] = ADB_KEY_K,
[Q_KEY_CODE_L] = ADB_KEY_L,
[Q_KEY_CODE_SEMICOLON] = ADB_KEY_SEMICOLON,
[Q_KEY_CODE_APOSTROPHE] = ADB_KEY_APOSTROPHE,
[Q_KEY_CODE_GRAVE_ACCENT] = ADB_KEY_GRAVE_ACCENT,
[Q_KEY_CODE_BACKSLASH] = ADB_KEY_BACKSLASH,
[Q_KEY_CODE_Z] = ADB_KEY_Z,
[Q_KEY_CODE_X] = ADB_KEY_X,
[Q_KEY_CODE_C] = ADB_KEY_C,
[Q_KEY_CODE_V] = ADB_KEY_V,
[Q_KEY_CODE_B] = ADB_KEY_B,
[Q_KEY_CODE_N] = ADB_KEY_N,
[Q_KEY_CODE_M] = ADB_KEY_M,
[Q_KEY_CODE_COMMA] = ADB_KEY_COMMA,
[Q_KEY_CODE_DOT] = ADB_KEY_PERIOD,
[Q_KEY_CODE_SLASH] = ADB_KEY_FORWARD_SLASH,
[Q_KEY_CODE_ASTERISK] = ADB_KEY_KP_MULTIPLY,
[Q_KEY_CODE_CAPS_LOCK] = ADB_KEY_CAPS_LOCK,
[Q_KEY_CODE_F1] = ADB_KEY_F1,
[Q_KEY_CODE_F2] = ADB_KEY_F2,
[Q_KEY_CODE_F3] = ADB_KEY_F3,
[Q_KEY_CODE_F4] = ADB_KEY_F4,
[Q_KEY_CODE_F5] = ADB_KEY_F5,
[Q_KEY_CODE_F6] = ADB_KEY_F6,
[Q_KEY_CODE_F7] = ADB_KEY_F7,
[Q_KEY_CODE_F8] = ADB_KEY_F8,
[Q_KEY_CODE_F9] = ADB_KEY_F9,
[Q_KEY_CODE_F10] = ADB_KEY_F10,
[Q_KEY_CODE_F11] = ADB_KEY_F11,
[Q_KEY_CODE_F12] = ADB_KEY_F12,
[Q_KEY_CODE_PRINT] = ADB_KEY_F13,
[Q_KEY_CODE_SYSRQ] = ADB_KEY_F13,
[Q_KEY_CODE_SCROLL_LOCK] = ADB_KEY_F14,
[Q_KEY_CODE_PAUSE] = ADB_KEY_F15,
[Q_KEY_CODE_NUM_LOCK] = ADB_KEY_KP_CLEAR,
[Q_KEY_CODE_KP_EQUALS] = ADB_KEY_KP_EQUAL,
[Q_KEY_CODE_KP_DIVIDE] = ADB_KEY_KP_DIVIDE,
[Q_KEY_CODE_KP_MULTIPLY] = ADB_KEY_KP_MULTIPLY,
[Q_KEY_CODE_KP_SUBTRACT] = ADB_KEY_KP_SUBTRACT,
[Q_KEY_CODE_KP_ADD] = ADB_KEY_KP_PLUS,
[Q_KEY_CODE_KP_ENTER] = ADB_KEY_KP_ENTER,
[Q_KEY_CODE_KP_DECIMAL] = ADB_KEY_KP_PERIOD,
[Q_KEY_CODE_KP_0] = ADB_KEY_KP_0,
[Q_KEY_CODE_KP_1] = ADB_KEY_KP_1,
[Q_KEY_CODE_KP_2] = ADB_KEY_KP_2,
[Q_KEY_CODE_KP_3] = ADB_KEY_KP_3,
[Q_KEY_CODE_KP_4] = ADB_KEY_KP_4,
[Q_KEY_CODE_KP_5] = ADB_KEY_KP_5,
[Q_KEY_CODE_KP_6] = ADB_KEY_KP_6,
[Q_KEY_CODE_KP_7] = ADB_KEY_KP_7,
[Q_KEY_CODE_KP_8] = ADB_KEY_KP_8,
[Q_KEY_CODE_KP_9] = ADB_KEY_KP_9,
[Q_KEY_CODE_UP] = ADB_KEY_UP,
[Q_KEY_CODE_DOWN] = ADB_KEY_DOWN,
[Q_KEY_CODE_LEFT] = ADB_KEY_LEFT,
[Q_KEY_CODE_RIGHT] = ADB_KEY_RIGHT,
[Q_KEY_CODE_HELP] = ADB_KEY_HELP,
[Q_KEY_CODE_INSERT] = ADB_KEY_HELP,
[Q_KEY_CODE_DELETE] = ADB_KEY_FORWARD_DELETE,
[Q_KEY_CODE_HOME] = ADB_KEY_HOME,
[Q_KEY_CODE_END] = ADB_KEY_END,
[Q_KEY_CODE_PGUP] = ADB_KEY_PAGE_UP,
[Q_KEY_CODE_PGDN] = ADB_KEY_PAGE_DOWN,
[Q_KEY_CODE_POWER] = ADB_KEY_POWER
};
static void adb_kbd_put_keycode(void *opaque, int keycode)
@ -220,35 +328,40 @@ static void adb_kbd_put_keycode(void *opaque, int keycode)
static int adb_kbd_poll(ADBDevice *d, uint8_t *obuf)
{
static int ext_keycode;
KBDState *s = ADB_KEYBOARD(d);
int adb_keycode, keycode;
int keycode;
int olen;
olen = 0;
for(;;) {
if (s->count == 0)
break;
keycode = s->data[s->rptr];
if (++s->rptr == sizeof(s->data))
s->rptr = 0;
s->count--;
if (keycode == 0xe0) {
ext_keycode = 1;
} else {
if (ext_keycode)
adb_keycode = pc_to_adb_keycode[keycode | 0x80];
else
adb_keycode = pc_to_adb_keycode[keycode & 0x7f];
obuf[0] = adb_keycode | (keycode & 0x80);
/* NOTE: could put a second keycode if needed */
obuf[1] = 0xff;
olen = 2;
ext_keycode = 0;
break;
}
if (s->count == 0) {
return 0;
}
keycode = s->data[s->rptr];
s->rptr++;
if (s->rptr == sizeof(s->data)) {
s->rptr = 0;
}
s->count--;
/*
* The power key is the only two byte value key, so it is a special case.
* Since 0x7f is not a used keycode for ADB we overload it to indicate the
* power button when we're storing keycodes in our internal buffer, and
* expand it out to two bytes when we send to the guest.
*/
if (keycode == 0x7f) {
obuf[0] = 0x7f;
obuf[1] = 0x7f;
olen = 2;
} else {
obuf[0] = keycode;
/* NOTE: the power key key-up is the two byte sequence 0xff 0xff;
* otherwise we could in theory send a second keycode in the second
* byte, but choose not to bother.
*/
obuf[1] = 0xff;
olen = 2;
}
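/*
 * Net effect on the ADB wire, following the logic above: a power key press
 * goes out as 0x7f 0x7f, its release as 0xff 0xff (0x7f with bit 7 set,
 * handled by the else branch), and any other key as its keycode (bit 7 set
 * on release) followed by the 0xff filler byte.
 */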
return olen;
}
@ -313,6 +426,29 @@ static int adb_kbd_request(ADBDevice *d, uint8_t *obuf,
return olen;
}
/* This is where keyboard events enter this file */
static void adb_keyboard_event(DeviceState *dev, QemuConsole *src,
InputEvent *evt)
{
KBDState *s = (KBDState *)dev;
int qcode, keycode;
qcode = qemu_input_key_value_to_qcode(evt->u.key.data->key);
if (qcode >= ARRAY_SIZE(qcode_to_adb_keycode)) {
return;
}
keycode = qcode_to_adb_keycode[qcode];
if (keycode == NO_KEY) { /* We don't want to send this to the guest */
ADB_DPRINTF("Ignoring NO_KEY\n");
return;
}
if (evt->u.key.data->down == false) { /* if key release event */
keycode = keycode | 0x80; /* create keyboard break code */
}
adb_kbd_put_keycode(s, keycode);
}
static const VMStateDescription vmstate_adb_kbd = {
.name = "adb_kbd",
.version_id = 2,
@ -340,14 +476,17 @@ static void adb_kbd_reset(DeviceState *dev)
s->count = 0;
}
static QemuInputHandler adb_keyboard_handler = {
.name = "QEMU ADB Keyboard",
.mask = INPUT_EVENT_MASK_KEY,
.event = adb_keyboard_event,
};
static void adb_kbd_realizefn(DeviceState *dev, Error **errp)
{
ADBDevice *d = ADB_DEVICE(dev);
ADBKeyboardClass *akc = ADB_KEYBOARD_GET_CLASS(dev);
akc->parent_realize(dev, errp);
qemu_add_kbd_event_handler(adb_kbd_put_keycode, d);
qemu_input_handler_register(dev, &adb_keyboard_handler);
}
static void adb_kbd_initfn(Object *obj)


@ -505,8 +505,11 @@ static void ics_reject(ICSState *ics, int nr)
ICSIRQState *irq = ics->irqs + nr - ics->offset;
trace_xics_ics_reject(nr, nr - ics->offset);
irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
if (irq->flags & XICS_FLAGS_IRQ_MSI) {
irq->status |= XICS_STATUS_REJECTED;
} else if (irq->flags & XICS_FLAGS_IRQ_LSI) {
irq->status &= ~XICS_STATUS_SENT;
}
}
static void ics_resend(ICSState *ics)


@ -34,20 +34,13 @@
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include <libfdt.h>
#define ETH_ALEN 6
#define MAX_PACKET_SIZE 65536
/*#define DEBUG*/
#ifdef DEBUG
#define DPRINTF(fmt...) do { fprintf(stderr, fmt); } while (0)
#else
#define DPRINTF(fmt...)
#endif
/* Compatibility flags for migration */
#define SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT 0
#define SPAPRVLAN_FLAG_RX_BUF_POOLS (1 << SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT)
@ -158,8 +151,10 @@ static vlan_bd_t spapr_vlan_get_rx_bd_from_pool(VIOsPAPRVLANDevice *dev,
return 0;
}
DPRINTF("Found buffer: pool=%d count=%d rxbufs=%d\n", pool,
dev->rx_pool[pool]->count, dev->rx_bufs);
trace_spapr_vlan_get_rx_bd_from_pool_found(pool,
dev->rx_pool[pool]->count,
dev->rx_bufs);
/* Remove the buffer from the pool */
dev->rx_pool[pool]->count--;
@ -186,8 +181,8 @@ static vlan_bd_t spapr_vlan_get_rx_bd_from_page(VIOsPAPRVLANDevice *dev,
}
bd = vio_ldq(&dev->sdev, dev->buf_list + buf_ptr);
DPRINTF("use_buf_ptr=%d bd=0x%016llx\n",
buf_ptr, (unsigned long long)bd);
trace_spapr_vlan_get_rx_bd_from_page(buf_ptr, (uint64_t)bd);
} while ((!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8)
&& buf_ptr != dev->use_buf_ptr);
@ -200,7 +195,7 @@ static vlan_bd_t spapr_vlan_get_rx_bd_from_page(VIOsPAPRVLANDevice *dev,
dev->use_buf_ptr = buf_ptr;
vio_stq(&dev->sdev, dev->buf_list + dev->use_buf_ptr, 0);
DPRINTF("Found buffer: ptr=%d rxbufs=%d\n", dev->use_buf_ptr, dev->rx_bufs);
trace_spapr_vlan_get_rx_bd_from_page_found(dev->use_buf_ptr, dev->rx_bufs);
return bd;
}
@ -215,8 +210,7 @@ static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
uint64_t handle;
uint8_t control;
DPRINTF("spapr_vlan_receive() [%s] rx_bufs=%d\n", sdev->qdev.id,
dev->rx_bufs);
trace_spapr_vlan_receive(sdev->qdev.id, dev->rx_bufs);
if (!dev->isopen) {
return -1;
@ -244,7 +238,7 @@ static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
return -1;
}
DPRINTF("spapr_vlan_receive: DMA write completed\n");
trace_spapr_vlan_receive_dma_completed();
/* Update the receive queue */
control = VLAN_RXQC_TOGGLE | VLAN_RXQC_VALID;
@ -258,12 +252,11 @@ static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
vio_sth(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8);
vio_stb(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control);
DPRINTF("wrote rxq entry (ptr=0x%llx): 0x%016llx 0x%016llx\n",
(unsigned long long)dev->rxq_ptr,
(unsigned long long)vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
dev->rxq_ptr),
(unsigned long long)vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
dev->rxq_ptr + 8));
trace_spapr_vlan_receive_wrote(dev->rxq_ptr,
vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
dev->rxq_ptr),
vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
dev->rxq_ptr + 8));
dev->rxq_ptr += 16;
if (dev->rxq_ptr >= VLAN_BD_LEN(rxq_bd)) {
@ -580,8 +573,8 @@ static target_long spapr_vlan_add_rxbuf_to_pool(VIOsPAPRVLANDevice *dev,
qsort(dev->rx_pool, RX_MAX_POOLS, sizeof(dev->rx_pool[0]),
rx_pool_size_compare);
pool = spapr_vlan_get_rx_pool_id(dev, size);
DPRINTF("created RX pool %d for size %lld\n", pool,
VLAN_BD_LEN(buf));
trace_spapr_vlan_add_rxbuf_to_pool_create(pool,
VLAN_BD_LEN(buf));
break;
}
}
@ -591,8 +584,8 @@ static target_long spapr_vlan_add_rxbuf_to_pool(VIOsPAPRVLANDevice *dev,
return H_RESOURCE;
}
DPRINTF("h_add_llan_buf(): Add buf using pool %i (size %lli, count=%i)\n",
pool, VLAN_BD_LEN(buf), dev->rx_pool[pool]->count);
trace_spapr_vlan_add_rxbuf_to_pool(pool, VLAN_BD_LEN(buf),
dev->rx_pool[pool]->count);
dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count++] = buf;
@ -623,8 +616,7 @@ static target_long spapr_vlan_add_rxbuf_to_page(VIOsPAPRVLANDevice *dev,
vio_stq(&dev->sdev, dev->buf_list + dev->add_buf_ptr, buf);
DPRINTF("h_add_llan_buf(): Added buf ptr=%d rx_bufs=%d bd=0x%016llx\n",
dev->add_buf_ptr, dev->rx_bufs, (unsigned long long)buf);
trace_spapr_vlan_add_rxbuf_to_page(dev->add_buf_ptr, dev->rx_bufs, buf);
return 0;
}
@ -640,8 +632,7 @@ static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
VIOsPAPRVLANDevice *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
target_long ret;
DPRINTF("H_ADD_LOGICAL_LAN_BUFFER(0x" TARGET_FMT_lx
", 0x" TARGET_FMT_lx ")\n", reg, buf);
trace_spapr_vlan_h_add_logical_lan_buffer(reg, buf);
if (!sdev) {
hcall_dprintf("Bad device\n");
@ -694,14 +685,13 @@ static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
int i, nbufs;
int ret;
DPRINTF("H_SEND_LOGICAL_LAN(0x" TARGET_FMT_lx ", <bufs>, 0x"
TARGET_FMT_lx ")\n", reg, continue_token);
trace_spapr_vlan_h_send_logical_lan(reg, continue_token);
if (!sdev) {
return H_PARAMETER;
}
DPRINTF("rxbufs = %d\n", dev->rx_bufs);
trace_spapr_vlan_h_send_logical_lan_rxbufs(dev->rx_bufs);
if (!dev->isopen) {
return H_DROPPED;
@ -713,7 +703,7 @@ static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
total_len = 0;
for (i = 0; i < 6; i++) {
DPRINTF(" buf desc: 0x" TARGET_FMT_lx "\n", bufs[i]);
trace_spapr_vlan_h_send_logical_lan_buf_desc(bufs[i]);
if (!(bufs[i] & VLAN_BD_VALID)) {
break;
}
@ -721,8 +711,7 @@ static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
}
nbufs = i;
DPRINTF("h_send_logical_lan() %d buffers, total length 0x%x\n",
nbufs, total_len);
trace_spapr_vlan_h_send_logical_lan_total(nbufs, total_len);
if (total_len == 0) {
return H_SUCCESS;


@ -270,3 +270,19 @@ e1000e_cfg_support_virtio(bool support) "Virtio header supported: %d"
e1000e_vm_state_running(void) "VM state is running"
e1000e_vm_state_stopped(void) "VM state is stopped"
# hw/net/spapr_llan.c
spapr_vlan_get_rx_bd_from_pool_found(int pool, int32_t count, uint32_t rx_bufs) "pool=%d count=%"PRId32" rxbufs=%"PRIu32
spapr_vlan_get_rx_bd_from_page(int buf_ptr, uint64_t bd) "use_buf_ptr=%d bd=0x%016"PRIx64
spapr_vlan_get_rx_bd_from_page_found(uint32_t use_buf_ptr, uint32_t rx_bufs) "ptr=%"PRIu32" rxbufs=%"PRIu32
spapr_vlan_receive(const char *id, uint32_t rx_bufs) "[%s] rx_bufs=%"PRIu32
spapr_vlan_receive_dma_completed(void) "DMA write completed"
spapr_vlan_receive_wrote(uint64_t ptr, uint64_t hi, uint64_t lo) "rxq entry (ptr=0x%"PRIx64"): 0x%016"PRIx64" 0x%016"PRIx64
spapr_vlan_add_rxbuf_to_pool_create(int pool, uint64_t len) "created RX pool %d for size %"PRIu64
spapr_vlan_add_rxbuf_to_pool(int pool, uint64_t len, int32_t count) "add buf using pool %d (size %"PRIu64", count=%"PRId32")"
spapr_vlan_add_rxbuf_to_page(uint32_t ptr, uint32_t rx_bufs, uint64_t bd) "added buf ptr=%"PRIu32" rx_bufs=%"PRIu32" bd=0x%016"PRIx64
spapr_vlan_h_add_logical_lan_buffer(uint64_t reg, uint64_t buf) "H_ADD_LOGICAL_LAN_BUFFER(0x%"PRIx64", 0x%"PRIx64")"
spapr_vlan_h_send_logical_lan(uint64_t reg, uint64_t continue_token) "H_SEND_LOGICAL_LAN(0x%"PRIx64", <bufs>, 0x%"PRIx64")"
spapr_vlan_h_send_logical_lan_rxbufs(uint32_t rx_bufs) "rxbufs = %"PRIu32
spapr_vlan_h_send_logical_lan_buf_desc(uint64_t buf) " buf desc: 0x%"PRIx64
spapr_vlan_h_send_logical_lan_total(int nbufs, unsigned total_len) "%d buffers, total length 0x%x"


@ -1809,6 +1809,9 @@ static void ppc_spapr_init(MachineState *machine)
/* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
kvmppc_enable_logical_ci_hcalls();
kvmppc_enable_set_mode_hcall();
/* H_CLEAR_MOD/_REF are mandatory in PAPR, but off by default */
kvmppc_enable_clear_ref_mod_hcalls();
}
/* allocate RAM */
@ -2307,8 +2310,8 @@ static void spapr_machine_device_pre_plug(HotplugHandler *hotplug_dev,
}
}
static HotplugHandler *spapr_get_hotpug_handler(MachineState *machine,
DeviceState *dev)
static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
DeviceState *dev)
{
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
@ -2380,7 +2383,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
mc->kvm_type = spapr_kvm_type;
mc->has_dynamic_sysbus = true;
mc->pci_allow_0_address = true;
mc->get_hotplug_handler = spapr_get_hotpug_handler;
mc->get_hotplug_handler = spapr_get_hotplug_handler;
hc->pre_plug = spapr_machine_device_pre_plug;
hc->plug = spapr_machine_device_plug;
hc->unplug = spapr_machine_device_unplug;


@ -112,7 +112,8 @@ char *spapr_get_cpu_core_type(const char *model)
static void spapr_core_release(DeviceState *dev, void *opaque)
{
sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
const char *typename = object_class_get_name(sc->cpu_class);
sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(dev));
const char *typename = object_class_get_name(scc->cpu_class);
size_t size = object_type_get_instance_size(typename);
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
CPUCore *cc = CPU_CORE(dev);
@ -287,8 +288,9 @@ static void spapr_cpu_core_realize_child(Object *child, Error **errp)
static void spapr_cpu_core_realize(DeviceState *dev, Error **errp)
{
sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(dev));
CPUCore *cc = CPU_CORE(OBJECT(dev));
const char *typename = object_class_get_name(sc->cpu_class);
const char *typename = object_class_get_name(scc->cpu_class);
size_t size = object_type_get_instance_size(typename);
Error *local_err = NULL;
void *obj;
@ -331,83 +333,43 @@ err:
error_propagate(errp, local_err);
}
static void spapr_cpu_core_class_init(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
dc->realize = spapr_cpu_core_realize;
}
/*
* instance_init routines from different flavours of sPAPR CPU cores.
*/
#define SPAPR_CPU_CORE_INITFN(_type, _fname) \
static void glue(glue(spapr_cpu_core_, _fname), _initfn(Object *obj)) \
{ \
sPAPRCPUCore *core = SPAPR_CPU_CORE(obj); \
char *name = g_strdup_printf("%s-" TYPE_POWERPC_CPU, stringify(_type)); \
ObjectClass *oc = object_class_by_name(name); \
g_assert(oc); \
g_free((void *)name); \
core->cpu_class = oc; \
}
SPAPR_CPU_CORE_INITFN(970mp_v1.0, 970MP_v10);
SPAPR_CPU_CORE_INITFN(970mp_v1.1, 970MP_v11);
SPAPR_CPU_CORE_INITFN(970_v2.2, 970);
SPAPR_CPU_CORE_INITFN(POWER5+_v2.1, POWER5plus);
SPAPR_CPU_CORE_INITFN(POWER7_v2.3, POWER7);
SPAPR_CPU_CORE_INITFN(POWER7+_v2.1, POWER7plus);
SPAPR_CPU_CORE_INITFN(POWER8_v2.0, POWER8);
SPAPR_CPU_CORE_INITFN(POWER8E_v2.1, POWER8E);
SPAPR_CPU_CORE_INITFN(POWER8NVL_v1.0, POWER8NVL);
typedef struct SPAPRCoreInfo {
const char *name;
void (*initfn)(Object *obj);
} SPAPRCoreInfo;
static const SPAPRCoreInfo spapr_cores[] = {
static const char *spapr_core_models[] = {
/* 970 */
{ .name = "970_v2.2", .initfn = spapr_cpu_core_970_initfn },
"970_v2.2",
/* 970MP variants */
{ .name = "970MP_v1.0", .initfn = spapr_cpu_core_970MP_v10_initfn },
{ .name = "970mp_v1.0", .initfn = spapr_cpu_core_970MP_v10_initfn },
{ .name = "970MP_v1.1", .initfn = spapr_cpu_core_970MP_v11_initfn },
{ .name = "970mp_v1.1", .initfn = spapr_cpu_core_970MP_v11_initfn },
"970MP_v1.0",
"970mp_v1.0",
"970MP_v1.1",
"970mp_v1.1",
/* POWER5+ */
{ .name = "POWER5+_v2.1", .initfn = spapr_cpu_core_POWER5plus_initfn },
"POWER5+_v2.1",
/* POWER7 */
{ .name = "POWER7_v2.3", .initfn = spapr_cpu_core_POWER7_initfn },
"POWER7_v2.3",
/* POWER7+ */
{ .name = "POWER7+_v2.1", .initfn = spapr_cpu_core_POWER7plus_initfn },
"POWER7+_v2.1",
/* POWER8 */
{ .name = "POWER8_v2.0", .initfn = spapr_cpu_core_POWER8_initfn },
"POWER8_v2.0",
/* POWER8E */
{ .name = "POWER8E_v2.1", .initfn = spapr_cpu_core_POWER8E_initfn },
"POWER8E_v2.1",
/* POWER8NVL */
{ .name = "POWER8NVL_v1.0", .initfn = spapr_cpu_core_POWER8NVL_initfn },
{ .name = NULL }
"POWER8NVL_v1.0",
};
static void spapr_cpu_core_register(const SPAPRCoreInfo *info)
void spapr_cpu_core_class_init(ObjectClass *oc, void *data)
{
TypeInfo type_info = {
.parent = TYPE_SPAPR_CPU_CORE,
.instance_size = sizeof(sPAPRCPUCore),
.instance_init = info->initfn,
};
DeviceClass *dc = DEVICE_CLASS(oc);
sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_CLASS(oc);
type_info.name = g_strdup_printf("%s-" TYPE_SPAPR_CPU_CORE, info->name);
type_register(&type_info);
g_free((void *)type_info.name);
dc->realize = spapr_cpu_core_realize;
scc->cpu_class = cpu_class_by_name(TYPE_POWERPC_CPU, data);
g_assert(scc->cpu_class);
}
static const TypeInfo spapr_cpu_core_type_info = {
@ -415,17 +377,27 @@ static const TypeInfo spapr_cpu_core_type_info = {
.parent = TYPE_CPU_CORE,
.abstract = true,
.instance_size = sizeof(sPAPRCPUCore),
.class_init = spapr_cpu_core_class_init,
.class_size = sizeof(sPAPRCPUCoreClass),
};
static void spapr_cpu_core_register_types(void)
{
const SPAPRCoreInfo *info = spapr_cores;
int i;
type_register_static(&spapr_cpu_core_type_info);
while (info->name) {
spapr_cpu_core_register(info);
info++;
for (i = 0; i < ARRAY_SIZE(spapr_core_models); i++) {
TypeInfo type_info = {
.parent = TYPE_SPAPR_CPU_CORE,
.instance_size = sizeof(sPAPRCPUCore),
.class_init = spapr_cpu_core_class_init,
.class_data = (void *) spapr_core_models[i],
};
type_info.name = g_strdup_printf("%s-" TYPE_SPAPR_CPU_CORE,
spapr_core_models[i]);
type_register(&type_info);
g_free((void *)type_info.name);
}
}


@ -20,20 +20,7 @@
#include "qapi/visitor.h"
#include "qemu/error-report.h"
#include "hw/ppc/spapr.h" /* for RTAS return codes */
/* #define DEBUG_SPAPR_DRC */
#ifdef DEBUG_SPAPR_DRC
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#define DPRINTFN(fmt, ...) \
do { DPRINTF(fmt, ## __VA_ARGS__); fprintf(stderr, "\n"); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#define DPRINTFN(fmt, ...) \
do { } while (0)
#endif
#include "trace.h"
#define DRC_CONTAINER_PATH "/dr-connector"
#define DRC_INDEX_TYPE_SHIFT 28
@ -69,7 +56,7 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc,
{
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
DPRINTFN("drc: %x, set_isolation_state: %x", get_index(drc), state);
trace_spapr_drc_set_isolation_state(get_index(drc), state);
if (state == SPAPR_DR_ISOLATION_STATE_UNISOLATED) {
/* cannot unisolate a non-existant resource, and, or resources
@ -94,11 +81,11 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc,
*/
if (drc->awaiting_release) {
if (drc->configured) {
DPRINTFN("finalizing device removal");
trace_spapr_drc_set_isolation_state_finalizing(get_index(drc));
drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
drc->detach_cb_opaque, NULL);
} else {
DPRINTFN("deferring device removal on unconfigured device\n");
trace_spapr_drc_set_isolation_state_deferring(get_index(drc));
}
}
drc->configured = false;
@ -110,7 +97,7 @@ static uint32_t set_isolation_state(sPAPRDRConnector *drc,
static uint32_t set_indicator_state(sPAPRDRConnector *drc,
sPAPRDRIndicatorState state)
{
DPRINTFN("drc: %x, set_indicator_state: %x", get_index(drc), state);
trace_spapr_drc_set_indicator_state(get_index(drc), state);
drc->indicator_state = state;
return RTAS_OUT_SUCCESS;
}
@ -120,7 +107,7 @@ static uint32_t set_allocation_state(sPAPRDRConnector *drc,
{
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
DPRINTFN("drc: %x, set_allocation_state: %x", get_index(drc), state);
trace_spapr_drc_set_allocation_state(get_index(drc), state);
if (state == SPAPR_DR_ALLOCATION_STATE_USABLE) {
/* if there's no resource/device associated with the DRC, there's
@ -137,7 +124,7 @@ static uint32_t set_allocation_state(sPAPRDRConnector *drc,
drc->allocation_state = state;
if (drc->awaiting_release &&
drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
DPRINTFN("finalizing device removal");
trace_spapr_drc_set_allocation_state_finalizing(get_index(drc));
drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
drc->detach_cb_opaque, NULL);
} else if (drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE) {
@ -167,12 +154,11 @@ static const void *get_fdt(sPAPRDRConnector *drc, int *fdt_start_offset)
static void set_configured(sPAPRDRConnector *drc)
{
DPRINTFN("drc: %x, set_configured", get_index(drc));
trace_spapr_drc_set_configured(get_index(drc));
if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_UNISOLATED) {
/* guest should be not configuring an isolated device */
DPRINTFN("drc: %x, set_configured: skipping isolated device",
get_index(drc));
trace_spapr_drc_set_configured_skipping(get_index(drc));
return;
}
drc->configured = true;
@ -222,7 +208,7 @@ static uint32_t entity_sense(sPAPRDRConnector *drc, sPAPRDREntitySense *state)
}
}
DPRINTFN("drc: %x, entity_sense: %x", get_index(drc), state);
trace_spapr_drc_entity_sense(get_index(drc), *state);
return RTAS_OUT_SUCCESS;
}
@ -336,7 +322,7 @@ static void prop_get_fdt(Object *obj, Visitor *v, const char *name,
static void attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
int fdt_start_offset, bool coldplug, Error **errp)
{
DPRINTFN("drc: %x, attach", get_index(drc));
trace_spapr_drc_attach(get_index(drc));
if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
error_setg(errp, "an attached device is still awaiting release");
@ -389,7 +375,7 @@ static void detach(sPAPRDRConnector *drc, DeviceState *d,
spapr_drc_detach_cb *detach_cb,
void *detach_cb_opaque, Error **errp)
{
DPRINTFN("drc: %x, detach", get_index(drc));
trace_spapr_drc_detach(get_index(drc));
drc->detach_cb = detach_cb;
drc->detach_cb_opaque = detach_cb_opaque;
@ -415,21 +401,21 @@ static void detach(sPAPRDRConnector *drc, DeviceState *d,
}
if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
DPRINTFN("awaiting transition to isolated state before removal");
trace_spapr_drc_awaiting_isolated(get_index(drc));
drc->awaiting_release = true;
return;
}
if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
DPRINTFN("awaiting transition to unusable state before removal");
trace_spapr_drc_awaiting_unusable(get_index(drc));
drc->awaiting_release = true;
return;
}
if (drc->awaiting_allocation) {
drc->awaiting_release = true;
DPRINTFN("awaiting allocation to complete before removal");
trace_spapr_drc_awaiting_allocation(get_index(drc));
return;
}
@ -460,7 +446,7 @@ static void reset(DeviceState *d)
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
sPAPRDREntitySense state;
DPRINTFN("drc reset: %x", drck->get_index(drc));
trace_spapr_drc_reset(drck->get_index(drc));
/* immediately upon reset we can safely assume DRCs whose devices
* are pending removal can be safely removed, and that they will
* subsequently be left in an ISOLATED state. move the DRC to this
@ -502,7 +488,7 @@ static void realize(DeviceState *d, Error **errp)
gchar *child_name;
Error *err = NULL;
DPRINTFN("drc realize: %x", drck->get_index(drc));
trace_spapr_drc_realize(drck->get_index(drc));
/* NOTE: we do this as part of realize/unrealize due to the fact
* that the guest will communicate with the DRC via RTAS calls
* referencing the global DRC index. By unlinking the DRC
@ -513,7 +499,7 @@ static void realize(DeviceState *d, Error **errp)
root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
snprintf(link_name, sizeof(link_name), "%x", drck->get_index(drc));
child_name = object_get_canonical_path_component(OBJECT(drc));
DPRINTFN("drc child name: %s", child_name);
trace_spapr_drc_realize_child(drck->get_index(drc), child_name);
object_property_add_alias(root_container, link_name,
drc->owner, child_name, &err);
if (err) {
@ -521,7 +507,7 @@ static void realize(DeviceState *d, Error **errp)
object_unref(OBJECT(drc));
}
g_free(child_name);
DPRINTFN("drc realize complete");
trace_spapr_drc_realize_complete(drck->get_index(drc));
}
static void unrealize(DeviceState *d, Error **errp)
@ -532,7 +518,7 @@ static void unrealize(DeviceState *d, Error **errp)
char name[256];
Error *err = NULL;
DPRINTFN("drc unrealize: %x", drck->get_index(drc));
trace_spapr_drc_unrealize(drck->get_index(drc));
root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
snprintf(name, sizeof(name), "%x", drck->get_index(drc));
object_property_del(root_container, name, &err);


@ -201,7 +201,7 @@ static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
switch (ret) {
case REMOVE_SUCCESS:
check_tlb_flush(env);
check_tlb_flush(env, true);
return H_SUCCESS;
case REMOVE_NOT_FOUND:
@ -282,7 +282,7 @@ static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
}
}
exit:
check_tlb_flush(env);
check_tlb_flush(env, true);
return rc;
}
@ -319,6 +319,8 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
ppc_hash64_store_hpte(cpu, pte_index,
(v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
/* Flush the tlb */
check_tlb_flush(env, true);
/* Don't need a memory barrier, due to qemu's global lock */
ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
return H_SUCCESS;


@ -47,6 +47,7 @@
#include "sysemu/device_tree.h"
#include "sysemu/kvm.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/vfio/vfio.h"
@ -1544,6 +1545,7 @@ static Property spapr_phb_properties[] = {
DEFINE_PROP_BOOL("ddw", sPAPRPHBState, ddw_enabled, true),
DEFINE_PROP_UINT64("pgsz", sPAPRPHBState, page_size_mask,
(1ULL << 12) | (1ULL << 16)),
DEFINE_PROP_UINT32("numa_node", sPAPRPHBState, numa_node, -1),
DEFINE_PROP_END_OF_LIST(),
};
@ -1805,6 +1807,11 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
cpu_to_be32(1),
cpu_to_be32(RTAS_IBM_RESET_PE_DMA_WINDOW)
};
uint32_t associativity[] = {cpu_to_be32(0x4),
cpu_to_be32(0x0),
cpu_to_be32(0x0),
cpu_to_be32(0x0),
cpu_to_be32(phb->numa_node)};
sPAPRTCETable *tcet;
PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
sPAPRFDT s_fdt;
@ -1837,6 +1844,12 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
&ddw_extensions, sizeof(ddw_extensions)));
}
/* Advertise NUMA via ibm,associativity */
if (nb_numa_nodes > 1) {
_FDT(fdt_setprop(fdt, bus_off, "ibm,associativity", associativity,
sizeof(associativity)));
}
/* Build the interrupt-map, this must matches what is done
* in pci_spapr_map_irq
*/


@ -37,6 +37,7 @@
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/ppc/spapr_rtas.h"
#include "hw/ppc/ppc.h"
#include "qapi-event.h"
#include "hw/boards.h"
@ -44,16 +45,7 @@
#include <libfdt.h>
#include "hw/ppc/spapr_drc.h"
#include "qemu/cutils.h"
/* #define DEBUG_SPAPR */
#ifdef DEBUG_SPAPR
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
#include "trace.h"
static sPAPRConfigureConnectorState *spapr_ccs_find(sPAPRMachineState *spapr,
uint32_t drc_index)
@ -436,8 +428,7 @@ static void rtas_set_indicator(PowerPCCPU *cpu, sPAPRMachineState *spapr,
/* if this is a DR sensor we can assume sensor_index == drc_index */
drc = spapr_dr_connector_by_index(sensor_index);
if (!drc) {
DPRINTF("rtas_set_indicator: invalid sensor/DRC index: %xh\n",
sensor_index);
trace_spapr_rtas_set_indicator_invalid(sensor_index);
ret = RTAS_OUT_PARAM_ERROR;
goto out;
}
@ -476,8 +467,7 @@ out:
out_unimplemented:
/* currently only DR-related sensors are implemented */
DPRINTF("rtas_set_indicator: sensor/indicator not implemented: %d\n",
sensor_type);
trace_spapr_rtas_set_indicator_not_supported(sensor_index, sensor_type);
rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
}
@ -503,16 +493,15 @@ static void rtas_get_sensor_state(PowerPCCPU *cpu, sPAPRMachineState *spapr,
if (sensor_type != RTAS_SENSOR_TYPE_ENTITY_SENSE) {
/* currently only DR-related sensors are implemented */
DPRINTF("rtas_get_sensor_state: sensor/indicator not implemented: %d\n",
sensor_type);
trace_spapr_rtas_get_sensor_state_not_supported(sensor_index,
sensor_type);
ret = RTAS_OUT_NOT_SUPPORTED;
goto out;
}
drc = spapr_dr_connector_by_index(sensor_index);
if (!drc) {
DPRINTF("rtas_get_sensor_state: invalid sensor/DRC index: %xh\n",
sensor_index);
trace_spapr_rtas_get_sensor_state_invalid(sensor_index);
ret = RTAS_OUT_PARAM_ERROR;
goto out;
}
@ -569,8 +558,7 @@ static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
drc_index = rtas_ld(wa_addr, 0);
drc = spapr_dr_connector_by_index(drc_index);
if (!drc) {
DPRINTF("rtas_ibm_configure_connector: invalid DRC index: %xh\n",
drc_index);
trace_spapr_rtas_ibm_configure_connector_invalid(drc_index);
rc = RTAS_OUT_PARAM_ERROR;
goto out;
}
@ -578,8 +566,7 @@ static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
fdt = drck->get_fdt(drc, NULL);
if (!fdt) {
DPRINTF("rtas_ibm_configure_connector: Missing FDT for DRC index: %xh\n",
drc_index);
trace_spapr_rtas_ibm_configure_connector_missing_fdt(drc_index);
rc = SPAPR_DR_CC_RESPONSE_NOT_CONFIGURABLE;
goto out;
}
@ -693,6 +680,24 @@ target_ulong spapr_rtas_call(PowerPCCPU *cpu, sPAPRMachineState *spapr,
return H_PARAMETER;
}
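/*
 * qtest entry point: look the RTAS call up by name in rtas_table and run
 * its handler directly on the first vCPU, so tests can exercise RTAS
 * without going through the guest; unknown names return H_PARAMETER.
 */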
uint64_t qtest_rtas_call(char *cmd, uint32_t nargs, uint64_t args,
uint32_t nret, uint64_t rets)
{
int token;
for (token = 0; token < RTAS_TOKEN_MAX - RTAS_TOKEN_BASE; token++) {
if (strcmp(cmd, rtas_table[token].name) == 0) {
sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
rtas_table[token].fn(cpu, spapr, token + RTAS_TOKEN_BASE,
nargs, args, nret, rets);
return H_SUCCESS;
}
}
return H_PARAMETER;
}
void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn)
{
assert((token >= RTAS_TOKEN_BASE) && (token < RTAS_TOKEN_MAX));


@ -36,19 +36,10 @@
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/ppc/xics.h"
#include "trace.h"
#include <libfdt.h>
/* #define DEBUG_SPAPR */
#ifdef DEBUG_SPAPR
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
static Property spapr_vio_props[] = {
DEFINE_PROP_UINT32("irq", VIOsPAPRDevice, irq, 0), \
DEFINE_PROP_END_OF_LIST(),
@ -202,9 +193,7 @@ static target_ulong h_reg_crq(PowerPCCPU *cpu, sPAPRMachineState *spapr,
dev->crq.qsize = queue_len;
dev->crq.qnext = 0;
DPRINTF("CRQ for dev 0x" TARGET_FMT_lx " registered at 0x"
TARGET_FMT_lx "/0x" TARGET_FMT_lx "\n",
reg, queue_addr, queue_len);
trace_spapr_vio_h_reg_crq(reg, queue_addr, queue_len);
return H_SUCCESS;
}
@ -214,7 +203,7 @@ static target_ulong free_crq(VIOsPAPRDevice *dev)
dev->crq.qsize = 0;
dev->crq.qnext = 0;
DPRINTF("CRQ for dev 0x%" PRIx32 " freed\n", dev->reg);
trace_spapr_vio_free_crq(dev->reg);
return H_SUCCESS;
}


@ -35,6 +35,39 @@ spapr_iommu_ddw_create(uint64_t buid, uint32_t cfgaddr, uint64_t pg_size, uint64
spapr_iommu_ddw_remove(uint32_t liobn) "liobn=%"PRIx32
spapr_iommu_ddw_reset(uint64_t buid, uint32_t cfgaddr) "buid=%"PRIx64" addr=%"PRIx32
# hw/ppc/spapr_drc.c
spapr_drc_set_isolation_state(uint32_t index, int state) "drc: 0x%"PRIx32", state: %"PRIx32
spapr_drc_set_isolation_state_finalizing(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_set_isolation_state_deferring(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_set_indicator_state(uint32_t index, int state) "drc: 0x%"PRIx32", state: 0x%x"
spapr_drc_set_allocation_state(uint32_t index, int state) "drc: 0x%"PRIx32", state: 0x%x"
spapr_drc_set_allocation_state_finalizing(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_set_configured(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_set_configured_skipping(uint32_t index) "drc: 0x%"PRIx32", isolated device"
spapr_drc_entity_sense(uint32_t index, int state) "drc: 0x%"PRIx32", state: 0x%x"
spapr_drc_attach(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_detach(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_awaiting_isolated(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_awaiting_unusable(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_awaiting_allocation(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_reset(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_realize(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_realize_child(uint32_t index, char *childname) "drc: 0x%"PRIx32", child name: %s"
spapr_drc_realize_complete(uint32_t index) "drc: 0x%"PRIx32
spapr_drc_unrealize(uint32_t index) "drc: 0x%"PRIx32
# hw/ppc/spapr_rtas.c
spapr_rtas_set_indicator_invalid(uint32_t index) "sensor index: 0x%"PRIx32
spapr_rtas_set_indicator_not_supported(uint32_t index, uint32_t type) "sensor index: 0x%"PRIx32", type: %"PRIu32
spapr_rtas_get_sensor_state_not_supported(uint32_t index, uint32_t type) "sensor index: 0x%"PRIx32", type: %"PRIu32
spapr_rtas_get_sensor_state_invalid(uint32_t index) "sensor index: 0x%"PRIx32
spapr_rtas_ibm_configure_connector_invalid(uint32_t index) "DRC index: 0x%"PRIx32
spapr_rtas_ibm_configure_connector_missing_fdt(uint32_t index) "DRC index: 0x%"PRIx32
# hw/ppc/spapr_vio.c
spapr_vio_h_reg_crq(uint64_t reg, uint64_t queue_addr, uint64_t queue_len) "CRQ for dev 0x%" PRIx64 " registered at 0x%" PRIx64 "/0x%" PRIx64
spapr_vio_free_crq(uint32_t reg) "CRQ for dev 0x%" PRIx32 " freed"
# hw/ppc/ppc.c
ppc_tb_adjust(uint64_t offs1, uint64_t offs2, int64_t diff, int64_t seconds) "adjusted from 0x%"PRIx64" to 0x%"PRIx64", diff %"PRId64" (%"PRId64"s)"


@ -42,19 +42,10 @@
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "viosrp.h"
#include "trace.h"
#include <libfdt.h>
/*#define DEBUG_VSCSI*/
#ifdef DEBUG_VSCSI
#define DPRINTF(fmt, ...) \
do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
/*
* Virtual SCSI device
*/
@ -237,8 +228,7 @@ static int vscsi_send_rsp(VSCSIState *s, vscsi_req *req,
int total_len = sizeof(iu->srp.rsp);
uint8_t sol_not = iu->srp.cmd.sol_not;
DPRINTF("VSCSI: Sending resp status: 0x%x, "
"res_in: %d, res_out: %d\n", status, res_in, res_out);
trace_spapr_vscsi_send_rsp(status, res_in, res_out);
memset(iu, 0, sizeof(struct srp_rsp));
iu->srp.rsp.opcode = SRP_RSP;
@ -298,13 +288,13 @@ static int vscsi_fetch_desc(VSCSIState *s, struct vscsi_req *req,
switch (req->dma_fmt) {
case SRP_NO_DATA_DESC: {
DPRINTF("VSCSI: no data descriptor\n");
trace_spapr_vscsi_fetch_desc_no_data();
return 0;
}
case SRP_DATA_DESC_DIRECT: {
memcpy(ret, cmd->add_data + req->cdb_offset, sizeof(*ret));
assert(req->cur_desc_num == 0);
DPRINTF("VSCSI: direct segment\n");
trace_spapr_vscsi_fetch_desc_direct();
break;
}
case SRP_DATA_DESC_INDIRECT: {
@ -312,30 +302,29 @@ static int vscsi_fetch_desc(VSCSIState *s, struct vscsi_req *req,
(cmd->add_data + req->cdb_offset);
if (n < req->local_desc) {
*ret = tmp->desc_list[n];
DPRINTF("VSCSI: indirect segment local tag=0x%x desc#%d/%d\n",
req->qtag, n, req->local_desc);
trace_spapr_vscsi_fetch_desc_indirect(req->qtag, n,
req->local_desc);
} else if (n < req->total_desc) {
int rc;
struct srp_direct_buf tbl_desc = vscsi_swap_desc(tmp->table_desc);
unsigned desc_offset = n * sizeof(struct srp_direct_buf);
if (desc_offset >= tbl_desc.len) {
DPRINTF("VSCSI: #%d is ouf of range (%d bytes)\n",
n, desc_offset);
trace_spapr_vscsi_fetch_desc_out_of_range(n, desc_offset);
return -1;
}
rc = spapr_vio_dma_read(&s->vdev, tbl_desc.va + desc_offset,
ret, sizeof(struct srp_direct_buf));
if (rc) {
DPRINTF("VSCSI: spapr_vio_dma_read -> %d reading ext_desc\n",
rc);
trace_spapr_vscsi_fetch_desc_dma_read_error(rc);
return -1;
}
DPRINTF("VSCSI: indirect segment ext. tag=0x%x desc#%d/%d { va=%"PRIx64" len=%x }\n",
req->qtag, n, req->total_desc, tbl_desc.va, tbl_desc.len);
trace_spapr_vscsi_fetch_desc_indirect_seg_ext(req->qtag, n,
req->total_desc,
tbl_desc.va,
tbl_desc.len);
} else {
DPRINTF("VSCSI: Out of descriptors !\n");
trace_spapr_vscsi_fetch_desc_out_of_desc();
return 0;
}
break;
@ -347,15 +336,16 @@ static int vscsi_fetch_desc(VSCSIState *s, struct vscsi_req *req,
*ret = vscsi_swap_desc(*ret);
if (buf_offset > ret->len) {
DPRINTF(" offset=%x is out of a descriptor #%d boundary=%x\n",
buf_offset, req->cur_desc_num, ret->len);
trace_spapr_vscsi_fetch_desc_out_of_desc_boundary(buf_offset,
req->cur_desc_num,
ret->len);
return -1;
}
ret->va += buf_offset;
ret->len -= buf_offset;
DPRINTF(" cur=%d offs=%x ret { va=%"PRIx64" len=%x }\n",
req->cur_desc_num, req->cur_desc_offset, ret->va, ret->len);
trace_spapr_vscsi_fetch_desc_done(req->cur_desc_num, req->cur_desc_offset,
ret->va, ret->len);
return ret->len ? 1 : 0;
}
@ -398,7 +388,7 @@ static int vscsi_srp_indirect_data(VSCSIState *s, vscsi_req *req,
int rc = 0;
uint32_t llen, total = 0;
DPRINTF("VSCSI: indirect segment 0x%x bytes\n", len);
trace_spapr_vscsi_srp_indirect_data(len);
/* While we have data ... */
while (len) {
@ -417,11 +407,10 @@ static int vscsi_srp_indirect_data(VSCSIState *s, vscsi_req *req,
rc = spapr_vio_dma_write(&s->vdev, md.va, buf, llen);
}
if (rc) {
DPRINTF("VSCSI: spapr_vio_dma_r/w(%d) -> %d\n", req->writing, rc);
trace_spapr_vscsi_srp_indirect_data_rw(req->writing, rc);
break;
}
DPRINTF("VSCSI: data: %02x %02x %02x %02x...\n",
buf[0], buf[1], buf[2], buf[3]);
trace_spapr_vscsi_srp_indirect_data_buf(buf[0], buf[1], buf[2], buf[3]);
len -= llen;
buf += llen;
@ -447,7 +436,7 @@ static int vscsi_srp_transfer_data(VSCSIState *s, vscsi_req *req,
switch (req->dma_fmt) {
case SRP_NO_DATA_DESC:
DPRINTF("VSCSI: no data desc transfer, skipping 0x%x bytes\n", len);
trace_spapr_vscsi_srp_transfer_data(len);
break;
case SRP_DATA_DESC_DIRECT:
err = vscsi_srp_direct_data(s, req, buf, len);
@ -527,8 +516,7 @@ static void vscsi_transfer_data(SCSIRequest *sreq, uint32_t len)
uint8_t *buf;
int rc = 0;
DPRINTF("VSCSI: SCSI xfer complete tag=0x%x len=0x%x, req=%p\n",
sreq->tag, len, req);
trace_spapr_vscsi_transfer_data(sreq->tag, len, req);
if (req == NULL) {
fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag);
return;
@ -557,8 +545,7 @@ static void vscsi_command_complete(SCSIRequest *sreq, uint32_t status, size_t re
vscsi_req *req = sreq->hba_private;
int32_t res_in = 0, res_out = 0;
DPRINTF("VSCSI: SCSI cmd complete, tag=0x%x status=0x%x, req=%p\n",
sreq->tag, status, req);
trace_spapr_vscsi_command_complete(sreq->tag, status, req);
if (req == NULL) {
fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag);
return;
@ -567,16 +554,15 @@ static void vscsi_command_complete(SCSIRequest *sreq, uint32_t status, size_t re
if (status == CHECK_CONDITION) {
req->senselen = scsi_req_get_sense(req->sreq, req->sense,
sizeof(req->sense));
DPRINTF("VSCSI: Sense data, %d bytes:\n", req->senselen);
DPRINTF(" %02x %02x %02x %02x %02x %02x %02x %02x\n",
trace_spapr_vscsi_command_complete_sense_data1(req->senselen,
req->sense[0], req->sense[1], req->sense[2], req->sense[3],
req->sense[4], req->sense[5], req->sense[6], req->sense[7]);
DPRINTF(" %02x %02x %02x %02x %02x %02x %02x %02x\n",
trace_spapr_vscsi_command_complete_sense_data2(
req->sense[8], req->sense[9], req->sense[10], req->sense[11],
req->sense[12], req->sense[13], req->sense[14], req->sense[15]);
}
DPRINTF("VSCSI: Command complete err=%d\n", status);
trace_spapr_vscsi_command_complete_status(status);
if (status == 0) {
/* We handle overflows, not underflows for normal commands,
* but hopefully nobody cares
@ -635,8 +621,8 @@ static void vscsi_save_request(QEMUFile *f, SCSIRequest *sreq)
vmstate_save_state(f, &vmstate_spapr_vscsi_req, req, NULL);
DPRINTF("VSCSI: saving tag=%u, current desc#%d, offset=%x\n",
req->qtag, req->cur_desc_num, req->cur_desc_offset);
trace_spapr_vscsi_save_request(req->qtag, req->cur_desc_num,
req->cur_desc_offset);
}
static void *vscsi_load_request(QEMUFile *f, SCSIRequest *sreq)
@ -660,8 +646,8 @@ static void *vscsi_load_request(QEMUFile *f, SCSIRequest *sreq)
req->sreq = scsi_req_ref(sreq);
DPRINTF("VSCSI: restoring tag=%u, current desc#%d, offset=%x\n",
req->qtag, req->cur_desc_num, req->cur_desc_offset);
trace_spapr_vscsi_load_request(req->qtag, req->cur_desc_num,
req->cur_desc_offset);
return req;
}
@ -672,7 +658,7 @@ static void vscsi_process_login(VSCSIState *s, vscsi_req *req)
struct srp_login_rsp *rsp = &iu->srp.login_rsp;
uint64_t tag = iu->srp.rsp.tag;
DPRINTF("VSCSI: Got login, sendin response !\n");
trace_spapr_vscsi__process_login();
/* TODO handle case that requested size is wrong and
* buffer format is wrong
@ -795,8 +781,7 @@ static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req)
sdev = vscsi_device_find(&s->bus, be64_to_cpu(srp->cmd.lun), &lun);
if (!sdev) {
DPRINTF("VSCSI: Command for lun %08" PRIx64 " with no drive\n",
be64_to_cpu(srp->cmd.lun));
trace_spapr_vscsi_queue_cmd_no_drive(be64_to_cpu(srp->cmd.lun));
if (srp->cmd.cdb[0] == INQUIRY) {
vscsi_inquiry_no_target(s, req);
} else {
@ -808,9 +793,8 @@ static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req)
req->sreq = scsi_req_new(sdev, req->qtag, lun, srp->cmd.cdb, req);
n = scsi_req_enqueue(req->sreq);
DPRINTF("VSCSI: Queued command tag 0x%x CMD 0x%x=%s LUN %d ret: %d\n",
req->qtag, srp->cmd.cdb[0], scsi_command_name(srp->cmd.cdb[0]),
lun, n);
trace_spapr_vscsi_queue_cmd(req->qtag, srp->cmd.cdb[0],
scsi_command_name(srp->cmd.cdb[0]), lun, n);
if (n) {
/* Transfer direction must be set before preprocessing the
@ -1141,7 +1125,7 @@ static int vscsi_do_crq(struct VIOsPAPRDevice *dev, uint8_t *crq_data)
crq.s.IU_length = be16_to_cpu(crq.s.IU_length);
crq.s.IU_data_ptr = be64_to_cpu(crq.s.IU_data_ptr);
DPRINTF("VSCSI: do_crq %02x %02x ...\n", crq.raw[0], crq.raw[1]);
trace_spapr_vscsi_do_crq(crq.raw[0], crq.raw[1]);
switch (crq.s.valid) {
case 0xc0: /* Init command/response */


@ -202,3 +202,30 @@ esp_pci_dma_abort(uint32_t val) "ABORT (%.8x)"
esp_pci_dma_start(uint32_t val) "START (%.8x)"
esp_pci_sbac_read(uint32_t reg) "sbac: 0x%8.8x"
esp_pci_sbac_write(uint32_t reg, uint32_t val) "sbac: 0x%8.8x -> 0x%8.8x"
# hw/scsi/spapr_vscsi.c
spapr_vscsi_send_rsp(uint8_t status, int32_t res_in, int32_t res_out) "status: 0x%x, res_in: %"PRId32", res_out: %"PRId32
spapr_vscsi_fetch_desc_no_data(void) "no data descriptor"
spapr_vscsi_fetch_desc_direct(void) "direct segment"
spapr_vscsi_fetch_desc_indirect(uint32_t qtag, unsigned desc, unsigned local_desc) "indirect segment local tag=0x%"PRIx32" desc#%u/%u"
spapr_vscsi_fetch_desc_out_of_range(unsigned desc, unsigned desc_offset) "#%u is ouf of range (%u bytes)"
spapr_vscsi_fetch_desc_dma_read_error(int rc) "spapr_vio_dma_read -> %d reading ext_desc"
spapr_vscsi_fetch_desc_indirect_seg_ext(uint32_t qtag, unsigned n, unsigned desc, uint64_t va, uint32_t len) "indirect segment ext. tag=0x%"PRIx32" desc#%u/%u { va=0x%"PRIx64" len=0x%"PRIx32" }"
spapr_vscsi_fetch_desc_out_of_desc(void) "Out of descriptors !"
spapr_vscsi_fetch_desc_out_of_desc_boundary(unsigned offset, unsigned desc, uint32_t len) " offset=0x%x is out of a descriptor #%u boundary=%"PRIx32
spapr_vscsi_fetch_desc_done(unsigned desc_num, unsigned desc_offset, uint64_t va, uint32_t len) " cur=%u offs=0x%x ret { va=0x%"PRIx64" len=0x%"PRIx32" }"
spapr_vscsi_srp_indirect_data(uint32_t len) "indirect segment 0x%"PRIx32" bytes"
spapr_vscsi_srp_indirect_data_rw(int writing, int rc) "spapr_vio_dma_r/w(%d) -> %d"
spapr_vscsi_srp_indirect_data_buf(unsigned a, unsigned b, unsigned c, unsigned d) " data: %02x %02x %02x %02x..."
spapr_vscsi_srp_transfer_data(uint32_t len) "no data desc transfer, skipping 0x%"PRIx32" bytes"
spapr_vscsi_transfer_data(uint32_t tag, uint32_t len, void *req) "SCSI xfer complete tag=0x%"PRIx32" len=0x%"PRIx32", req=%p"
spapr_vscsi_command_complete(uint32_t tag, uint32_t status, void *req) "SCSI cmd complete, tag=0x%"PRIx32" status=0x%"PRIx32", req=%p"
spapr_vscsi_command_complete_sense_data1(uint32_t len, unsigned s0, unsigned s1, unsigned s2, unsigned s3, unsigned s4, unsigned s5, unsigned s6, unsigned s7) "Sense data, %d bytes: %02x %02x %02x %02x %02x %02x %02x %02x"
spapr_vscsi_command_complete_sense_data2(unsigned s8, unsigned s9, unsigned s10, unsigned s11, unsigned s12, unsigned s13, unsigned s14, unsigned s15) " %02x %02x %02x %02x %02x %02x %02x %02x"
spapr_vscsi_command_complete_status(uint32_t status) "Command complete err=%"PRIu32
spapr_vscsi_save_request(uint32_t qtag, unsigned desc, unsigned offset) "saving tag=%"PRIu32", current desc#%u, offset=0x%x"
spapr_vscsi_load_request(uint32_t qtag, unsigned desc, unsigned offset) "restoring tag=%"PRIu32", current desc#%u, offset=0x%x"
spapr_vscsi__process_login(void) "Got login, sending response !"
spapr_vscsi_queue_cmd_no_drive(uint64_t lun) "Command for lun %08" PRIx64 " with no drive"
spapr_vscsi_queue_cmd(uint32_t qtag, unsigned cdb, const char *cmd, int lun, int ret) "Queued command tag 0x%"PRIx32" CMD 0x%x=%s LUN %d ret: %d"
spapr_vscsi_do_crq(unsigned c0, unsigned c1) "crq: %02x %02x ..."

include/hw/input/adb-keys.h (new file, 141 lines)

@ -0,0 +1,141 @@
/*
* QEMU System Emulator
*
* Copyright (c) 2016 John Arbuckle
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
/*
* adb-keys.h
*
* Provides an enum of all the Macintosh keycodes.
* Additional information: http://www.archive.org/stream/apple-guide-macintosh-family-hardware/Apple_Guide_to_the_Macintosh_Family_Hardware_2e#page/n345/mode/2up
* page 308
*/
#ifndef ADB_KEYS_H
#define ADB_KEYS_H
enum {
ADB_KEY_A = 0x00,
ADB_KEY_B = 0x0b,
ADB_KEY_C = 0x08,
ADB_KEY_D = 0x02,
ADB_KEY_E = 0x0e,
ADB_KEY_F = 0x03,
ADB_KEY_G = 0x05,
ADB_KEY_H = 0x04,
ADB_KEY_I = 0x22,
ADB_KEY_J = 0x26,
ADB_KEY_K = 0x28,
ADB_KEY_L = 0x25,
ADB_KEY_M = 0x2e,
ADB_KEY_N = 0x2d,
ADB_KEY_O = 0x1f,
ADB_KEY_P = 0x23,
ADB_KEY_Q = 0x0c,
ADB_KEY_R = 0x0f,
ADB_KEY_S = 0x01,
ADB_KEY_T = 0x11,
ADB_KEY_U = 0x20,
ADB_KEY_V = 0x09,
ADB_KEY_W = 0x0d,
ADB_KEY_X = 0x07,
ADB_KEY_Y = 0x10,
ADB_KEY_Z = 0x06,
ADB_KEY_0 = 0x1d,
ADB_KEY_1 = 0x12,
ADB_KEY_2 = 0x13,
ADB_KEY_3 = 0x14,
ADB_KEY_4 = 0x15,
ADB_KEY_5 = 0x17,
ADB_KEY_6 = 0x16,
ADB_KEY_7 = 0x1a,
ADB_KEY_8 = 0x1c,
ADB_KEY_9 = 0x19,
ADB_KEY_GRAVE_ACCENT = 0x32,
ADB_KEY_MINUS = 0x1b,
ADB_KEY_EQUAL = 0x18,
ADB_KEY_DELETE = 0x33,
ADB_KEY_CAPS_LOCK = 0x39,
ADB_KEY_TAB = 0x30,
ADB_KEY_RETURN = 0x24,
ADB_KEY_LEFT_BRACKET = 0x21,
ADB_KEY_RIGHT_BRACKET = 0x1e,
ADB_KEY_BACKSLASH = 0x2a,
ADB_KEY_SEMICOLON = 0x29,
ADB_KEY_APOSTROPHE = 0x27,
ADB_KEY_COMMA = 0x2b,
ADB_KEY_PERIOD = 0x2f,
ADB_KEY_FORWARD_SLASH = 0x2c,
ADB_KEY_LEFT_SHIFT = 0x38,
ADB_KEY_RIGHT_SHIFT = 0x7b,
ADB_KEY_SPACEBAR = 0x31,
ADB_KEY_LEFT_CONTROL = 0x36,
ADB_KEY_RIGHT_CONTROL = 0x7d,
ADB_KEY_LEFT_OPTION = 0x3a,
ADB_KEY_RIGHT_OPTION = 0x7c,
ADB_KEY_COMMAND = 0x37,
ADB_KEY_KP_0 = 0x52,
ADB_KEY_KP_1 = 0x53,
ADB_KEY_KP_2 = 0x54,
ADB_KEY_KP_3 = 0x55,
ADB_KEY_KP_4 = 0x56,
ADB_KEY_KP_5 = 0x57,
ADB_KEY_KP_6 = 0x58,
ADB_KEY_KP_7 = 0x59,
ADB_KEY_KP_8 = 0x5b,
ADB_KEY_KP_9 = 0x5c,
ADB_KEY_KP_PERIOD = 0x41,
ADB_KEY_KP_ENTER = 0x4c,
ADB_KEY_KP_PLUS = 0x45,
ADB_KEY_KP_SUBTRACT = 0x4e,
ADB_KEY_KP_MULTIPLY = 0x43,
ADB_KEY_KP_DIVIDE = 0x4b,
ADB_KEY_KP_EQUAL = 0x51,
ADB_KEY_KP_CLEAR = 0x47,
ADB_KEY_UP = 0x3e,
ADB_KEY_DOWN = 0x3d,
ADB_KEY_LEFT = 0x3b,
ADB_KEY_RIGHT = 0x3c,
ADB_KEY_HELP = 0x72,
ADB_KEY_HOME = 0x73,
ADB_KEY_PAGE_UP = 0x74,
ADB_KEY_PAGE_DOWN = 0x79,
ADB_KEY_END = 0x77,
ADB_KEY_FORWARD_DELETE = 0x75,
ADB_KEY_ESC = 0x35,
ADB_KEY_F1 = 0x7a,
ADB_KEY_F2 = 0x78,
ADB_KEY_F3 = 0x63,
ADB_KEY_F4 = 0x76,
ADB_KEY_F5 = 0x60,
ADB_KEY_F6 = 0x61,
ADB_KEY_F7 = 0x62,
ADB_KEY_F8 = 0x64,
ADB_KEY_F9 = 0x65,
ADB_KEY_F10 = 0x6d,
ADB_KEY_F11 = 0x67,
ADB_KEY_F12 = 0x6f,
ADB_KEY_F13 = 0x69,
ADB_KEY_F14 = 0x6b,
ADB_KEY_F15 = 0x71,
ADB_KEY_VOLUME_UP = 0x48,
ADB_KEY_VOLUME_DOWN = 0x49,
ADB_KEY_VOLUME_MUTE = 0x4a,
ADB_KEY_POWER = 0x7f7f
};
/* Could not find the value for this key. */
/* #define ADB_KEY_EJECT */
#endif /* ADB_KEYS_H */
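
The header above only defines raw ADB scancodes; a keyboard front end still has to translate whatever key representation it receives into them. Below is a minimal sketch of such a translation table, assuming the qapi Q_KEY_CODE_* names from ui/input.h; the table name and the selection of entries are purely illustrative and not part of this series:

#include "qemu/osdep.h"
#include "ui/input.h"
#include "hw/input/adb-keys.h"

/* Illustrative fragment: map a few QKeyCode values to ADB keycodes. */
static const uint16_t qcode_to_adb_keycode[] = {
    [Q_KEY_CODE_A]    = ADB_KEY_A,
    [Q_KEY_CODE_S]    = ADB_KEY_S,
    [Q_KEY_CODE_RET]  = ADB_KEY_RETURN,
    [Q_KEY_CODE_SPC]  = ADB_KEY_SPACEBAR,
    [Q_KEY_CODE_ESC]  = ADB_KEY_ESC,
    [Q_KEY_CODE_UP]   = ADB_KEY_UP,
    [Q_KEY_CODE_DOWN] = ADB_KEY_DOWN,
};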

@ -75,6 +75,8 @@ struct sPAPRPHBState {
bool ddw_enabled;
uint64_t page_size_mask;
uint64_t dma64_win_addr;
uint32_t numa_node;
};
#define SPAPR_PCI_MAX_INDEX 255

@ -16,6 +16,10 @@
#define TYPE_SPAPR_CPU_CORE "spapr-cpu-core"
#define SPAPR_CPU_CORE(obj) \
OBJECT_CHECK(sPAPRCPUCore, (obj), TYPE_SPAPR_CPU_CORE)
#define SPAPR_CPU_CORE_CLASS(klass) \
OBJECT_CLASS_CHECK(sPAPRCPUCoreClass, (klass), TYPE_SPAPR_CPU_CORE)
#define SPAPR_CPU_CORE_GET_CLASS(obj) \
OBJECT_GET_CLASS(sPAPRCPUCoreClass, (obj), TYPE_SPAPR_CPU_CORE)
typedef struct sPAPRCPUCore {
/*< private >*/
@ -23,9 +27,13 @@ typedef struct sPAPRCPUCore {
/*< public >*/
void *threads;
ObjectClass *cpu_class;
} sPAPRCPUCore;
typedef struct sPAPRCPUCoreClass {
DeviceClass parent_class;
ObjectClass *cpu_class;
} sPAPRCPUCoreClass;
void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp);
char *spapr_get_cpu_core_type(const char *model);
@ -33,4 +41,5 @@ void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp);
void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
Error **errp);
void spapr_cpu_core_class_init(ObjectClass *oc, void *data);
#endif

@ -0,0 +1,10 @@
#ifndef HW_SPAPR_RTAS_H
#define HW_SPAPR_RTAS_H
/*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
uint64_t qtest_rtas_call(char *cmd, uint32_t nargs, uint64_t args,
uint32_t nret, uint64_t rets);
#endif /* HW_SPAPR_RTAS_H */

@ -149,7 +149,7 @@ struct ICSState {
static inline bool ics_valid_irq(ICSState *ics, uint32_t nr)
{
return (nr >= ics->offset)
return (ics->offset != 0) && (nr >= ics->offset)
&& (nr < (ics->offset + ics->nr_irqs));
}

@ -741,8 +741,12 @@ static uint32_t get_elf_hwcap(void)
Altivec/FP/SPE support. Anything else is just a bonus. */
#define GET_FEATURE(flag, feature) \
do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flag, feature) \
do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flags, feature) \
do { \
if ((cpu->env.insns_flags2 & flags) == flags) { \
features |= feature; \
} \
} while (0)
GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
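
With the reworked GET_FEATURE2 above, a compound HWCAP bit is only advertised when every one of the ORed instruction-set flags is implemented, rather than when any single one is. A hedged illustration of the intended use inside get_elf_hwcap(); the exact flag combination tied to QEMU_PPC_FEATURE_ARCH_2_06 is not visible in this hunk, so the one below is an assumption:

    /* Only set the ARCH 2.06 bit if all of these 2.06 facilities exist. */
    GET_FEATURE2(PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206,
                 QEMU_PPC_FEATURE_ARCH_2_06);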

@ -1992,12 +1992,12 @@ void cpu_loop(CPUPPCState *env)
if (ret == -TARGET_ERESTARTSYS) {
break;
}
env->nip += 4;
if (ret == (target_ulong)(-TARGET_QEMU_ESIGRETURN)) {
/* Returning from a successful sigreturn syscall.
Avoid corrupting register state. */
break;
}
env->nip += 4;
if (ret > (target_ulong)(-515)) {
env->crf[0] |= 0x1;
ret = -ret;

@ -120,7 +120,9 @@
#define TARGET_NR_sysinfo 116
#define TARGET_NR_ipc 117
#define TARGET_NR_fsync 118
#if !defined(TARGET_PPC64)
#define TARGET_NR_sigreturn 119
#endif
#define TARGET_NR_clone 120
#define TARGET_NR_setdomainname 121
#define TARGET_NR_uname 122

@ -4454,7 +4454,12 @@ struct target_mcontext {
target_ulong mc_gregs[48];
/* Includes fpscr. */
uint64_t mc_fregs[33];
#if defined(TARGET_PPC64)
/* Pointer to the vector regs */
target_ulong v_regs;
#else
target_ulong mc_pad[2];
#endif
/* We need to handle Altivec and SPE at the same time, which no
kernel needs to do. Fortunately, the kernel defines this bit to
be Altivec-register-large all the time, rather than trying to
@ -4464,15 +4469,30 @@ struct target_mcontext {
uint32_t spe[33];
/* Altivec vector registers. The packing of VSCR and VRSAVE
varies depending on whether we're PPC64 or not: PPC64 splits
them apart; PPC32 stuffs them together. */
them apart; PPC32 stuffs them together.
We also need to account for the VSX registers on PPC64
*/
#if defined(TARGET_PPC64)
#define QEMU_NVRREG 34
#define QEMU_NVRREG (34 + 16)
/* On ppc64, this mcontext structure is naturally *unaligned*,
* or rather it is aligned on a 8 bytes boundary but not on
* a 16 bytes one. This pad fixes it up. This is also why the
* vector regs are referenced by the v_regs pointer above so
* any amount of padding can be added here
*/
target_ulong pad;
#else
/* On ppc32, we are already aligned to 16 bytes */
#define QEMU_NVRREG 33
#endif
ppc_avr_t altivec[QEMU_NVRREG];
/* We cannot use ppc_avr_t here as we do *not* want the implied
* 16-bytes alignment that would result from it. This would have
* the effect of making the whole struct target_mcontext aligned
* which breaks the layout of struct target_ucontext on ppc64.
*/
uint64_t altivec[QEMU_NVRREG][2];
#undef QEMU_NVRREG
} mc_vregs __attribute__((__aligned__(16)));
} mc_vregs;
};
/* See arch/powerpc/include/asm/sigcontext.h. */
@ -4626,6 +4646,16 @@ static target_ulong get_sigframe(struct target_sigaction *ka,
return (oldsp - frame_size) & ~0xFUL;
}
#if ((defined(TARGET_WORDS_BIGENDIAN) && defined(HOST_WORDS_BIGENDIAN)) || \
(!defined(HOST_WORDS_BIGENDIAN) && !defined(TARGET_WORDS_BIGENDIAN)))
#define PPC_VEC_HI 0
#define PPC_VEC_LO 1
#else
#define PPC_VEC_HI 1
#define PPC_VEC_LO 0
#endif
static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
{
target_ulong msr = env->msr;
@ -4652,18 +4682,33 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
/* Save Altivec registers if necessary. */
if (env->insns_flags & PPC_ALTIVEC) {
uint32_t *vrsave;
for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
ppc_avr_t *avr = &env->avr[i];
ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
ppc_avr_t *vreg = (ppc_avr_t *)&frame->mc_vregs.altivec[i];
__put_user(avr->u64[0], &vreg->u64[0]);
__put_user(avr->u64[1], &vreg->u64[1]);
__put_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
__put_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
}
/* Set MSR_VR in the saved MSR value to indicate that
frame->mc_vregs contains valid data. */
msr |= MSR_VR;
__put_user((uint32_t)env->spr[SPR_VRSAVE],
&frame->mc_vregs.altivec[32].u32[3]);
#if defined(TARGET_PPC64)
vrsave = (uint32_t *)&frame->mc_vregs.altivec[33];
/* 64-bit needs to put a pointer to the vectors in the frame */
__put_user(h2g(frame->mc_vregs.altivec), &frame->v_regs);
#else
vrsave = (uint32_t *)&frame->mc_vregs.altivec[32];
#endif
__put_user((uint32_t)env->spr[SPR_VRSAVE], vrsave);
}
/* Save VSX second halves */
if (env->insns_flags2 & PPC2_VSX) {
uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
__put_user(env->vsr[i], &vsregs[i]);
}
}
/* Save floating point registers. */
@ -4743,17 +4788,39 @@ static void restore_user_regs(CPUPPCState *env,
/* Restore Altivec registers if necessary. */
if (env->insns_flags & PPC_ALTIVEC) {
ppc_avr_t *v_regs;
uint32_t *vrsave;
#if defined(TARGET_PPC64)
uint64_t v_addr;
/* 64-bit needs to recover the pointer to the vectors from the frame */
__get_user(v_addr, &frame->v_regs);
v_regs = g2h(v_addr);
#else
v_regs = (ppc_avr_t *)frame->mc_vregs.altivec;
#endif
for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
ppc_avr_t *avr = &env->avr[i];
ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
ppc_avr_t *vreg = &v_regs[i];
__get_user(avr->u64[0], &vreg->u64[0]);
__get_user(avr->u64[1], &vreg->u64[1]);
__get_user(avr->u64[PPC_VEC_HI], &vreg->u64[0]);
__get_user(avr->u64[PPC_VEC_LO], &vreg->u64[1]);
}
/* Set MSR_VEC in the saved MSR value to indicate that
frame->mc_vregs contains valid data. */
__get_user(env->spr[SPR_VRSAVE],
(target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]));
#if defined(TARGET_PPC64)
vrsave = (uint32_t *)&v_regs[33];
#else
vrsave = (uint32_t *)&v_regs[32];
#endif
__get_user(env->spr[SPR_VRSAVE], vrsave);
}
/* Restore VSX second halves */
if (env->insns_flags2 & PPC2_VSX) {
uint64_t *vsregs = (uint64_t *)&frame->mc_vregs.altivec[34];
for (i = 0; i < ARRAY_SIZE(env->vsr); i++) {
__get_user(env->vsr[i], &vsregs[i]);
}
}
/* Restore floating point registers. */
@ -4784,6 +4851,7 @@ static void restore_user_regs(CPUPPCState *env,
}
}
#if !defined(TARGET_PPC64)
static void setup_frame(int sig, struct target_sigaction *ka,
target_sigset_t *set, CPUPPCState *env)
{
@ -4791,9 +4859,6 @@ static void setup_frame(int sig, struct target_sigaction *ka,
struct target_sigcontext *sc;
target_ulong frame_addr, newsp;
int err = 0;
#if defined(TARGET_PPC64)
struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
#endif
frame_addr = get_sigframe(ka, env, sizeof(*frame));
trace_user_setup_frame(env, frame_addr);
@ -4803,11 +4868,7 @@ static void setup_frame(int sig, struct target_sigaction *ka,
__put_user(ka->_sa_handler, &sc->handler);
__put_user(set->sig[0], &sc->oldmask);
#if TARGET_ABI_BITS == 64
__put_user(set->sig[0] >> 32, &sc->_unused[3]);
#else
__put_user(set->sig[1], &sc->_unused[3]);
#endif
__put_user(h2g(&frame->mctx), &sc->regs);
__put_user(sig, &sc->signal);
@ -4836,22 +4897,7 @@ static void setup_frame(int sig, struct target_sigaction *ka,
env->gpr[3] = sig;
env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
#if defined(TARGET_PPC64)
if (get_ppc64_abi(image) < 2) {
/* ELFv1 PPC64 function pointers are pointers to OPD entries. */
struct target_func_ptr *handler =
(struct target_func_ptr *)g2h(ka->_sa_handler);
env->nip = tswapl(handler->entry);
env->gpr[2] = tswapl(handler->toc);
} else {
/* ELFv2 PPC64 function pointers are entry points, but R12
* must also be set */
env->nip = tswapl((target_ulong) ka->_sa_handler);
env->gpr[12] = env->nip;
}
#else
env->nip = (target_ulong) ka->_sa_handler;
#endif
/* Signal handlers are entered in big-endian mode. */
env->msr &= ~(1ull << MSR_LE);
@ -4863,6 +4909,7 @@ sigsegv:
unlock_user_struct(frame, frame_addr, 1);
force_sigsegv(sig);
}
#endif /* !defined(TARGET_PPC64) */
static void setup_rt_frame(int sig, struct target_sigaction *ka,
target_siginfo_t *info,
@ -4960,6 +5007,7 @@ sigsegv:
}
#if !defined(TARGET_PPC64)
long do_sigreturn(CPUPPCState *env)
{
struct target_sigcontext *sc = NULL;
@ -4996,6 +5044,7 @@ sigsegv:
force_sig(TARGET_SIGSEGV);
return -TARGET_QEMU_ESIGRETURN;
}
#endif /* !defined(TARGET_PPC64) */
/* See arch/powerpc/kernel/signal_32.c. */
static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
@ -5939,7 +5988,8 @@ static void handle_pending_signal(CPUArchState *cpu_env, int sig,
#endif
/* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
|| defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
|| defined(TARGET_OPENRISC) || defined(TARGET_TILEGX) \
|| defined(TARGET_PPC64)
/* These targets do not have traditional signals. */
setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#else

@ -1025,7 +1025,7 @@ int monitor_set_cpu(int cpu_index)
CPUState *mon_get_cpu(void)
{
if (!cur_mon->mon_cpu) {
monitor_set_cpu(0);
monitor_set_cpu(first_cpu->cpu_index);
}
cpu_synchronize_state(cur_mon->mon_cpu);
return cur_mon->mon_cpu;

qtest.c

@ -27,6 +27,10 @@
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#ifdef TARGET_PPC64
#include "hw/ppc/spapr_rtas.h"
#endif
#define MAX_IRQ 256
@ -325,12 +329,13 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
} else if (strcmp(words[0], "outb") == 0 ||
strcmp(words[0], "outw") == 0 ||
strcmp(words[0], "outl") == 0) {
uint16_t addr;
uint32_t value;
unsigned long addr;
unsigned long value;
g_assert(words[1] && words[2]);
addr = strtoul(words[1], NULL, 0);
value = strtoul(words[2], NULL, 0);
g_assert(qemu_strtoul(words[1], NULL, 0, &addr) == 0);
g_assert(qemu_strtoul(words[2], NULL, 0, &value) == 0);
g_assert(addr <= 0xffff);
if (words[0][3] == 'b') {
cpu_outb(addr, value);
@ -344,11 +349,12 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
} else if (strcmp(words[0], "inb") == 0 ||
strcmp(words[0], "inw") == 0 ||
strcmp(words[0], "inl") == 0) {
uint16_t addr;
unsigned long addr;
uint32_t value = -1U;
g_assert(words[1]);
addr = strtoul(words[1], NULL, 0);
g_assert(qemu_strtoul(words[1], NULL, 0, &addr) == 0);
g_assert(addr <= 0xffff);
if (words[0][2] == 'b') {
value = cpu_inb(addr);
@ -367,8 +373,8 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
uint64_t value;
g_assert(words[1] && words[2]);
addr = strtoull(words[1], NULL, 0);
value = strtoull(words[2], NULL, 0);
g_assert(qemu_strtoull(words[1], NULL, 0, &addr) == 0);
g_assert(qemu_strtoull(words[2], NULL, 0, &value) == 0);
if (words[0][5] == 'b') {
uint8_t data = value;
@ -396,7 +402,7 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
uint64_t value = UINT64_C(-1);
g_assert(words[1]);
addr = strtoull(words[1], NULL, 0);
g_assert(qemu_strtoull(words[1], NULL, 0, &addr) == 0);
if (words[0][4] == 'b') {
uint8_t data;
@ -422,8 +428,8 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
char *enc;
g_assert(words[1] && words[2]);
addr = strtoull(words[1], NULL, 0);
len = strtoull(words[2], NULL, 0);
g_assert(qemu_strtoull(words[1], NULL, 0, &addr) == 0);
g_assert(qemu_strtoull(words[2], NULL, 0, &len) == 0);
data = g_malloc(len);
cpu_physical_memory_read(addr, data, len);
@ -444,8 +450,8 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
gchar *b64_data;
g_assert(words[1] && words[2]);
addr = strtoull(words[1], NULL, 0);
len = strtoull(words[2], NULL, 0);
g_assert(qemu_strtoull(words[1], NULL, 0, &addr) == 0);
g_assert(qemu_strtoull(words[2], NULL, 0, &len) == 0);
data = g_malloc(len);
cpu_physical_memory_read(addr, data, len);
@ -461,8 +467,8 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
size_t data_len;
g_assert(words[1] && words[2] && words[3]);
addr = strtoull(words[1], NULL, 0);
len = strtoull(words[2], NULL, 0);
g_assert(qemu_strtoull(words[1], NULL, 0, &addr) == 0);
g_assert(qemu_strtoull(words[2], NULL, 0, &len) == 0);
data_len = strlen(words[3]);
if (data_len < 3) {
@ -487,12 +493,12 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
} else if (strcmp(words[0], "memset") == 0) {
uint64_t addr, len;
uint8_t *data;
uint8_t pattern;
unsigned long pattern;
g_assert(words[1] && words[2] && words[3]);
addr = strtoull(words[1], NULL, 0);
len = strtoull(words[2], NULL, 0);
pattern = strtoull(words[3], NULL, 0);
g_assert(qemu_strtoull(words[1], NULL, 0, &addr) == 0);
g_assert(qemu_strtoull(words[2], NULL, 0, &len) == 0);
g_assert(qemu_strtoul(words[3], NULL, 0, &pattern) == 0);
if (len) {
data = g_malloc(len);
@ -510,8 +516,8 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
gsize out_len;
g_assert(words[1] && words[2] && words[3]);
addr = strtoull(words[1], NULL, 0);
len = strtoull(words[2], NULL, 0);
g_assert(qemu_strtoull(words[1], NULL, 0, &addr) == 0);
g_assert(qemu_strtoull(words[2], NULL, 0, &len) == 0);
data_len = strlen(words[3]);
if (data_len < 3) {
@ -531,11 +537,25 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
qtest_send_prefix(chr);
qtest_send(chr, "OK\n");
#ifdef TARGET_PPC64
} else if (strcmp(words[0], "rtas") == 0) {
uint64_t res, args, ret;
unsigned long nargs, nret;
g_assert(qemu_strtoul(words[2], NULL, 0, &nargs) == 0);
g_assert(qemu_strtoull(words[3], NULL, 0, &args) == 0);
g_assert(qemu_strtoul(words[4], NULL, 0, &nret) == 0);
g_assert(qemu_strtoull(words[5], NULL, 0, &ret) == 0);
res = qtest_rtas_call(words[1], nargs, args, nret, ret);
qtest_send_prefix(chr);
qtest_sendf(chr, "OK %"PRIu64"\n", res);
#endif
} else if (qtest_enabled() && strcmp(words[0], "clock_step") == 0) {
int64_t ns;
if (words[1]) {
ns = strtoll(words[1], NULL, 0);
g_assert(qemu_strtoll(words[1], NULL, 0, &ns) == 0);
} else {
ns = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
}
@ -547,7 +567,7 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
int64_t ns;
g_assert(words[1]);
ns = strtoll(words[1], NULL, 0);
g_assert(qemu_strtoll(words[1], NULL, 0, &ns) == 0);
qtest_clock_warp(ns);
qtest_send_prefix(chr);
qtest_sendf(chr, "OK %"PRIi64"\n",
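
For reference, a sketch of the wire exchange that the new "rtas" qtest command above accepts. The call name and guest addresses are illustrative; the test is expected to have placed its argument and return buffers at those guest-physical addresses, and the numeric reply is whatever qtest_rtas_call() returns:

/*
 *   client:  rtas get-time-of-day 0 0x148000 8 0x148020
 *   server:  OK 0
 *
 * words[1] = RTAS call name, words[2]/words[3] = number and guest address
 * of input arguments, words[4]/words[5] = number and guest address of
 * return values.
 */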

@ -1009,6 +1009,8 @@ struct CPUPPCState {
bool tlb_dirty; /* Set to non-zero when modifying TLB */
bool kvm_sw_tlb; /* non-zero if KVM SW TLB API is active */
uint32_t tlb_need_flush; /* Delayed flush needed */
#define TLB_NEED_LOCAL_FLUSH 0x1
#define TLB_NEED_GLOBAL_FLUSH 0x2
#endif
/* Other registers */

@ -711,7 +711,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
/* Any interrupt is context synchronizing, check if TCG TLB
* needs a delayed flush on ppc64
*/
check_tlb_flush(env);
check_tlb_flush(env, false);
}
void ppc_cpu_do_interrupt(CPUState *cs)
@ -973,7 +973,7 @@ static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
/* Context synchronizing: check if TCG TLB needs flush */
check_tlb_flush(env);
check_tlb_flush(env, false);
}
void helper_rfi(CPUPPCState *env)

@ -18,7 +18,8 @@ DEF_HELPER_1(rfid, void, env)
DEF_HELPER_1(hrfid, void, env)
DEF_HELPER_2(store_lpcr, void, env, tl)
#endif
DEF_HELPER_1(check_tlb_flush, void, env)
DEF_HELPER_1(check_tlb_flush_local, void, env)
DEF_HELPER_1(check_tlb_flush_global, void, env)
#endif
DEF_HELPER_3(lmw, void, env, tl, i32)
@ -50,6 +51,8 @@ DEF_HELPER_FLAGS_1(cnttzd, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_1(popcntd, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_2(bpermd, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_3(srad, tl, env, tl, tl)
DEF_HELPER_0(darn32, tl)
DEF_HELPER_0(darn64, tl)
#endif
DEF_HELPER_FLAGS_1(cntlsw32, TCG_CALL_NO_RWG_SE, i32, i32)
@ -250,6 +253,14 @@ DEF_HELPER_2(vspltisw, void, avr, i32)
DEF_HELPER_3(vspltb, void, avr, avr, i32)
DEF_HELPER_3(vsplth, void, avr, avr, i32)
DEF_HELPER_3(vspltw, void, avr, avr, i32)
DEF_HELPER_3(vextractub, void, avr, avr, i32)
DEF_HELPER_3(vextractuh, void, avr, avr, i32)
DEF_HELPER_3(vextractuw, void, avr, avr, i32)
DEF_HELPER_3(vextractd, void, avr, avr, i32)
DEF_HELPER_3(vinsertb, void, avr, avr, i32)
DEF_HELPER_3(vinserth, void, avr, avr, i32)
DEF_HELPER_3(vinsertw, void, avr, avr, i32)
DEF_HELPER_3(vinsertd, void, avr, avr, i32)
DEF_HELPER_2(vupkhpx, void, avr, avr)
DEF_HELPER_2(vupklpx, void, avr, avr)
DEF_HELPER_2(vupkhsb, void, avr, avr)
@ -262,6 +273,7 @@ DEF_HELPER_5(vmsumubm, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vmsummbm, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vsel, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vperm, void, env, avr, avr, avr, avr)
DEF_HELPER_5(vpermr, void, env, avr, avr, avr, avr)
DEF_HELPER_4(vpkshss, void, env, avr, avr, avr)
DEF_HELPER_4(vpkshus, void, env, avr, avr, avr)
DEF_HELPER_4(vpkswss, void, env, avr, avr, avr)
@ -317,10 +329,15 @@ DEF_HELPER_2(vclzb, void, avr, avr)
DEF_HELPER_2(vclzh, void, avr, avr)
DEF_HELPER_2(vclzw, void, avr, avr)
DEF_HELPER_2(vclzd, void, avr, avr)
DEF_HELPER_2(vctzb, void, avr, avr)
DEF_HELPER_2(vctzh, void, avr, avr)
DEF_HELPER_2(vctzw, void, avr, avr)
DEF_HELPER_2(vctzd, void, avr, avr)
DEF_HELPER_2(vpopcntb, void, avr, avr)
DEF_HELPER_2(vpopcnth, void, avr, avr)
DEF_HELPER_2(vpopcntw, void, avr, avr)
DEF_HELPER_2(vpopcntd, void, avr, avr)
DEF_HELPER_3(vbpermd, void, avr, avr, avr)
DEF_HELPER_3(vbpermq, void, avr, avr, avr)
DEF_HELPER_2(vgbbd, void, avr, avr)
DEF_HELPER_3(vpmsumb, void, avr, avr, avr)

@ -154,16 +154,33 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
}
#if !defined(CONFIG_USER_ONLY)
static inline void check_tlb_flush(CPUPPCState *env)
static inline void check_tlb_flush(CPUPPCState *env, bool global)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
if (env->tlb_need_flush) {
env->tlb_need_flush = 0;
if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
tlb_flush(cs, 1);
env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
}
/* Propagate TLB invalidations to other CPUs when the guest uses broadcast
* TLB invalidation instructions.
*/
if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
CPUState *other_cs;
CPU_FOREACH(other_cs) {
if (other_cs != cs) {
PowerPCCPU *cpu = POWERPC_CPU(other_cs);
CPUPPCState *other_env = &cpu->env;
other_env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
tlb_flush(other_cs, 1);
}
}
env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
}
}
#else
static inline void check_tlb_flush(CPUPPCState *env) { }
static inline void check_tlb_flush(CPUPPCState *env, bool global) { }
#endif
#endif /* HELPER_REGS_H */
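
Putting the pieces of this series together, the intended split is: broadcast invalidations (tlbie and friends) mark TLB_NEED_GLOBAL_FLUSH and the flush is only carried out at a synchronization point, while exceptions, rfi and isync keep using the cheap local variant. A sketch under those assumptions; the example_* function names are made up and only show where each call sits:

static void example_tlbie_translation(CPUPPCState *env)
{
    /* the tlbie translation ORs in the global flag for later */
    env->tlb_need_flush |= TLB_NEED_GLOBAL_FLUSH;
}

static void example_ptesync(CPUPPCState *env)
{
    /* ptesync (and tlbsync on BookE) broadcasts the pending flush */
    check_tlb_flush(env, true);
}

static void example_exception_entry(CPUPPCState *env)
{
    /* context-synchronizing events only need the local, deferred flush */
    check_tlb_flush(env, false);
}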

@ -182,6 +182,22 @@ target_ulong helper_cnttzd(target_ulong t)
{
return ctz64(t);
}
/* Return invalid random number.
*
* FIXME: Add rng backend or other mechanism to get cryptographically suitable
* random number
*/
target_ulong helper_darn32(void)
{
return -1;
}
target_ulong helper_darn64(void)
{
return -1;
}
#endif
#if defined(TARGET_PPC64)
@ -1126,14 +1142,57 @@ void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
*r = result;
}
void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
ppc_avr_t *c)
{
ppc_avr_t result;
int i;
VECTOR_FOR_INORDER_I(i, u8) {
int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
int index = 15 - (s & 0xf);
#else
int index = s & 0xf;
#endif
if (s & 0x10) {
result.u8[i] = a->u8[index];
} else {
result.u8[i] = b->u8[index];
}
}
*r = result;
}
#if defined(HOST_WORDS_BIGENDIAN)
#define VBPERMQ_INDEX(avr, i) ((avr)->u8[(i)])
#define VBPERMD_INDEX(i) (i)
#define VBPERMQ_DW(index) (((index) & 0x40) != 0)
#define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], index, 1))
#else
#define VBPERMQ_INDEX(avr, i) ((avr)->u8[15-(i)])
#define VBPERMD_INDEX(i) (1 - i)
#define VBPERMQ_DW(index) (((index) & 0x40) == 0)
#define EXTRACT_BIT(avr, i, index) \
(extract64((avr)->u64[1 - i], 63 - index, 1))
#endif
void helper_vbpermd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
int i, j;
ppc_avr_t result = { .u64 = { 0, 0 } };
VECTOR_FOR_INORDER_I(i, u64) {
for (j = 0; j < 8; j++) {
int index = VBPERMQ_INDEX(b, (i * 8) + j);
if (index < 64 && EXTRACT_BIT(a, i, index)) {
result.u64[VBPERMD_INDEX(i)] |= (0x80 >> j);
}
}
}
*r = result;
}
void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
int i;
@ -1792,6 +1851,51 @@ VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
#if defined(HOST_WORDS_BIGENDIAN)
#define VINSERT(suffix, element) \
void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
{ \
memmove(&r->u8[index], &b->u8[8 - sizeof(r->element)], \
sizeof(r->element[0])); \
}
#else
#define VINSERT(suffix, element) \
void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
{ \
uint32_t d = (16 - index) - sizeof(r->element[0]); \
memmove(&r->u8[d], &b->u8[8], sizeof(r->element[0])); \
}
#endif
VINSERT(b, u8)
VINSERT(h, u16)
VINSERT(w, u32)
VINSERT(d, u64)
#undef VINSERT
#if defined(HOST_WORDS_BIGENDIAN)
#define VEXTRACT(suffix, element) \
void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
{ \
uint32_t es = sizeof(r->element[0]); \
memmove(&r->u8[8 - es], &b->u8[index], es); \
memset(&r->u8[8], 0, 8); \
memset(&r->u8[0], 0, 8 - es); \
}
#else
#define VEXTRACT(suffix, element) \
void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
{ \
uint32_t es = sizeof(r->element[0]); \
uint32_t s = (16 - index) - es; \
memmove(&r->u8[8], &b->u8[s], es); \
memset(&r->u8[0], 0, 8); \
memset(&r->u8[8 + es], 0, 8 - es); \
}
#endif
VEXTRACT(ub, u8)
VEXTRACT(uh, u16)
VEXTRACT(uw, u32)
VEXTRACT(d, u64)
#undef VEXTRACT
#define VSPLTI(suffix, element, splat_type) \
void helper_vspltis##suffix(ppc_avr_t *r, uint32_t splat) \
@ -2038,6 +2142,21 @@ VGENERIC_DO(clzd, u64)
#undef clzw
#undef clzd
#define ctzb(v) ((v) ? ctz32(v) : 8)
#define ctzh(v) ((v) ? ctz32(v) : 16)
#define ctzw(v) ctz32((v))
#define ctzd(v) ctz64((v))
VGENERIC_DO(ctzb, u8)
VGENERIC_DO(ctzh, u16)
VGENERIC_DO(ctzw, u32)
VGENERIC_DO(ctzd, u64)
#undef ctzb
#undef ctzh
#undef ctzw
#undef ctzd
#define popcntb(v) ctpop8(v)
#define popcnth(v) ctpop16(v)
#define popcntw(v) ctpop32(v)

@ -36,6 +36,7 @@
#include "hw/sysbus.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/ppc.h"
#include "sysemu/watchdog.h"
#include "trace.h"
@ -427,6 +428,7 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
CPUPPCState *env = &cpu->env;
long rampagesize;
int iq, ik, jq, jk;
bool has_64k_pages = false;
/* We only handle page sizes for 64-bit server guests for now */
if (!(env->mmu_model & POWERPC_MMU_64)) {
@ -470,6 +472,9 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
ksps->enc[jk].page_shift)) {
continue;
}
if (ksps->enc[jk].page_shift == 16) {
has_64k_pages = true;
}
qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
@ -484,6 +489,9 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
env->mmu_model &= ~POWERPC_MMU_1TSEG;
}
if (!has_64k_pages) {
env->mmu_model &= ~POWERPC_MMU_64K;
}
}
#else /* defined (TARGET_PPC64) */
@ -2055,6 +2063,12 @@ void kvmppc_enable_set_mode_hcall(void)
kvmppc_enable_hcall(kvm_state, H_SET_MODE);
}
void kvmppc_enable_clear_ref_mod_hcalls(void)
{
kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
}
void kvmppc_set_papr(PowerPCCPU *cpu)
{
CPUState *cs = CPU(cpu);
@ -2364,19 +2378,6 @@ PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
return pvr_pcc;
}
#if defined(TARGET_PPC64)
static void spapr_cpu_core_host_initfn(Object *obj)
{
sPAPRCPUCore *core = SPAPR_CPU_CORE(obj);
char *name = g_strdup_printf("%s-" TYPE_POWERPC_CPU, "host");
ObjectClass *oc = object_class_by_name(name);
g_assert(oc);
g_free((void *)name);
core->cpu_class = oc;
}
#endif
static int kvm_ppc_register_host_cpu_type(void)
{
TypeInfo type_info = {
@ -2404,14 +2405,16 @@ static int kvm_ppc_register_host_cpu_type(void)
#if defined(TARGET_PPC64)
type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
type_info.parent = TYPE_SPAPR_CPU_CORE,
type_info.instance_size = sizeof(sPAPRCPUCore),
type_info.instance_init = spapr_cpu_core_host_initfn,
type_info.class_init = NULL;
type_info.instance_size = sizeof(sPAPRCPUCore);
type_info.instance_init = NULL;
type_info.class_init = spapr_cpu_core_class_init;
type_info.class_data = (void *) "host";
type_register(&type_info);
g_free((void *)type_info.name);
/* Register generic spapr CPU family class for current host CPU type */
type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, dc->desc);
type_info.class_data = (void *) dc->desc;
type_register(&type_info);
g_free((void *)type_info.name);
#endif

@ -24,6 +24,7 @@ int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
void kvmppc_enable_logical_ci_hcalls(void);
void kvmppc_enable_set_mode_hcall(void);
void kvmppc_enable_clear_ref_mod_hcalls(void);
void kvmppc_set_papr(PowerPCCPU *cpu);
int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version);
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy);
@ -113,6 +114,10 @@ static inline void kvmppc_enable_set_mode_hcall(void)
{
}
static inline void kvmppc_enable_clear_ref_mod_hcalls(void)
{
}
static inline void kvmppc_set_papr(PowerPCCPU *cpu)
{
}

@ -110,7 +110,7 @@ void helper_slbia(CPUPPCState *env)
* and we still don't have a tlb_flush_mask(env, n, mask)
* in QEMU, we just invalidate all TLBs
*/
env->tlb_need_flush = 1;
env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
}
}
}
@ -132,7 +132,7 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
* and we still don't have a tlb_flush_mask(env, n, mask)
* in QEMU, we just invalidate all TLBs
*/
env->tlb_need_flush = 1;
env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
}
}
@ -912,7 +912,7 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
* invalidate, and we still don't have a tlb_flush_mask(env, n,
* mask) in QEMU, we just invalidate all TLBs
*/
tlb_flush(CPU(cpu), 1);
cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}
void ppc_hash64_update_rmls(CPUPPCState *env)

@ -1965,7 +1965,7 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
* we just mark the TLB to be flushed later (context synchronizing
* event or sync instruction on 32-bit).
*/
env->tlb_need_flush = 1;
env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
break;
#if defined(TARGET_PPC64)
case POWERPC_MMU_64B:
@ -1979,7 +1979,7 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
* and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
* we just invalidate all TLBs
*/
env->tlb_need_flush = 1;
env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
break;
#endif /* defined(TARGET_PPC64) */
default:
@ -2065,7 +2065,7 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
}
}
#else
env->tlb_need_flush = 1;
env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
#endif
}
}
@ -2757,7 +2757,7 @@ static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
PowerPCCPU *cpu = ppc_env_get_cpu(env);
CPUState *cs;
if (address & 0x4) {
/* flush all entries */
@ -2774,11 +2774,15 @@ void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
if (address & 0x8) {
/* flush TLB1 entries */
booke206_invalidate_ea_tlb(env, 1, address);
tlb_flush(CPU(cpu), 1);
CPU_FOREACH(cs) {
tlb_flush(cs, 1);
}
} else {
/* flush TLB0 entries */
booke206_invalidate_ea_tlb(env, 0, address);
tlb_flush_page(CPU(cpu), address & MAS2_EPN_MASK);
CPU_FOREACH(cs) {
tlb_flush_page(cs, address & MAS2_EPN_MASK);
}
}
}
@ -2867,9 +2871,14 @@ void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
}
void helper_check_tlb_flush(CPUPPCState *env)
void helper_check_tlb_flush_local(CPUPPCState *env)
{
check_tlb_flush(env);
check_tlb_flush(env, false);
}
void helper_check_tlb_flush_global(CPUPPCState *env)
{
check_tlb_flush(env, true);
}
/*****************************************************************************/

@ -498,6 +498,8 @@ EXTRACT_HELPER(UIMM, 0, 16);
EXTRACT_HELPER(SIMM5, 16, 5);
/* 5 bits signed immediate value */
EXTRACT_HELPER(UIMM5, 16, 5);
/* 4 bits unsigned immediate value */
EXTRACT_HELPER(UIMM4, 16, 4);
/* Bit count */
EXTRACT_HELPER(NB, 11, 5);
/* Shift count */
@ -526,6 +528,10 @@ EXTRACT_HELPER(FPW, 16, 1);
/* addpcis */
EXTRACT_HELPER_DXFORM(DX, 10, 6, 6, 5, 16, 1, 1, 0, 0)
#if defined(TARGET_PPC64)
/* darn */
EXTRACT_HELPER(L, 16, 2);
#endif
/*** Jump target decoding ***/
/* Immediate address */
@ -589,6 +595,8 @@ EXTRACT_HELPER(DM, 8, 2);
EXTRACT_HELPER(UIM, 16, 2);
EXTRACT_HELPER(SHW, 8, 2);
EXTRACT_HELPER(SP, 19, 2);
EXTRACT_HELPER(IMM8, 11, 8);
/*****************************************************************************/
/* PowerPC instructions table */
@ -1891,6 +1899,21 @@ static void gen_cnttzd(DisasContext *ctx)
gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
}
}
/* darn */
static void gen_darn(DisasContext *ctx)
{
int l = L(ctx->opcode);
if (l == 0) {
gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
} else if (l <= 2) {
/* Return 64-bit random for both CRN and RRN */
gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
} else {
tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
}
}
#endif
/*** Integer rotate ***/
@ -2460,87 +2483,75 @@ static inline void gen_align_no_le(DisasContext *ctx)
}
/*** Integer load ***/
static inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
#define GEN_QEMU_LOAD_TL(ldop, op) \
static void glue(gen_qemu_, ldop)(DisasContext *ctx, \
TCGv val, \
TCGv addr) \
{ \
tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op); \
}
static inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
GEN_QEMU_LOAD_TL(ld8u, DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))
GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
#define GEN_QEMU_LOAD_64(ldop, op) \
static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx, \
TCGv_i64 val, \
TCGv addr) \
{ \
tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op); \
}
static inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
TCGMemOp op = MO_SW | ctx->default_tcg_memop_mask;
tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB))
GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q))
#if defined(TARGET_PPC64)
GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q))
#endif
#define GEN_QEMU_STORE_TL(stop, op) \
static void glue(gen_qemu_, stop)(DisasContext *ctx, \
TCGv val, \
TCGv addr) \
{ \
tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op); \
}
static inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
GEN_QEMU_STORE_TL(st8, DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))
GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))
#define GEN_QEMU_STORE_64(stop, op) \
static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \
TCGv_i64 val, \
TCGv addr) \
{ \
tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op); \
}
static void gen_qemu_ld32u_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
{
TCGv tmp = tcg_temp_new();
gen_qemu_ld32u(ctx, tmp, addr);
tcg_gen_extu_tl_i64(val, tmp);
tcg_temp_free(tmp);
}
GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
static inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
TCGMemOp op = MO_SL | ctx->default_tcg_memop_mask;
tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}
static void gen_qemu_ld32s_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
{
TCGv tmp = tcg_temp_new();
gen_qemu_ld32s(ctx, tmp, addr);
tcg_gen_ext_tl_i64(val, tmp);
tcg_temp_free(tmp);
}
static inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
}
static inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
}
static inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
TCGMemOp op = MO_UW | ctx->default_tcg_memop_mask;
tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
}
static inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
TCGMemOp op = MO_UL | ctx->default_tcg_memop_mask;
tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
}
static void gen_qemu_st32_i64(DisasContext *ctx, TCGv_i64 val, TCGv addr)
{
TCGv tmp = tcg_temp_new();
tcg_gen_trunc_i64_tl(tmp, val);
gen_qemu_st32(ctx, tmp, addr);
tcg_temp_free(tmp);
}
static inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
TCGMemOp op = MO_Q | ctx->default_tcg_memop_mask;
tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
}
#if defined(TARGET_PPC64)
GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
#endif
#define GEN_LD(name, ldop, opc, type) \
static void glue(gen_, name)(DisasContext *ctx) \
@ -2628,12 +2639,12 @@ GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
/* lwax */
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
/* ldux */
GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B);
GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B);
/* ldx */
GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B);
GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B);
/* CI load/store variants */
GEN_LDX_HVRM(ldcix, ld64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
@ -2656,7 +2667,7 @@ static void gen_ld(DisasContext *ctx)
gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
} else {
/* ld - ldu */
gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
}
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
@ -2693,16 +2704,16 @@ static void gen_lq(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x0F);
/* We only need to swap high and low halves. gen_qemu_ld64 does necessary
64-bit byteswap already. */
/* We only need to swap high and low halves. gen_qemu_ld64_i64 does
necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
gen_addr_add(ctx, EA, EA, 8);
gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
} else {
gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
gen_addr_add(ctx, EA, EA, 8);
gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
}
tcg_temp_free(EA);
}
@ -2785,9 +2796,9 @@ GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
/* stw stwu stwux stwx */
GEN_STS(stw, st32, 0x04, PPC_INTEGER);
#if defined(TARGET_PPC64)
GEN_STUX(std, st64, 0x15, 0x05, PPC_64B);
GEN_STX(std, st64, 0x15, 0x04, PPC_64B);
GEN_STX_HVRM(stdcix, st64, 0x15, 0x1f, PPC_CILDST)
GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B);
GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B);
GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
@ -2824,16 +2835,16 @@ static void gen_std(DisasContext *ctx)
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
/* We only need to swap high and low halves. gen_qemu_st64 does
/* We only need to swap high and low halves. gen_qemu_st64_i64 does
necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
gen_addr_add(ctx, EA, EA, 8);
gen_qemu_st64(ctx, cpu_gpr[rs], EA);
gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
} else {
gen_qemu_st64(ctx, cpu_gpr[rs], EA);
gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
gen_addr_add(ctx, EA, EA, 8);
gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
}
tcg_temp_free(EA);
} else {
@ -2847,7 +2858,7 @@ static void gen_std(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0x03);
gen_qemu_st64(ctx, cpu_gpr[rs], EA);
gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
tcg_temp_free(EA);
@ -2857,57 +2868,23 @@ static void gen_std(DisasContext *ctx)
/*** Integer load and store with byte reverse ***/
/* lhbrx */
static inline void gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
/* lwbrx */
static inline void gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
tcg_gen_qemu_ld_tl(arg1, arg2, ctx->mem_idx, op);
}
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
#if defined(TARGET_PPC64)
/* ldbrx */
static inline void gen_qemu_ld64ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
tcg_gen_qemu_ld_i64(arg1, arg2, ctx->mem_idx, op);
}
GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
/* stdbrx */
GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
#endif /* TARGET_PPC64 */
/* sthbrx */
static inline void gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
TCGMemOp op = MO_UW | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
}
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
/* stwbrx */
static inline void gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
TCGMemOp op = MO_UL | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
tcg_gen_qemu_st_tl(arg1, arg2, ctx->mem_idx, op);
}
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
#if defined(TARGET_PPC64)
/* stdbrx */
static inline void gen_qemu_st64r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
TCGMemOp op = MO_Q | (ctx->default_tcg_memop_mask ^ MO_BSWAP);
tcg_gen_qemu_st_i64(arg1, arg2, ctx->mem_idx, op);
}
GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
#endif /* TARGET_PPC64 */
/*** Integer load and store multiple ***/
/* lmw */
@ -3064,7 +3041,7 @@ static void gen_eieio(DisasContext *ctx)
}
#if !defined(CONFIG_USER_ONLY)
static inline void gen_check_tlb_flush(DisasContext *ctx)
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
{
TCGv_i32 t;
TCGLabel *l;
@ -3076,12 +3053,16 @@ static inline void gen_check_tlb_flush(DisasContext *ctx)
t = tcg_temp_new_i32();
tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
gen_helper_check_tlb_flush(cpu_env);
if (global) {
gen_helper_check_tlb_flush_global(cpu_env);
} else {
gen_helper_check_tlb_flush_local(cpu_env);
}
gen_set_label(l);
tcg_temp_free_i32(t);
}
#else
static inline void gen_check_tlb_flush(DisasContext *ctx) { }
static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
#endif
/* isync */
@ -3092,49 +3073,51 @@ static void gen_isync(DisasContext *ctx)
* kernel mode however so check MSR_PR
*/
if (!ctx->pr) {
gen_check_tlb_flush(ctx);
gen_check_tlb_flush(ctx, false);
}
gen_stop_exception(ctx);
}
#define LARX(name, len, loadop) \
#define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))
#define LARX(name, memop) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv t0; \
TCGv gpr = cpu_gpr[rD(ctx->opcode)]; \
int len = MEMOP_GET_SIZE(memop); \
gen_set_access_type(ctx, ACCESS_RES); \
t0 = tcg_temp_local_new(); \
gen_addr_reg_index(ctx, t0); \
if ((len) > 1) { \
gen_check_align(ctx, t0, (len)-1); \
} \
gen_qemu_##loadop(ctx, gpr, t0); \
tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop); \
tcg_gen_mov_tl(cpu_reserve, t0); \
tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUPPCState, reserve_val)); \
tcg_temp_free(t0); \
}
/* lwarx */
LARX(lbarx, 1, ld8u);
LARX(lharx, 2, ld16u);
LARX(lwarx, 4, ld32u);
LARX(lbarx, DEF_MEMOP(MO_UB))
LARX(lharx, DEF_MEMOP(MO_UW))
LARX(lwarx, DEF_MEMOP(MO_UL))
#if defined(CONFIG_USER_ONLY)
static void gen_conditional_store(DisasContext *ctx, TCGv EA,
int reg, int size)
int reg, int memop)
{
TCGv t0 = tcg_temp_new();
tcg_gen_st_tl(EA, cpu_env, offsetof(CPUPPCState, reserve_ea));
tcg_gen_movi_tl(t0, (size << 5) | reg);
tcg_gen_movi_tl(t0, (MEMOP_GET_SIZE(memop) << 5) | reg);
tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, reserve_info));
tcg_temp_free(t0);
gen_exception_err(ctx, POWERPC_EXCP_STCX, 0);
}
#else
static void gen_conditional_store(DisasContext *ctx, TCGv EA,
int reg, int size)
int reg, int memop)
{
TCGLabel *l1;
@ -3142,65 +3125,36 @@ static void gen_conditional_store(DisasContext *ctx, TCGv EA,
l1 = gen_new_label();
tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
#if defined(TARGET_PPC64)
if (size == 8) {
gen_qemu_st64(ctx, cpu_gpr[reg], EA);
} else
#endif
if (size == 4) {
gen_qemu_st32(ctx, cpu_gpr[reg], EA);
} else if (size == 2) {
gen_qemu_st16(ctx, cpu_gpr[reg], EA);
#if defined(TARGET_PPC64)
} else if (size == 16) {
TCGv gpr1, gpr2 , EA8;
if (unlikely(ctx->le_mode)) {
gpr1 = cpu_gpr[reg+1];
gpr2 = cpu_gpr[reg];
} else {
gpr1 = cpu_gpr[reg];
gpr2 = cpu_gpr[reg+1];
}
gen_qemu_st64(ctx, gpr1, EA);
EA8 = tcg_temp_local_new();
gen_addr_add(ctx, EA8, EA, 8);
gen_qemu_st64(ctx, gpr2, EA8);
tcg_temp_free(EA8);
#endif
} else {
gen_qemu_st8(ctx, cpu_gpr[reg], EA);
}
tcg_gen_qemu_st_tl(cpu_gpr[reg], EA, ctx->mem_idx, memop);
gen_set_label(l1);
tcg_gen_movi_tl(cpu_reserve, -1);
}
#endif
#define STCX(name, len) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv t0; \
if (unlikely((len == 16) && (rD(ctx->opcode) & 1))) { \
gen_inval_exception(ctx, \
POWERPC_EXCP_INVAL_INVAL); \
return; \
} \
gen_set_access_type(ctx, ACCESS_RES); \
t0 = tcg_temp_local_new(); \
gen_addr_reg_index(ctx, t0); \
if (len > 1) { \
gen_check_align(ctx, t0, (len)-1); \
} \
gen_conditional_store(ctx, t0, rS(ctx->opcode), len); \
tcg_temp_free(t0); \
#define STCX(name, memop) \
static void gen_##name(DisasContext *ctx) \
{ \
TCGv t0; \
int len = MEMOP_GET_SIZE(memop); \
gen_set_access_type(ctx, ACCESS_RES); \
t0 = tcg_temp_local_new(); \
gen_addr_reg_index(ctx, t0); \
if (len > 1) { \
gen_check_align(ctx, t0, (len) - 1); \
} \
gen_conditional_store(ctx, t0, rS(ctx->opcode), memop); \
tcg_temp_free(t0); \
}
STCX(stbcx_, 1);
STCX(sthcx_, 2);
STCX(stwcx_, 4);
STCX(stbcx_, DEF_MEMOP(MO_UB))
STCX(sthcx_, DEF_MEMOP(MO_UW))
STCX(stwcx_, DEF_MEMOP(MO_UL))
#if defined(TARGET_PPC64)
/* ldarx */
LARX(ldarx, 8, ld64);
LARX(ldarx, DEF_MEMOP(MO_Q))
/* stdcx. */
STCX(stdcx_, DEF_MEMOP(MO_Q))
/* lqarx */
static void gen_lqarx(DisasContext *ctx)
@ -3226,21 +3180,63 @@ static void gen_lqarx(DisasContext *ctx)
gpr1 = cpu_gpr[rd];
gpr2 = cpu_gpr[rd+1];
}
gen_qemu_ld64(ctx, gpr1, EA);
tcg_gen_qemu_ld_i64(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
tcg_gen_mov_tl(cpu_reserve, EA);
gen_addr_add(ctx, EA, EA, 8);
gen_qemu_ld64(ctx, gpr2, EA);
tcg_gen_qemu_ld_i64(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
tcg_gen_st_tl(gpr1, cpu_env, offsetof(CPUPPCState, reserve_val));
tcg_gen_st_tl(gpr2, cpu_env, offsetof(CPUPPCState, reserve_val2));
tcg_temp_free(EA);
}
/* stdcx. */
STCX(stdcx_, 8);
STCX(stqcx_, 16);
/* stqcx. */
static void gen_stqcx_(DisasContext *ctx)
{
TCGv EA;
int reg = rS(ctx->opcode);
int len = 16;
#if !defined(CONFIG_USER_ONLY)
TCGLabel *l1;
TCGv gpr1, gpr2;
#endif
if (unlikely((rD(ctx->opcode) & 1))) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
gen_set_access_type(ctx, ACCESS_RES);
EA = tcg_temp_local_new();
gen_addr_reg_index(ctx, EA);
if (len > 1) {
gen_check_align(ctx, EA, (len) - 1);
}
#if defined(CONFIG_USER_ONLY)
gen_conditional_store(ctx, EA, reg, 16);
#else
tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
l1 = gen_new_label();
tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
if (unlikely(ctx->le_mode)) {
gpr1 = cpu_gpr[reg + 1];
gpr2 = cpu_gpr[reg];
} else {
gpr1 = cpu_gpr[reg];
gpr2 = cpu_gpr[reg + 1];
}
tcg_gen_qemu_st_tl(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
gen_addr_add(ctx, EA, EA, 8);
tcg_gen_qemu_st_tl(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
gen_set_label(l1);
tcg_gen_movi_tl(cpu_reserve, -1);
#endif
tcg_temp_free(EA);
}
#endif /* defined(TARGET_PPC64) */
/* sync */
@ -3257,7 +3253,7 @@ static void gen_sync(DisasContext *ctx)
* check MSR_PR as well.
*/
if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
gen_check_tlb_flush(ctx);
gen_check_tlb_flush(ctx, true);
}
}
@ -3585,10 +3581,13 @@ static void gen_rfi(DisasContext *ctx)
#if defined(CONFIG_USER_ONLY)
GEN_PRIV;
#else
/* FIXME: This instruction doesn't exist anymore on 64-bit server
* processors compliant with arch 2.x, we should remove it there,
* but we need to fix OpenBIOS not to use it on 970 first
/* This instruction doesn't exist anymore on 64-bit server
* processors compliant with arch 2.x
*/
if (ctx->insns_flags & PPC_SEGMENT_64B) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
return;
}
/* Restore CPU state */
CHK_SV;
gen_update_cfar(ctx, ctx->nip - 4);
@ -4442,6 +4441,7 @@ static void gen_tlbie(DisasContext *ctx)
#if defined(CONFIG_USER_ONLY)
GEN_PRIV;
#else
TCGv_i32 t1;
CHK_HV;
if (NARROW_MODE(ctx)) {
@ -4452,6 +4452,11 @@ static void gen_tlbie(DisasContext *ctx)
} else {
gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
}
t1 = tcg_temp_new_i32();
tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH);
tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
tcg_temp_free_i32(t1);
#endif /* defined(CONFIG_USER_ONLY) */
}
@ -4463,11 +4468,10 @@ static void gen_tlbsync(DisasContext *ctx)
#else
CHK_HV;
/* tlbsync is a nop for server, ptesync handles delayed tlb flush,
* embedded however needs to deal with tlbsync. We don't try to be
* fancy and swallow the overhead of checking for both.
*/
gen_check_tlb_flush(ctx);
/* BookS does both ptesync and tlbsync make tlbsync a nop for server */
if (ctx->insns_flags & PPC_BOOKE) {
gen_check_tlb_flush(ctx, true);
}
#endif /* defined(CONFIG_USER_ONLY) */
}
@ -6240,6 +6244,7 @@ GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205),
GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD),
GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205),
GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206),
#endif
@ -6614,12 +6619,12 @@ GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER)
#if defined(TARGET_PPC64)
GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B)
GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B)
GEN_LDUX(ld, ld64, 0x15, 0x01, PPC_64B)
GEN_LDX(ld, ld64, 0x15, 0x00, PPC_64B)
GEN_LDX_E(ldbr, ld64ur, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B)
GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B)
GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
/* HV/P7 and later only */
GEN_LDX_HVRM(ldcix, ld64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
@ -6650,10 +6655,10 @@ GEN_STS(stb, st8, 0x06, PPC_INTEGER)
GEN_STS(sth, st16, 0x0C, PPC_INTEGER)
GEN_STS(stw, st32, 0x04, PPC_INTEGER)
#if defined(TARGET_PPC64)
GEN_STUX(std, st64, 0x15, 0x05, PPC_64B)
GEN_STX(std, st64, 0x15, 0x04, PPC_64B)
GEN_STX_E(stdbr, st64r, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
GEN_STX_HVRM(stdcix, st64, 0x15, 0x1f, PPC_CILDST)
GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B)
GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B)
GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)

@ -672,7 +672,7 @@ static inline void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
}
/* lfd lfdu lfdux lfdx */
GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT);
GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT);
/* lfs lfsu lfsux lfsx */
GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
@ -687,16 +687,16 @@ static void gen_lfdp(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0);
/* We only need to swap high and low halves. gen_qemu_ld64 does necessary
64-bit byteswap already. */
/* We only need to swap high and low halves. gen_qemu_ld64_i64 does
necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
} else {
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
}
tcg_temp_free(EA);
}
@ -712,16 +712,16 @@ static void gen_lfdpx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
/* We only need to swap high and low halves. gen_qemu_ld64 does necessary
64-bit byteswap already. */
/* We only need to swap high and low halves. gen_qemu_ld64_i64 does
necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
} else {
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_ld64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
}
tcg_temp_free(EA);
}
@ -848,7 +848,7 @@ static inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
}
/* stfd stfdu stfdux stfdx */
GEN_STFS(stfd, st64, 0x16, PPC_FLOAT);
GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT);
/* stfs stfsu stfsux stfsx */
GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
@ -863,16 +863,16 @@ static void gen_stfdp(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
EA = tcg_temp_new();
gen_addr_imm_index(ctx, EA, 0);
/* We only need to swap high and low halves. gen_qemu_st64 does necessary
64-bit byteswap already. */
/* We only need to swap high and low halves. gen_qemu_st64_i64 does
necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
} else {
gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
}
tcg_temp_free(EA);
}
@ -888,16 +888,16 @@ static void gen_stfdpx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
/* We only need to swap high and low halves. gen_qemu_st64 does necessary
64-bit byteswap already. */
/* We only need to swap high and low halves. gen_qemu_st64_i64 does
necessary 64-bit byteswap already. */
if (unlikely(ctx->le_mode)) {
gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
} else {
gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_st64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
}
tcg_temp_free(EA);
}
@ -924,9 +924,9 @@ static void gen_lfq(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_imm_index(ctx, t0, 0);
gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t0, t0, 8);
gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0);
gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
@ -940,9 +940,9 @@ static void gen_lfqu(DisasContext *ctx)
t0 = tcg_temp_new();
t1 = tcg_temp_new();
gen_addr_imm_index(ctx, t0, 0);
gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t1, t0, 8);
gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1);
gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
tcg_temp_free(t0);
@ -958,10 +958,10 @@ static void gen_lfqux(DisasContext *ctx)
TCGv t0, t1;
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
t1 = tcg_temp_new();
gen_addr_add(ctx, t1, t0, 8);
gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1);
gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
@ -976,9 +976,9 @@ static void gen_lfqx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t0, t0, 8);
gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0);
gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
@@ -990,9 +990,9 @@ static void gen_stfq(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_imm_index(ctx, t0, 0);
gen_qemu_st64(ctx, cpu_fpr[rd], t0);
gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t0, t0, 8);
gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
@@ -1005,10 +1005,10 @@ static void gen_stfqu(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_imm_index(ctx, t0, 0);
gen_qemu_st64(ctx, cpu_fpr[rd], t0);
gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
t1 = tcg_temp_new();
gen_addr_add(ctx, t1, t0, 8);
gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
@@ -1024,10 +1024,10 @@ static void gen_stfqux(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
gen_qemu_st64(ctx, cpu_fpr[rd], t0);
gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
t1 = tcg_temp_new();
gen_addr_add(ctx, t1, t0, 8);
gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
@@ -1042,9 +1042,9 @@ static void gen_stfqx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_FLOAT);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
gen_qemu_st64(ctx, cpu_fpr[rd], t0);
gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
gen_addr_add(ctx, t0, t0, 8);
gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}


@@ -85,7 +85,7 @@ GEN_STUF(name, stop, op | 0x21, type) \
GEN_STUXF(name, stop, op | 0x01, type) \
GEN_STXF(name, stop, 0x17, op | 0x00, type)
GEN_STFS(stfd, st64, 0x16, PPC_FLOAT)
GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT)
GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT)
GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX)
GEN_HANDLER_E(stfdp, 0x3D, 0xFF, 0xFF, 0x00200003, PPC_NONE, PPC2_ISA205),


@@ -617,7 +617,7 @@ static inline void gen_addr_spe_imm_index(DisasContext *ctx, TCGv EA, int sh)
static inline void gen_op_evldd(DisasContext *ctx, TCGv addr)
{
TCGv_i64 t0 = tcg_temp_new_i64();
gen_qemu_ld64(ctx, t0, addr);
gen_qemu_ld64_i64(ctx, t0, addr);
gen_store_gpr64(rD(ctx->opcode), t0);
tcg_temp_free_i64(t0);
}
@@ -725,7 +725,7 @@ static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
{
TCGv_i64 t0 = tcg_temp_new_i64();
gen_load_gpr64(t0, rS(ctx->opcode));
gen_qemu_st64(ctx, t0, addr);
gen_qemu_st64_i64(ctx, t0, addr);
tcg_temp_free_i64(t0);
}


@@ -26,16 +26,16 @@ static void glue(gen_, name)(DisasContext *ctx)
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \
/* We only need to swap high and low halves. gen_qemu_ld64 does necessary \
64-bit byteswap already. */ \
/* We only need to swap high and low halves. gen_qemu_ld64_i64 does \
necessary 64-bit byteswap already. */ \
if (ctx->le_mode) { \
gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
} else { \
gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
} \
tcg_temp_free(EA); \
}
@@ -52,16 +52,16 @@ static void gen_st##name(DisasContext *ctx) \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \
/* We only need to swap high and low halves. gen_qemu_st64 does necessary \
64-bit byteswap already. */ \
/* We only need to swap high and low halves. gen_qemu_st64_i64 does \
necessary 64-bit byteswap already. */ \
if (ctx->le_mode) { \
gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
} else { \
gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
} \
tcg_temp_free(EA); \
}
@@ -569,6 +569,21 @@ static void glue(gen_, name)(DisasContext *ctx) \
tcg_temp_free_ptr(rd); \
}
#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv_ptr rb, rd; \
if (unlikely(!ctx->altivec_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
gen_helper_##name(rd, rb); \
tcg_temp_free_ptr(rb); \
tcg_temp_free_ptr(rd); \
}
GEN_VXFORM_NOA(vupkhsb, 7, 8);
GEN_VXFORM_NOA(vupkhsh, 7, 9);
GEN_VXFORM_NOA(vupkhsw, 7, 25);
@@ -639,13 +654,55 @@ static void glue(gen_, name)(DisasContext *ctx) \
tcg_temp_free_ptr(rd); \
}
#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max) \
static void glue(gen_, name)(DisasContext *ctx) \
{ \
TCGv_ptr rb, rd; \
uint8_t uimm = UIMM4(ctx->opcode); \
TCGv_i32 t0 = tcg_temp_new_i32(); \
if (unlikely(!ctx->altivec_enabled)) { \
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
if (uimm > splat_max) { \
uimm = 0; \
} \
tcg_gen_movi_i32(t0, uimm); \
rb = gen_avr_ptr(rB(ctx->opcode)); \
rd = gen_avr_ptr(rD(ctx->opcode)); \
gen_helper_##name(rd, rb, t0); \
tcg_temp_free_i32(t0); \
tcg_temp_free_ptr(rb); \
tcg_temp_free_ptr(rd); \
}
GEN_VXFORM_UIMM(vspltb, 6, 8);
GEN_VXFORM_UIMM(vsplth, 6, 9);
GEN_VXFORM_UIMM(vspltw, 6, 10);
GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
GEN_VXFORM_UIMM_SPLAT(vinsertb, 6, 12, 15);
GEN_VXFORM_UIMM_SPLAT(vinserth, 6, 13, 14);
GEN_VXFORM_UIMM_SPLAT(vinsertw, 6, 14, 12);
GEN_VXFORM_UIMM_SPLAT(vinsertd, 6, 15, 8);
GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
GEN_VXFORM_DUAL(vspltb, PPC_NONE, PPC2_ALTIVEC_207,
vextractub, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vsplth, PPC_NONE, PPC2_ALTIVEC_207,
vextractuh, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltw, PPC_NONE, PPC2_ALTIVEC_207,
vextractuw, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltisb, PPC_NONE, PPC2_ALTIVEC_207,
vinsertb, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltish, PPC_NONE, PPC2_ALTIVEC_207,
vinserth, PPC_NONE, PPC2_ISA300);
GEN_VXFORM_DUAL(vspltisw, PPC_NONE, PPC2_ALTIVEC_207,
vinsertw, PPC_NONE, PPC2_ISA300);
static void gen_vsldoi(DisasContext *ctx)
{
@@ -709,6 +766,24 @@ static void gen_vmladduhm(DisasContext *ctx)
tcg_temp_free_ptr(rd);
}
static void gen_vpermr(DisasContext *ctx)
{
TCGv_ptr ra, rb, rc, rd;
if (unlikely(!ctx->altivec_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VPU);
return;
}
ra = gen_avr_ptr(rA(ctx->opcode));
rb = gen_avr_ptr(rB(ctx->opcode));
rc = gen_avr_ptr(rC(ctx->opcode));
rd = gen_avr_ptr(rD(ctx->opcode));
gen_helper_vpermr(cpu_env, rd, ra, rb, rc);
tcg_temp_free_ptr(ra);
tcg_temp_free_ptr(rb);
tcg_temp_free_ptr(rc);
tcg_temp_free_ptr(rd);
}
GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18)
GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19)
GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20)
@@ -719,6 +794,10 @@ GEN_VXFORM_NOA(vclzb, 1, 28)
GEN_VXFORM_NOA(vclzh, 1, 29)
GEN_VXFORM_NOA(vclzw, 1, 30)
GEN_VXFORM_NOA(vclzd, 1, 31)
GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
GEN_VXFORM_NOA(vpopcntb, 1, 28)
GEN_VXFORM_NOA(vpopcnth, 1, 29)
GEN_VXFORM_NOA(vpopcntw, 1, 30)
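
For reference, the new vctz[b,h,w,d] handlers just above correspond to the
ISA 3.0 Vector Count Trailing Zeros instructions: every element of the source
is replaced by its trailing-zero count, with a zero element yielding the
element width in bits.  A reference-style sketch of the word form, written
here only to illustrate the semantics and not taken from the QEMU helpers:

    #include <stdint.h>

    /* Illustration of vctzw semantics only; not the QEMU helper code. */
    static void vctzw_ref(uint32_t dst[4], const uint32_t src[4])
    {
        for (int i = 0; i < 4; i++) {
            uint32_t v = src[i];
            int n = 0;
            while (n < 32 && !(v & 1)) {
                v >>= 1;
                n++;
            }
            dst[i] = n;   /* 32 when the element is zero */
        }
    }
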
@@ -731,6 +810,7 @@ GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
GEN_VXFORM(vbpermd, 6, 23);
GEN_VXFORM(vbpermq, 6, 21);
GEN_VXFORM_NOA(vgbbd, 6, 20);
GEN_VXFORM(vpmsumb, 4, 16)


@@ -41,6 +41,13 @@ GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207)
#define GEN_VXFORM_300(name, opc2, opc3) \
GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300)
#define GEN_VXFORM_300_EXT(name, opc2, opc3, inval) \
GEN_HANDLER_E(name, 0x04, opc2, opc3, inval, PPC_NONE, PPC2_ISA300)
#define GEN_VXFORM_300_EO(name, opc2, opc3, opc4) \
GEN_HANDLER_E_2(name, 0x04, opc2, opc3, opc4, 0x00000000, PPC_NONE, \
PPC2_ISA300)
#define GEN_VXFORM_DUAL(name0, name1, opc2, opc3, type0, type1) \
GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, type0, type1)
@@ -191,11 +198,28 @@ GEN_VXRFORM(vcmpgefp, 3, 7)
GEN_VXRFORM_DUAL(vcmpgtfp, vcmpgtud, 3, 11, PPC_ALTIVEC, PPC_NONE)
GEN_VXRFORM_DUAL(vcmpbfp, vcmpgtsd, 3, 15, PPC_ALTIVEC, PPC_NONE)
#define GEN_VXFORM_SIMM(name, opc2, opc3) \
GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
GEN_VXFORM_SIMM(vspltisb, 6, 12),
GEN_VXFORM_SIMM(vspltish, 6, 13),
GEN_VXFORM_SIMM(vspltisw, 6, 14),
#define GEN_VXFORM_DUAL_INV(name0, name1, opc2, opc3, inval0, inval1, type) \
GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, \
PPC_NONE)
GEN_VXFORM_DUAL_INV(vspltb, vextractub, 6, 8, 0x00000000, 0x100000,
PPC2_ALTIVEC_207),
GEN_VXFORM_DUAL_INV(vsplth, vextractuh, 6, 9, 0x00000000, 0x100000,
PPC2_ALTIVEC_207),
GEN_VXFORM_DUAL_INV(vspltw, vextractuw, 6, 10, 0x00000000, 0x100000,
PPC2_ALTIVEC_207),
GEN_VXFORM_300_EXT(vextractd, 6, 11, 0x100000),
GEN_VXFORM_DUAL_INV(vspltisb, vinsertb, 6, 12, 0x00000000, 0x100000,
PPC2_ALTIVEC_207),
GEN_VXFORM_DUAL_INV(vspltish, vinserth, 6, 13, 0x00000000, 0x100000,
PPC2_ALTIVEC_207),
GEN_VXFORM_DUAL_INV(vspltisw, vinsertw, 6, 14, 0x00000000, 0x100000,
PPC2_ALTIVEC_207),
GEN_VXFORM_300_EXT(vinsertd, 6, 15, 0x100000),
GEN_VXFORM_300_EO(vctzb, 0x01, 0x18, 0x1C),
GEN_VXFORM_300_EO(vctzh, 0x01, 0x18, 0x1D),
GEN_VXFORM_300_EO(vctzw, 0x01, 0x18, 0x1E),
GEN_VXFORM_300_EO(vctzd, 0x01, 0x18, 0x1F),
GEN_VXFORM_300(vpermr, 0x1D, 0xFF),
#define GEN_VXFORM_NOA(name, opc2, opc3) \
GEN_HANDLER(name, 0x04, opc2, opc3, 0x001f0000, PPC_ALTIVEC)
@@ -218,9 +242,6 @@ GEN_VXFORM_NOA(vrfiz, 5, 9),
#define GEN_VXFORM_UIMM(name, opc2, opc3) \
GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
GEN_VXFORM_UIMM(vspltb, 6, 8),
GEN_VXFORM_UIMM(vsplth, 6, 9),
GEN_VXFORM_UIMM(vspltw, 6, 10),
GEN_VXFORM_UIMM(vcfux, 5, 12),
GEN_VXFORM_UIMM(vcfsx, 5, 13),
GEN_VXFORM_UIMM(vctuxs, 5, 14),
@@ -241,6 +262,7 @@ GEN_VXFORM_DUAL(vclzh, vpopcnth, 1, 29, PPC_NONE, PPC2_ALTIVEC_207),
GEN_VXFORM_DUAL(vclzw, vpopcntw, 1, 30, PPC_NONE, PPC2_ALTIVEC_207),
GEN_VXFORM_DUAL(vclzd, vpopcntd, 1, 31, PPC_NONE, PPC2_ALTIVEC_207),
GEN_VXFORM_300(vbpermd, 6, 23),
GEN_VXFORM_207(vbpermq, 6, 21),
GEN_VXFORM_207(vgbbd, 6, 20),
GEN_VXFORM_207(vpmsumb, 4, 16),


@@ -34,8 +34,10 @@ static void gen_##name(DisasContext *ctx) \
tcg_temp_free(EA); \
}
VSX_LOAD_SCALAR(lxsdx, ld64)
VSX_LOAD_SCALAR(lxsdx, ld64_i64)
VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
VSX_LOAD_SCALAR(lxsibzx, ld8u_i64)
VSX_LOAD_SCALAR(lxsihzx, ld16u_i64)
VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
VSX_LOAD_SCALAR(lxsspx, ld32fs)
@@ -49,9 +51,9 @@ static void gen_lxvd2x(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
gen_qemu_ld64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_ld64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
gen_qemu_ld64_i64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
tcg_temp_free(EA);
}
@@ -65,7 +67,7 @@ static void gen_lxvdsx(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
gen_qemu_ld64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
tcg_temp_free(EA);
}
@@ -115,7 +117,10 @@ static void gen_##name(DisasContext *ctx) \
tcg_temp_free(EA); \
}
VSX_STORE_SCALAR(stxsdx, st64)
VSX_STORE_SCALAR(stxsdx, st64_i64)
VSX_STORE_SCALAR(stxsibx, st8_i64)
VSX_STORE_SCALAR(stxsihx, st16_i64)
VSX_STORE_SCALAR(stxsiwx, st32_i64)
VSX_STORE_SCALAR(stxsspx, st32fs)
@@ -129,9 +134,9 @@ static void gen_stxvd2x(DisasContext *ctx)
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
gen_qemu_st64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
gen_qemu_st64_i64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
tcg_gen_addi_tl(EA, EA, 8);
gen_qemu_st64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
gen_qemu_st64_i64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
tcg_temp_free(EA);
}
@@ -647,6 +652,26 @@ static void gen_xxspltw(DisasContext *ctx)
tcg_temp_free_i64(b2);
}
#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))
static void gen_xxspltib(DisasContext *ctx)
{
unsigned char uim8 = IMM8(ctx->opcode);
if (xS(ctx->opcode) < 32) {
if (unlikely(!ctx->altivec_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VPU);
return;
}
} else {
if (unlikely(!ctx->vsx_enabled)) {
gen_exception(ctx, POWERPC_EXCP_VSXU);
return;
}
}
tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), pattern(uim8));
tcg_gen_movi_i64(cpu_vsrl(xT(ctx->opcode)), pattern(uim8));
}
static void gen_xxsldwi(DisasContext *ctx)
{
TCGv_i64 xth, xtl;
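
The pattern() macro above replicates the low byte of its argument into all
eight bytes of a 64-bit word, which is how xxspltib splats its 8-bit
immediate across both halves of the target VSR.  A quick standalone check,
not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    #define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))

    int main(void)
    {
        /* ~(uint64_t)0 / 0xff == 0x0101010101010101 */
        assert(pattern(0x5A) == UINT64_C(0x5A5A5A5A5A5A5A5A));
        assert(pattern(0x100) == 0);   /* only the low byte is used */
        return 0;
    }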


@@ -1,12 +1,16 @@
GEN_HANDLER_E(lxsdx, 0x1F, 0x0C, 0x12, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(lxsiwax, 0x1F, 0x0C, 0x02, 0, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(lxsiwzx, 0x1F, 0x0C, 0x00, 0, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(lxsibzx, 0x1F, 0x0D, 0x18, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(lxsihzx, 0x1F, 0x0D, 0x19, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(lxsspx, 0x1F, 0x0C, 0x10, 0, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(lxvd2x, 0x1F, 0x0C, 0x1A, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(stxsdx, 0x1F, 0xC, 0x16, 0, PPC_NONE, PPC2_VSX),
GEN_HANDLER_E(stxsibx, 0x1F, 0xD, 0x1C, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stxsihx, 0x1F, 0xD, 0x1D, 0, PPC_NONE, PPC2_ISA300),
GEN_HANDLER_E(stxsiwx, 0x1F, 0xC, 0x04, 0, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(stxsspx, 0x1F, 0xC, 0x14, 0, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX),
@@ -20,6 +24,10 @@ GEN_HANDLER_E(mfvsrd, 0x1F, 0x13, 0x01, 0x0000F800, PPC_NONE, PPC2_VSX207),
GEN_HANDLER_E(mtvsrd, 0x1F, 0x13, 0x05, 0x0000F800, PPC_NONE, PPC2_VSX207),
#endif
#define GEN_XX1FORM(name, opc2, opc3, fl2) \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2)
#define GEN_XX2FORM(name, opc2, opc3, fl2) \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2)
@@ -222,6 +230,7 @@ VSX_LOGICAL(xxlorc, 0x8, 0x15, PPC2_VSX207),
GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX),
GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX),
GEN_XX2FORM(xxspltw, 0x08, 0x0A, PPC2_VSX),
GEN_XX1FORM(xxspltib, 0x08, 0x0B, PPC2_ISA300),
GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00),
#define GEN_XXSEL_ROW(opc3) \


@@ -269,6 +269,7 @@ check-qtest-ppc64-y += tests/prom-env-test$(EXESUF)
check-qtest-ppc64-y += tests/drive_del-test$(EXESUF)
check-qtest-ppc64-y += tests/postcopy-test$(EXESUF)
check-qtest-ppc64-y += tests/boot-serial-test$(EXESUF)
check-qtest-ppc64-y += tests/rtas-test$(EXESUF)
check-qtest-sh4-y = tests/endianness-test$(EXESUF)
@@ -585,6 +586,9 @@ tests/test-crypto-block$(EXESUF): tests/test-crypto-block.o $(test-crypto-obj-y)
libqos-obj-y = tests/libqos/pci.o tests/libqos/fw_cfg.o tests/libqos/malloc.o
libqos-obj-y += tests/libqos/i2c.o tests/libqos/libqos.o
libqos-spapr-obj-y = $(libqos-obj-y) tests/libqos/malloc-spapr.o
libqos-spapr-obj-y += tests/libqos/libqos-spapr.o
libqos-spapr-obj-y += tests/libqos/rtas.o
libqos-pc-obj-y = $(libqos-obj-y) tests/libqos/pci-pc.o
libqos-pc-obj-y += tests/libqos/malloc-pc.o tests/libqos/libqos-pc.o
libqos-pc-obj-y += tests/libqos/ahci.o
@@ -599,6 +603,7 @@ tests/m48t59-test$(EXESUF): tests/m48t59-test.o
tests/endianness-test$(EXESUF): tests/endianness-test.o
tests/spapr-phb-test$(EXESUF): tests/spapr-phb-test.o $(libqos-obj-y)
tests/prom-env-test$(EXESUF): tests/prom-env-test.o $(libqos-obj-y)
tests/rtas-test$(EXESUF): tests/rtas-test.o $(libqos-spapr-obj-y)
tests/fdc-test$(EXESUF): tests/fdc-test.o
tests/ide-test$(EXESUF): tests/ide-test.o $(libqos-pc-obj-y)
tests/ahci-test$(EXESUF): tests/ahci-test.o $(libqos-pc-obj-y)


@@ -21,6 +21,8 @@ QOSState *qtest_pc_boot(const char *cmdline_fmt, ...)
qs = qtest_vboot(&qos_ops, cmdline_fmt, ap);
va_end(ap);
qtest_irq_intercept_in(global_qtest, "ioapic");
return qs;
}


@@ -0,0 +1,30 @@
#include "qemu/osdep.h"
#include "libqos/libqos-spapr.h"
#include "libqos/malloc-spapr.h"
static QOSOps qos_ops = {
.init_allocator = spapr_alloc_init_flags,
.uninit_allocator = spapr_alloc_uninit
};
QOSState *qtest_spapr_vboot(const char *cmdline_fmt, va_list ap)
{
return qtest_vboot(&qos_ops, cmdline_fmt, ap);
}
QOSState *qtest_spapr_boot(const char *cmdline_fmt, ...)
{
QOSState *qs;
va_list ap;
va_start(ap, cmdline_fmt);
qs = qtest_vboot(&qos_ops, cmdline_fmt, ap);
va_end(ap);
return qs;
}
void qtest_spapr_shutdown(QOSState *qs)
{
return qtest_shutdown(qs);
}


@@ -0,0 +1,10 @@
#ifndef LIBQOS_SPAPR_H
#define LIBQOS_SPAPR_H
#include "libqos/libqos.h"
QOSState *qtest_spapr_vboot(const char *cmdline_fmt, va_list ap);
QOSState *qtest_spapr_boot(const char *cmdline_fmt, ...);
void qtest_spapr_shutdown(QOSState *qs);
#endif


@@ -20,7 +20,6 @@ QOSState *qtest_vboot(QOSOps *ops, const char *cmdline_fmt, va_list ap)
cmdline = g_strdup_vprintf(cmdline_fmt, ap);
qs->qts = qtest_start(cmdline);
qs->ops = ops;
qtest_irq_intercept_in(global_qtest, "ioapic");
if (ops && ops->init_allocator) {
qs->alloc = ops->init_allocator(ALLOC_NO_FLAGS);
}


@@ -0,0 +1,38 @@
/*
* libqos malloc support for SPAPR
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "libqos/malloc-spapr.h"
#include "qemu-common.h"
#define PAGE_SIZE 4096
/* Memory must be a multiple of 256 MB,
* so we have at least 256MB
*/
#define SPAPR_MIN_SIZE 0x10000000
void spapr_alloc_uninit(QGuestAllocator *allocator)
{
alloc_uninit(allocator);
}
QGuestAllocator *spapr_alloc_init_flags(QAllocOpts flags)
{
QGuestAllocator *s;
s = alloc_init_flags(flags, 1 << 20, SPAPR_MIN_SIZE);
alloc_set_page_size(s, PAGE_SIZE);
return s;
}
QGuestAllocator *spapr_alloc_init(void)
{
return spapr_alloc_init_flags(ALLOC_NO_FLAGS);
}
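
With the allocator configured as above, guest allocations come from the range
between 1 MiB and SPAPR_MIN_SIZE with 4 KiB page granularity, which is what
the new RTAS wrapper uses for its argument and return buffers.  A hypothetical
usage sketch, not part of the patch, using only helpers already declared in
libqos (guest_alloc/guest_free come from libqos/malloc.h):

    #include <stdint.h>
    #include "libqos/malloc-spapr.h"

    /* Hypothetical usage sketch, not part of the patch. */
    static void spapr_alloc_example(void)
    {
        QGuestAllocator *alloc = spapr_alloc_init();
        /* room for 8 RTAS return words, as used by get-time-of-day */
        uint64_t buf = guest_alloc(alloc, 8 * sizeof(uint32_t));
        /* ... pass buf to qtest_rtas_call() and read it back with readl() ... */
        guest_free(alloc, buf);
        spapr_alloc_uninit(alloc);
    }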


@@ -0,0 +1,17 @@
/*
* libqos malloc support for SPAPR
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef LIBQOS_MALLOC_SPAPR_H
#define LIBQOS_MALLOC_SPAPR_H
#include "libqos/malloc.h"
QGuestAllocator *spapr_alloc_init(void);
QGuestAllocator *spapr_alloc_init_flags(QAllocOpts flags);
void spapr_alloc_uninit(QGuestAllocator *allocator);
#endif

tests/libqos/rtas.c (new file)

@@ -0,0 +1,71 @@
/*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "libqtest.h"
#include "libqos/rtas.h"
static void qrtas_copy_args(uint64_t target_args, uint32_t nargs,
uint32_t *args)
{
int i;
for (i = 0; i < nargs; i++) {
writel(target_args + i * sizeof(uint32_t), args[i]);
}
}
static void qrtas_copy_ret(uint64_t target_ret, uint32_t nret, uint32_t *ret)
{
int i;
for (i = 0; i < nret; i++) {
ret[i] = readl(target_ret + i * sizeof(uint32_t));
}
}
static uint64_t qrtas_call(QGuestAllocator *alloc, const char *name,
uint32_t nargs, uint32_t *args,
uint32_t nret, uint32_t *ret)
{
uint64_t res;
uint64_t target_args, target_ret;
target_args = guest_alloc(alloc, nargs * sizeof(uint32_t));
target_ret = guest_alloc(alloc, nret * sizeof(uint32_t));
qrtas_copy_args(target_args, nargs, args);
res = qtest_rtas_call(global_qtest, name,
nargs, target_args, nret, target_ret);
qrtas_copy_ret(target_ret, nret, ret);
guest_free(alloc, target_ret);
guest_free(alloc, target_args);
return res;
}
int qrtas_get_time_of_day(QGuestAllocator *alloc, struct tm *tm, uint32_t *ns)
{
int res;
uint32_t ret[8];
res = qrtas_call(alloc, "get-time-of-day", 0, NULL, 8, ret);
if (res != 0) {
return res;
}
res = ret[0];
memset(tm, 0, sizeof(*tm));
tm->tm_year = ret[1] - 1900;
tm->tm_mon = ret[2] - 1;
tm->tm_mday = ret[3];
tm->tm_hour = ret[4];
tm->tm_min = ret[5];
tm->tm_sec = ret[6];
*ns = ret[7];
return res;
}

tests/libqos/rtas.h (new file)

@@ -0,0 +1,11 @@
/*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef LIBQOS_RTAS_H
#define LIBQOS_RTAS_H
#include "libqos/malloc.h"
int qrtas_get_time_of_day(QGuestAllocator *alloc, struct tm *tm, uint32_t *ns);
#endif /* LIBQOS_RTAS_H */


@@ -751,6 +751,16 @@ void qtest_memread(QTestState *s, uint64_t addr, void *data, size_t size)
g_strfreev(args);
}
uint64_t qtest_rtas_call(QTestState *s, const char *name,
uint32_t nargs, uint64_t args,
uint32_t nret, uint64_t ret)
{
qtest_sendf(s, "rtas %s %u 0x%"PRIx64" %u 0x%"PRIx64"\n",
name, nargs, args, nret, ret);
qtest_rsp(s, 0);
return 0;
}
void qtest_add_func(const char *str, void (*fn)(void))
{
gchar *path = g_strdup_printf("/%s/%s", qtest_get_arch(), str);
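
Given the format string above, a call with zero arguments and eight return
words produces a single qtest protocol line; the guest addresses below are
invented purely for illustration:

    #include "libqtest.h"

    /* Illustration only: the guest addresses are made up for the example. */
    static void rtas_call_example(void)
    {
        /* sends "rtas get-time-of-day 0 0x10000000 8 0x10000100" on the
         * qtest socket; the function returns 0 once the response arrives,
         * and the RTAS status word is read back separately from the return
         * buffer (see qrtas_get_time_of_day in tests/libqos/rtas.c above). */
        qtest_rtas_call(global_qtest, "get-time-of-day",
                        0, 0x10000000, 8, 0x10000100);
    }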


@@ -317,6 +317,21 @@ uint64_t qtest_readq(QTestState *s, uint64_t addr);
*/
void qtest_memread(QTestState *s, uint64_t addr, void *data, size_t size);
/**
* qtest_rtas_call:
* @s: #QTestState instance to operate on.
* @name: name of the command to call.
* @nargs: Number of args.
* @args: Guest address to read args from.
* @nret: Number of return values.
* @ret: Guest address to write return values to.
*
* Call an RTAS function
*/
uint64_t qtest_rtas_call(QTestState *s, const char *name,
uint32_t nargs, uint64_t args,
uint32_t nret, uint64_t ret);
/**
* qtest_bufread:
* @s: #QTestState instance to operate on.

tests/rtas-test.c (new file)

@@ -0,0 +1,41 @@
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "libqtest.h"
#include "libqos/libqos-spapr.h"
#include "libqos/rtas.h"
static void test_rtas_get_time_of_day(void)
{
QOSState *qs;
struct tm tm;
uint32_t ns;
uint64_t ret;
time_t t1, t2;
qs = qtest_spapr_boot("-machine pseries");
g_assert(qs != NULL);
t1 = time(NULL);
ret = qrtas_get_time_of_day(qs->alloc, &tm, &ns);
g_assert_cmpint(ret, ==, 0);
t2 = mktimegm(&tm);
g_assert(t2 - t1 < 5); /* 5 sec max to run the test */
qtest_spapr_shutdown(qs);
}
int main(int argc, char *argv[])
{
const char *arch = qtest_get_arch();
g_test_init(&argc, &argv, NULL);
if (strcmp(arch, "ppc64")) {
g_printerr("RTAS requires ppc64-softmmu/qemu-system-ppc64\n");
exit(EXIT_FAILURE);
}
qtest_add_func("rtas/get-time-of-day", test_rtas_get_time_of_day);
return g_test_run();
}