2013-08-21 11:37:21 +02:00
|
|
|
|
/*
|
|
|
|
|
* \brief Genode/Nova specific VirtualBox SUPLib supplements
|
|
|
|
|
* \author Alexander Boettcher
|
|
|
|
|
* \author Norman Feske
|
|
|
|
|
* \author Christian Helmuth
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Copyright (C) 2013-2014 Genode Labs GmbH
|
|
|
|
|
*
|
|
|
|
|
* This file is distributed under the terms of the GNU General Public License
|
|
|
|
|
* version 2.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#ifndef _VCPU_H__
|
|
|
|
|
#define _VCPU_H__
|
|
|
|
|
|
|
|
|
|
/* Genode includes */
|
|
|
|
|
#include <base/printf.h>
|
|
|
|
|
#include <base/semaphore.h>
|
2015-04-14 16:10:11 +02:00
|
|
|
|
#include <util/flex_iterator.h>
|
2013-08-21 11:37:21 +02:00
|
|
|
|
#include <rom_session/connection.h>
|
|
|
|
|
#include <timer_session/connection.h>
|
|
|
|
|
|
|
|
|
|
#include <vmm/vcpu_thread.h>
|
|
|
|
|
#include <vmm/vcpu_dispatcher.h>
|
|
|
|
|
#include <vmm/printf.h>
|
|
|
|
|
|
|
|
|
|
/* NOVA includes that come with Genode */
|
|
|
|
|
#include <nova/syscalls.h>
|
|
|
|
|
|
|
|
|
|
/* VirtualBox includes */
|
|
|
|
|
#include <VBox/vmm/vm.h>
|
|
|
|
|
#include <VBox/err.h>
|
|
|
|
|
#include <VBox/vmm/pdmapi.h>
|
|
|
|
|
|
|
|
|
|
/* Genode's VirtualBox includes */
|
|
|
|
|
#include "sup.h"
|
|
|
|
|
#include "guest_memory.h"
|
|
|
|
|
#include "vmm_memory.h"
|
|
|
|
|
|
2014-04-24 10:55:09 +02:00
|
|
|
|
/* Genode libc pthread binding */
|
|
|
|
|
#include "thread.h"
|
|
|
|
|
|
|
|
|
|
/* LibC includes */
|
|
|
|
|
#include <setjmp.h>
|
|
|
|
|
|
2014-09-23 13:01:47 +02:00
|
|
|
|
#include <VBox/vmm/rem.h>
|
|
|
|
|
|
|
|
|
|
/* set to true to log each guest-memory mapping established in _exc_memory() */
static bool debug_map_memory = false;
|
|
|
|
|
|
2013-08-21 11:37:21 +02:00
|
|
|
|
/*
|
|
|
|
|
* VirtualBox stores segment attributes in Intel format using a 32-bit
|
|
|
|
|
* value. NOVA represents the attributes in packet format using a 16-bit
|
|
|
|
|
* value.
|
|
|
|
|
*/
|
|
|
|
|
/**
 * Convert a segment-attribute value from Intel layout to NOVA layout
 *
 * \param v  32-bit access-rights word as stored by VirtualBox
 * \return   16-bit packed attribute value as expected by NOVA
 */
static inline Genode::uint16_t sel_ar_conv_to_nova(Genode::uint32_t v)
{
	/* the lowest byte is identical in both formats */
	Genode::uint32_t const low_byte  = v & 0xff;

	/* bits 12..16 of the Intel format are packed down by four positions */
	Genode::uint32_t const high_bits = (v & 0x1f000) >> 4;

	return low_byte | high_bits;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Convert a segment-attribute value from NOVA layout back to Intel layout
 *
 * \param v  16-bit packed attribute value as delivered by NOVA
 * \return   32-bit access-rights word in the Intel format used by VirtualBox
 *
 * Inverse of 'sel_ar_conv_to_nova': the low byte is kept, the packed bits
 * 8..12 are shifted back up into Intel bit positions 12..16.
 */
static inline Genode::uint32_t sel_ar_conv_from_nova(Genode::uint16_t v)
{
	/* use the Genode-qualified type consistently instead of relying on a
	 * transitively included bare 'uint32_t' typedef */
	return (v & 0xff) | (((Genode::uint32_t)v << 4) & 0x1f000);
}
|
|
|
|
|
|
2014-04-24 10:55:09 +02:00
|
|
|
|
|
|
|
|
|
/*
 * Used to map mmio memory to VM
 *
 * Implemented elsewhere in the VMM glue code. On success, 'ppv' and 'fli'
 * describe the local memory backing guest-physical address 'GCPhys', and
 * 'writeable' reports whether the region may be mapped writable.
 * NOTE(review): exact contract of 'cbWrite' not visible here — presumably
 * the size of the faulting access; confirm against the implementation.
 */
extern "C" int MMIO2_MAPPED_SYNC(PVM pVM, RTGCPHYS GCPhys, size_t cbWrite,
                                 void **ppv, Genode::Flexpage_iterator &fli,
                                 bool &writeable);
|
2013-08-21 11:37:21 +02:00
|
|
|
|
|
|
|
|
|
|
2014-04-24 10:55:09 +02:00
|
|
|
|
/**
 * Base class of a virtual-CPU handler thread
 *
 * Dispatches NOVA virtualization events (VM exits) for one vCPU and
 * translates guest state between the VirtualBox CPUM context and the
 * NOVA UTCB format. Derived classes supply the vendor-specific state
 * transfer via 'hw_load_state' / 'hw_save_state'.
 *
 * Control flow: 'run_hw' enters guest mode via 'switch_to_hw'
 * (setjmp + Nova::reply). Exit handlers either resume the guest with
 * 'Nova::reply(_stack_reply)' or return to 'run_hw' by longjmp
 * ('_fpu_save_and_longjmp'), handing execution back to the recompiler.
 */
class Vcpu_handler : public Vmm::Vcpu_dispatcher<pthread>
{
	private:

		/* FPU state of the guest, captured via fxsave (needs 16-byte alignment) */
		X86FXSTATE _guest_fpu_state __attribute__((aligned(0x10)));
		/* FPU state of the EMT thread, preserved across a guest-execution round */
		X86FXSTATE _emt_fpu_state __attribute__((aligned(0x10)));

		Genode::Cap_connection _cap_connection;
		/* vCPU execution context residing in a separate protection domain */
		Vmm::Vcpu_other_pd _vcpu;

		/* capability selector of the vCPU's execution context (EC) */
		Genode::addr_t _ec_sel;
		/* true while an IRQ-injection window was requested from the kernel */
		bool _irq_win;

		/**
		 * Store the current FPU register state into 'data' via fxsave
		 *
		 * 'data' must be 16-byte aligned (hardware requirement of fxsave).
		 */
		void fpu_save(char * data) {
			Assert(!(reinterpret_cast<Genode::addr_t>(data) & 0xF));
			asm volatile ("fxsave %0" : "=m" (*data));
		}

		/**
		 * Load the FPU register state from 'data' via fxrstor
		 *
		 * 'data' must be 16-byte aligned (hardware requirement of fxrstor).
		 */
		void fpu_load(char * data) {
			Assert(!(reinterpret_cast<Genode::addr_t>(data) & 0xF));
			asm volatile ("fxrstor %0" : : "m" (*data));
		}

		enum {
			/* marker placed in inj_info when requesting an IRQ window exit */
			NOVA_REQ_IRQWIN_EXIT = 0x1000U,
			/* valid bit of the UTCB injection-info field */
			IRQ_INJ_VALID_MASK = 0x80000000UL,
			IRQ_INJ_NONE = 0U,

			/*
			 * Interruptibility-state encodings, see
			 * Intel® 64 and IA-32 Architectures Software Developer’s Manual
			 * Volume 3C, Chapter 24.4.2.
			 * May 2012
			 */
			BLOCKING_BY_STI = 1U << 0,
			BLOCKING_BY_MOV_SS = 1U << 1,
			ACTIVITY_STATE_ACTIVE = 0U,
			INTERRUPT_STATE_NONE = 0U,
		};

		/*
		 * 'longjmp()' restores some FPU registers saved by 'setjmp()',
		 * so we need to save the guest FPU state before calling 'longjmp()'
		 */
		__attribute__((noreturn)) void _fpu_save_and_longjmp()
		{
			fpu_save(reinterpret_cast<char *>(&_guest_fpu_state));
			longjmp(_env, 1);
		}

	protected:

		/* UTCB state to re-establish on the next 'run_hw' invocation */
		struct {
			Nova::mword_t mtd;        /* message transfer descriptor */
			unsigned intr_state;      /* guest interruptibility state */
			unsigned ctrl[2];         /* execution-control fields */
		} next_utcb;

		/* VM/vCPU currently executing, valid only while inside 'run_hw' */
		PVM _current_vm;
		PVMCPU _current_vcpu;
		/* stack pointer handed to Nova::reply to resume guest execution */
		void * _stack_reply;
		/* jump buffer connecting exit handlers back to 'run_hw' */
		jmp_buf _env;

		/* distinguishes a recall exit from an emulation request in 'run_hw' */
		bool _last_exit_was_recall;

		/**
		 * Enter guest execution
		 *
		 * The setjmp/reply pair blocks this thread until an exit handler
		 * longjmps back; handlers that merely resume the guest reply on
		 * '_stack_reply' instead and never return here.
		 */
		void switch_to_hw()
		{
			unsigned long value;

			if (!setjmp(_env)) {
				/* remember a stack position just below 'value' for replies */
				_stack_reply = reinterpret_cast<void *>(&value - 1);
				Nova::reply(_stack_reply);
			}
		}

		/**
		 * Handler for all exits that require instruction emulation
		 */
		__attribute__((noreturn)) void _default_handler()
		{
			Nova::Utcb * utcb = reinterpret_cast<Nova::Utcb *>(Thread_base::utcb());

			Assert(utcb->actv_state == ACTIVITY_STATE_ACTIVE);
			Assert(!(utcb->inj_info & IRQ_INJ_VALID_MASK));

			/* go back to re-compiler */
			_fpu_save_and_longjmp();
		}

		/**
		 * Handler for recall exits (vCPU poked by another thread)
		 *
		 * Decides whether to resume the guest, request an IRQ window, or
		 * fall back to the recompiler.
		 */
		__attribute__((noreturn)) void _recall_handler()
		{
			Nova::Utcb * utcb = reinterpret_cast<Nova::Utcb *>(Thread_base::utcb());

			Assert(utcb->actv_state == ACTIVITY_STATE_ACTIVE);
			if (utcb->intr_state != INTERRUPT_STATE_NONE)
				Vmm::printf("intr state %x %x\n", utcb->intr_state, utcb->intr_state & 0xF);

			Assert(utcb->intr_state == INTERRUPT_STATE_NONE);

			if (utcb->inj_info & IRQ_INJ_VALID_MASK) {
				Assert(utcb->flags & X86_EFL_IF);

				/*
				if (!continue_hw_accelerated(utcb))
					Vmm::printf("WARNING - recall ignored during IRQ delivery\n");
				*/

				/* got recall during irq injection and X86_EFL_IF set for
				 * delivery of IRQ - just continue */
				Nova::reply(_stack_reply);
			}

			/* are we forced to go back to emulation mode ? */
			if (!continue_hw_accelerated(utcb)) {
				_last_exit_was_recall = true;
				/* go back to emulation mode */
				_fpu_save_and_longjmp();
			}

			/* check whether we have to request irq injection window */
			utcb->mtd = Nova::Mtd::FPU;
			if (check_to_request_irq_window(utcb, _current_vcpu)) {
				_irq_win = true;
				Nova::reply(_stack_reply);
			}

			/* nothing to do at all - continue hardware accelerated */

			Assert(!_irq_win);

			/*
			 * Print a debug message if there actually IS something to do now.
			 * This can happen, for example, if one of the worker threads has
			 * set a flag in the meantime. Usually, setting a flag is followed
			 * by a recall request, but we haven't verified this for each flag
			 * yet.
			 */
			continue_hw_accelerated(utcb, true);

			Nova::reply(_stack_reply);
		}

		/**
		 * Handler for nested/extended page-table faults (guest memory access)
		 *
		 * \param NPT_EPT  vendor-specific exit reason (template parameter)
		 * \param unmap    unmap request flag (not implemented)
		 * \param reason   faulting guest-physical address
		 *
		 * Resolves the faulting region to local memory and maps as many
		 * flexpages as fit into the UTCB; otherwise defers to the emulator.
		 */
		template <unsigned NPT_EPT>
		__attribute__((noreturn)) inline
		void _exc_memory(Genode::Thread_base * myself, Nova::Utcb * utcb,
		                 bool unmap, Genode::addr_t reason)
		{
			using namespace Nova;
			using namespace Genode;

			Assert(utcb->actv_state == ACTIVITY_STATE_ACTIVE);
			Assert(utcb->intr_state == INTERRUPT_STATE_NONE);

			if (utcb->inj_info & IRQ_INJ_VALID_MASK)
				Vmm::printf("inj_info %x\n", utcb->inj_info);

			Assert(!(utcb->inj_info & IRQ_INJ_VALID_MASK));

			if (unmap) {
				PERR("unmap not implemented\n");
				Nova::reply(_stack_reply);
			}

			enum { MAP_SIZE = 0x1000UL };

			bool writeable = true;
			Flexpage_iterator fli;
			/* try regular guest RAM first */
			void *pv = guest_memory()->lookup_ram(reason, MAP_SIZE, fli);

			if (!pv) {
				/**
				 * Check whether this is some mmio memory provided by VMM
				 * we can map, e.g. VMMDev memory or framebuffer currently.
				 */
				int res = MMIO2_MAPPED_SYNC(_current_vm, reason, MAP_SIZE, &pv,
				                            fli, writeable);
				if (res != VINF_SUCCESS)
					pv = 0;
			}

			/* emulator has to take over if fault region is not ram */
			if (!pv)
				_fpu_save_and_longjmp();

			/* fault region can be mapped - prepare utcb */
			utcb->set_msg_word(0);
			utcb->mtd = Mtd::FPU;

			enum {
				USER_PD = false, GUEST_PGT = true,
				READABLE = true, EXECUTABLE = true
			};

			Rights permission(READABLE, writeable, EXECUTABLE);

			/* add map items until no space is left on utcb anymore */
			bool res;
			do {
				Flexpage flexpage = fli.page();
				/* flexpages below page size (2^12) cannot be mapped */
				if (!flexpage.valid() || flexpage.log2_order < 12)
					break;

				/* touch memory - otherwise no mapping will take place */
				addr_t touch_me = flexpage.addr;
				while (touch_me < flexpage.addr + (1UL << flexpage.log2_order)) {
					touch_read(reinterpret_cast<unsigned char *>(touch_me));
					touch_me += 0x1000UL;
				}

				Crd crd = Mem_crd(flexpage.addr >> 12, flexpage.log2_order - 12,
				                  permission);
				res = utcb->append_item(crd, flexpage.hotspot, USER_PD, GUEST_PGT);

				if (debug_map_memory)
					Vmm::printf("map guest mem %p+%x -> %lx - reason %lx\n",
					            flexpage.addr, 1UL << flexpage.log2_order,
					            flexpage.hotspot, reason);
			} while (res);

			Nova::reply(_stack_reply);
		}

		/**
		 * Shortcut for calling 'Vmm::Vcpu_dispatcher::register_handler'
		 * with 'Vcpu_dispatcher' as template argument
		 */
		template <unsigned EV, void (Vcpu_handler::*FUNC)()>
		void _register_handler(Genode::addr_t exc_base, Nova::Mtd mtd)
		{
			if (!register_handler<EV, Vcpu_handler, FUNC>(exc_base, mtd))
				PERR("could not register handler %lx", exc_base + EV);
		}


		/* accessor to the vCPU execution context for derived classes */
		Vmm::Vcpu_other_pd &vcpu() { return _vcpu; }


		/**
		 * Transfer architectural guest state from the VirtualBox CPUM
		 * context into the NOVA UTCB, setting the corresponding MTD bits
		 *
		 * \return  always true (kept symmetric with 'hw_load_state')
		 */
		inline bool vbox_to_utcb(Nova::Utcb * utcb, VM *pVM, PVMCPU pVCpu)
		{
			PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);

			using namespace Nova;

			utcb->mtd |= Mtd::EIP;
			utcb->ip = pCtx->rip;

			utcb->mtd |= Mtd::ESP;
			utcb->sp = pCtx->rsp;

			utcb->mtd |= Mtd::ACDB;
			utcb->ax = pCtx->rax;
			utcb->bx = pCtx->rbx;
			utcb->cx = pCtx->rcx;
			utcb->dx = pCtx->rdx;

			utcb->mtd |= Mtd::EBSD;
			utcb->bp = pCtx->rbp;
			utcb->si = pCtx->rsi;
			utcb->di = pCtx->rdi;

			utcb->mtd |= Mtd::EFL;
			utcb->flags = pCtx->rflags.u;

			utcb->mtd |= Mtd::SYS;
			utcb->sysenter_cs = pCtx->SysEnter.cs;
			utcb->sysenter_sp = pCtx->SysEnter.esp;
			utcb->sysenter_ip = pCtx->SysEnter.eip;

			utcb->mtd |= Mtd::DR;
			utcb->dr7 = pCtx->dr[7];

			utcb->mtd |= Mtd::CR;
			utcb->cr0 = pCtx->cr0;

			utcb->mtd |= Mtd::CR;
			utcb->cr2 = pCtx->cr2;

			utcb->mtd |= Mtd::CR;
			utcb->cr3 = pCtx->cr3;

			utcb->mtd |= Mtd::CR;
			utcb->cr4 = pCtx->cr4;

			utcb->mtd |= Mtd::IDTR;
			utcb->idtr.limit = pCtx->idtr.cbIdt;
			utcb->idtr.base = pCtx->idtr.pIdt;

			utcb->mtd |= Mtd::GDTR;
			utcb->gdtr.limit = pCtx->gdtr.cbGdt;
			utcb->gdtr.base = pCtx->gdtr.pGdt;

			utcb->mtd |= Mtd::EFER;
			utcb->write_efer(CPUMGetGuestEFER(pVCpu));

			/*
			 * Update the PDPTE registers if necessary
			 *
			 * Intel manual sections 4.4.1 of Vol. 3A and 26.3.2.4 of Vol. 3C
			 * indicate the conditions when this is the case. The following
			 * code currently does not check if the recompiler modified any
			 * CR registers, which means the update can happen more often
			 * than really necessary.
			 */
			if (pVM->hm.s.vmx.fSupported &&
			    CPUMIsGuestPagingEnabledEx(pCtx) &&
			    CPUMIsGuestInPAEModeEx(pCtx)) {

				utcb->mtd |= Mtd::PDPTE;

				/* read the four PDPTEs from guest memory at CR3 */
				Genode::uint64_t *pdpte = (Genode::uint64_t*)
					guest_memory()->lookup(utcb->cr3, sizeof(utcb->pdpte));

				Assert(pdpte != 0);

				utcb->pdpte[0] = pdpte[0];
				utcb->pdpte[1] = pdpte[1];
				utcb->pdpte[2] = pdpte[2];
				utcb->pdpte[3] = pdpte[3];
			}

			Assert(!(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)));

			return true;
		}


		/**
		 * Transfer architectural guest state from the NOVA UTCB back into
		 * the VirtualBox CPUM context
		 *
		 * Registers with side effects (MSRs, descriptor tables, control
		 * registers) are written through their CPUM setters, and only if
		 * they actually changed, to avoid unnecessary change notifications.
		 *
		 * \return  always true (kept symmetric with 'hw_save_state')
		 */
		inline bool utcb_to_vbox(Nova::Utcb * utcb, VM *pVM, PVMCPU pVCpu)
		{
			PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);

			pCtx->rip = utcb->ip;
			pCtx->rsp = utcb->sp;

			pCtx->rax = utcb->ax;
			pCtx->rbx = utcb->bx;
			pCtx->rcx = utcb->cx;
			pCtx->rdx = utcb->dx;

			pCtx->rbp = utcb->bp;
			pCtx->rsi = utcb->si;
			pCtx->rdi = utcb->di;
			pCtx->rflags.u = utcb->flags;

			pCtx->dr[7] = utcb->dr7;

			if (pCtx->SysEnter.cs != utcb->sysenter_cs)
				CPUMSetGuestMsr(pVCpu, MSR_IA32_SYSENTER_CS, utcb->sysenter_cs);

			if (pCtx->SysEnter.esp != utcb->sysenter_sp)
				CPUMSetGuestMsr(pVCpu, MSR_IA32_SYSENTER_ESP, utcb->sysenter_sp);

			if (pCtx->SysEnter.eip != utcb->sysenter_ip)
				CPUMSetGuestMsr(pVCpu, MSR_IA32_SYSENTER_EIP, utcb->sysenter_ip);

			if (pCtx->idtr.cbIdt != utcb->idtr.limit ||
			    pCtx->idtr.pIdt != utcb->idtr.base)
				CPUMSetGuestIDTR(pVCpu, utcb->idtr.base, utcb->idtr.limit);

			if (pCtx->gdtr.cbGdt != utcb->gdtr.limit ||
			    pCtx->gdtr.pGdt != utcb->gdtr.base)
				CPUMSetGuestGDTR(pVCpu, utcb->gdtr.base, utcb->gdtr.limit);

			CPUMSetGuestEFER(pVCpu, utcb->read_efer());

			if (pCtx->cr0 != utcb->cr0)
				CPUMSetGuestCR0(pVCpu, utcb->cr0);

			if (pCtx->cr2 != utcb->cr2)
				CPUMSetGuestCR2(pVCpu, utcb->cr2);

			if (pCtx->cr3 != utcb->cr3)
				CPUMSetGuestCR3(pVCpu, utcb->cr3);

			if (pCtx->cr4 != utcb->cr4)
				CPUMSetGuestCR4(pVCpu, utcb->cr4);

			VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);

			/* tell rem compiler that FPU register changed XXX optimizations ? */
			CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM); /* redundant ? XXX */
			pVCpu->cpum.s.fUseFlags |= (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM); /* redundant ? XXX */

			/* propagate the guest interruptibility state (STI/MOV-SS shadow) */
			if (utcb->intr_state != 0) {
				Assert(utcb->intr_state == BLOCKING_BY_STI ||
				       utcb->intr_state == BLOCKING_BY_MOV_SS);
				EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
			} else
				VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);

			return true;
		}


		/**
		 * Request an IRQ-injection window exit if an interrupt is pending
		 *
		 * \return  true if a window exit was requested via the UTCB
		 */
		inline bool check_to_request_irq_window(Nova::Utcb * utcb, PVMCPU pVCpu)
		{
			if (!TRPMHasTrap(pVCpu) &&
			    !VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC |
			                                 VMCPU_FF_INTERRUPT_PIC)))
				return false;

			unsigned vector = 0;
			utcb->inj_info = NOVA_REQ_IRQWIN_EXIT | vector;
			utcb->mtd |= Nova::Mtd::INJ;

			return true;
		}


		/**
		 * Handler for the IRQ-window exit - the guest can now accept an
		 * interrupt, so fetch the highest-priority pending IRQ and place
		 * it into the UTCB injection fields
		 */
		__attribute__((noreturn)) void _irq_window()
		{
			Nova::Utcb * utcb = reinterpret_cast<Nova::Utcb *>(Thread_base::utcb());

			PVMCPU pVCpu = _current_vcpu;

			Assert(utcb->intr_state == INTERRUPT_STATE_NONE);
			Assert(utcb->flags & X86_EFL_IF);
			Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
			Assert(!(utcb->inj_info & IRQ_INJ_VALID_MASK));

			Assert(_irq_win);
			_irq_win = false;

			/* if no trap is pending yet, pull one from the PIC/APIC */
			if (!TRPMHasTrap(pVCpu)) {

				/* NMIs are not expected here */
				bool res = VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
				Assert(!res);

				if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC |
				                                VMCPU_FF_INTERRUPT_PIC))) {

					uint8_t irq;
					int rc = PDMGetInterrupt(pVCpu, &irq);
					Assert(RT_SUCCESS(rc));

					rc = TRPMAssertTrap(pVCpu, irq, TRPM_HARDWARE_INT);
					Assert(RT_SUCCESS(rc));
				}
			}

			/*
			 * If we have no IRQ for injection, something with requesting the
			 * IRQ window went wrong. Probably it was forgotten to be reset.
			 */
			Assert(TRPMHasTrap(pVCpu));

			/* interrupt can be dispatched */
			uint8_t u8Vector;
			TRPMEVENT enmType;
			SVMEVENT Event;
			RTGCUINT u32ErrorCode;
			/* NOTE(review): the two locals below are declared but unused here */
			RTGCUINTPTR GCPtrFaultAddress;
			uint8_t cbInstr;

			Event.u = 0;

			/* If a new event is pending, then dispatch it now. */
			int rc = TRPMQueryTrapAll(pVCpu, &u8Vector, &enmType, &u32ErrorCode, 0, 0);
			AssertRC(rc);
			Assert(enmType == TRPM_HARDWARE_INT);
			Assert(u8Vector != X86_XCPT_NMI);

			/* Clear the pending trap. */
			rc = TRPMResetTrap(pVCpu);
			AssertRC(rc);

			Event.n.u8Vector = u8Vector;
			Event.n.u1Valid = 1;
			Event.n.u32ErrorCode = u32ErrorCode;

			Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;

			utcb->inj_info = Event.u;
			utcb->inj_error = Event.n.u32ErrorCode;

			/*
			Vmm::printf("type:info:vector %x:%x:%x intr:actv - %x:%x mtd %x\n",
			            Event.n.u3Type, utcb->inj_info, u8Vector, utcb->intr_state, utcb->actv_state, utcb->mtd);
			*/
			utcb->mtd = Nova::Mtd::INJ | Nova::Mtd::FPU;
			Nova::reply(_stack_reply);
		}


		/**
		 * Check whether hardware-accelerated execution may continue
		 *
		 * \param verbose  if true, log which force-action flag is pending
		 * \return         true if no VM/vCPU force-action flags demand a
		 *                 return to the recompiler/ring-3 code
		 */
		inline bool continue_hw_accelerated(Nova::Utcb * utcb, bool verbose = false)
		{
			Assert(!(VMCPU_FF_IS_SET(_current_vcpu, VMCPU_FF_INHIBIT_INTERRUPTS)));

			uint32_t check_vm = VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST
			                    | VM_FF_PGM_POOL_FLUSH_PENDING
			                    | VM_FF_PDM_DMA;
			uint32_t check_vcpu = VMCPU_FF_HM_TO_R3_MASK
			                      | VMCPU_FF_PGM_SYNC_CR3
			                      | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
			                      | VMCPU_FF_REQUEST;

			if (!VM_FF_IS_PENDING(_current_vm, check_vm) &&
			    !VMCPU_FF_IS_PENDING(_current_vcpu, check_vcpu))
				return true;

			Assert(!(VM_FF_IS_PENDING(_current_vm, VM_FF_PGM_NO_MEMORY)));

#define VERBOSE_VM(flag) \
			do { \
				if (VM_FF_IS_PENDING(_current_vm, flag)) \
					Vmm::printf("flag " #flag " pending\n"); \
			} while (0)

#define VERBOSE_VMCPU(flag) \
			do { \
				if (VMCPU_FF_IS_PENDING(_current_vcpu, flag)) \
					Vmm::printf("flag " #flag " pending\n"); \
			} while (0)

			if (verbose) {
				/*
				 * VM_FF_HM_TO_R3_MASK
				 */
				VERBOSE_VM(VM_FF_TM_VIRTUAL_SYNC);
				VERBOSE_VM(VM_FF_PGM_NEED_HANDY_PAGES);
				/* handled by the assertion above */
				/* VERBOSE_VM(VM_FF_PGM_NO_MEMORY); */
				VERBOSE_VM(VM_FF_PDM_QUEUES);
				VERBOSE_VM(VM_FF_EMT_RENDEZVOUS);

				VERBOSE_VM(VM_FF_REQUEST);
				VERBOSE_VM(VM_FF_PGM_POOL_FLUSH_PENDING);
				VERBOSE_VM(VM_FF_PDM_DMA);

				/*
				 * VMCPU_FF_HM_TO_R3_MASK
				 */
				VERBOSE_VMCPU(VMCPU_FF_TO_R3);
				/* when this flag gets set, a recall request follows */
				/* VERBOSE_VMCPU(VMCPU_FF_TIMER); */
				VERBOSE_VMCPU(VMCPU_FF_PDM_CRITSECT);

				VERBOSE_VMCPU(VMCPU_FF_PGM_SYNC_CR3);
				VERBOSE_VMCPU(VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
				VERBOSE_VMCPU(VMCPU_FF_REQUEST);
			}

#undef VERBOSE_VMCPU
#undef VERBOSE_VM

			return false;
		}

		/* vendor-specific (VMX/SVM) state transfer, provided by subclasses */
		virtual bool hw_load_state(Nova::Utcb *, VM *, PVMCPU) = 0;
		virtual bool hw_save_state(Nova::Utcb *, VM *, PVMCPU) = 0;

	public:

		/* pseudo exit reasons used by the exit-handler dispatch */
		enum Exit_condition
		{
			SVM_NPT = 0xfc,
			SVM_INVALID = 0xfd,

			VCPU_STARTUP = 0xfe,

			RECALL = 0xff,
			EMULATE_INSTR = 0x100
		};


		/**
		 * Constructor
		 *
		 * \param stack_size     stack size of the handler thread
		 * \param attr           optional pthread attributes of the EMT
		 * \param start_routine  EMT entry function
		 * \param arg            argument passed to 'start_routine'
		 * \param cpu_session    CPU session used for the vCPU thread
		 * \param location       affinity location of the vCPU
		 */
		Vcpu_handler(size_t stack_size, const pthread_attr_t *attr,
		             void *(*start_routine) (void *), void *arg,
		             Genode::Cpu_session * cpu_session,
		             Genode::Affinity::Location location)
		:
			Vmm::Vcpu_dispatcher<pthread>(stack_size, _cap_connection,
			                              cpu_session, location,
			                              attr ? *attr : 0, start_routine, arg),
			_vcpu(cpu_session, location),
			_ec_sel(Genode::cap_map()->insert()),
			_irq_win(false)
		{ }

		/* start execution of the vCPU */
		void start() {
			_vcpu.start(_ec_sel);
		}

		/**
		 * Force the vCPU out of guest mode (triggers the recall handler)
		 *
		 * On failure the calling thread is blocked forever on a lock,
		 * because continuing without the recall would be unsound.
		 */
		void recall()
		{
			using namespace Nova;

			if (ec_ctrl(EC_RECALL, _ec_sel) != NOVA_OK) {
				PERR("recall failed");
				Genode::Lock lock(Genode::Lock::LOCKED);
				lock.lock();
			}
		}

		/**
		 * Log the guest register state as seen by VirtualBox (CPUM context)
		 */
		inline void dump_register_state(PCPUMCTX pCtx)
		{
			PINF("pCtx");
			PLOG("ip:sp:efl ax:bx:cx:dx:si:di %llx:%llx:%llx"
			     " %llx:%llx:%llx:%llx:%llx:%llx",
			     pCtx->rip, pCtx->rsp, pCtx->rflags.u, pCtx->rax, pCtx->rbx,
			     pCtx->rcx, pCtx->rdx, pCtx->rsi, pCtx->rdi);

			PLOG("cs.attr.n.u4LimitHigh=0x%x", pCtx->cs.Attr.n.u4LimitHigh);

			PLOG("cs base:limit:sel:ar %llx:%x:%x:%x", pCtx->cs.u64Base,
			     pCtx->cs.u32Limit, pCtx->cs.Sel, pCtx->cs.Attr.u);
			PLOG("ds base:limit:sel:ar %llx:%x:%x:%x", pCtx->ds.u64Base,
			     pCtx->ds.u32Limit, pCtx->ds.Sel, pCtx->ds.Attr.u);
			PLOG("es base:limit:sel:ar %llx:%x:%x:%x", pCtx->es.u64Base,
			     pCtx->es.u32Limit, pCtx->es.Sel, pCtx->es.Attr.u);
			PLOG("fs base:limit:sel:ar %llx:%x:%x:%x", pCtx->fs.u64Base,
			     pCtx->fs.u32Limit, pCtx->fs.Sel, pCtx->fs.Attr.u);
			PLOG("gs base:limit:sel:ar %llx:%x:%x:%x", pCtx->gs.u64Base,
			     pCtx->gs.u32Limit, pCtx->gs.Sel, pCtx->gs.Attr.u);
			PLOG("ss base:limit:sel:ar %llx:%x:%x:%x", pCtx->ss.u64Base,
			     pCtx->ss.u32Limit, pCtx->ss.Sel, pCtx->ss.Attr.u);

			PLOG("cr0:cr2:cr3:cr4 %llx:%llx:%llx:%llx",
			     pCtx->cr0, pCtx->cr2, pCtx->cr3, pCtx->cr4);

			PLOG("ldtr base:limit:sel:ar %llx:%x:%x:%x", pCtx->ldtr.u64Base,
			     pCtx->ldtr.u32Limit, pCtx->ldtr.Sel, pCtx->ldtr.Attr.u);
			PLOG("tr base:limit:sel:ar %llx:%x:%x:%x", pCtx->tr.u64Base,
			     pCtx->tr.u32Limit, pCtx->tr.Sel, pCtx->tr.Attr.u);

			PLOG("gdtr base:limit %llx:%x", pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt);
			PLOG("idtr base:limit %llx:%x", pCtx->idtr.pIdt, pCtx->idtr.cbIdt);

			PLOG("dr 0:1:2:3:4:5:6:7 %llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
			     pCtx->dr[0], pCtx->dr[1], pCtx->dr[2], pCtx->dr[3],
			     pCtx->dr[4], pCtx->dr[5], pCtx->dr[6], pCtx->dr[7]);

			PLOG("sysenter cs:eip:esp %llx %llx %llx", pCtx->SysEnter.cs,
			     pCtx->SysEnter.eip, pCtx->SysEnter.esp);
		}

		/**
		 * Log the guest register state as seen by NOVA (UTCB)
		 */
		inline void dump_register_state(Nova::Utcb * utcb)
		{
			PINF("utcb");
			PLOG("ip:sp:efl ax:bx:cx:dx:si:di %lx:%lx:%lx"
			     " %lx:%lx:%lx:%lx:%lx:%lx",
			     utcb->ip, utcb->sp, utcb->flags, utcb->ax, utcb->bx,
			     utcb->cx, utcb->dx, utcb->si, utcb->di);

			PLOG("cs base:limit:sel:ar %lx:%x:%x:%x", utcb->cs.base,
			     utcb->cs.limit, utcb->cs.sel, utcb->cs.ar);
			PLOG("ds base:limit:sel:ar %lx:%x:%x:%x", utcb->ds.base,
			     utcb->ds.limit, utcb->ds.sel, utcb->ds.ar);
			PLOG("es base:limit:sel:ar %lx:%x:%x:%x", utcb->es.base,
			     utcb->es.limit, utcb->es.sel, utcb->es.ar);
			PLOG("fs base:limit:sel:ar %lx:%x:%x:%x", utcb->fs.base,
			     utcb->fs.limit, utcb->fs.sel, utcb->fs.ar);
			PLOG("gs base:limit:sel:ar %lx:%x:%x:%x", utcb->gs.base,
			     utcb->gs.limit, utcb->gs.sel, utcb->gs.ar);
			PLOG("ss base:limit:sel:ar %lx:%x:%x:%x", utcb->ss.base,
			     utcb->ss.limit, utcb->ss.sel, utcb->ss.ar);

			PLOG("cr0:cr2:cr3:cr4 %lx:%lx:%lx:%lx",
			     utcb->cr0, utcb->cr2, utcb->cr3, utcb->cr4);

			PLOG("ldtr base:limit:sel:ar %lx:%x:%x:%x", utcb->ldtr.base,
			     utcb->ldtr.limit, utcb->ldtr.sel, utcb->ldtr.ar);
			PLOG("tr base:limit:sel:ar %lx:%x:%x:%x", utcb->tr.base,
			     utcb->tr.limit, utcb->tr.sel, utcb->tr.ar);

			PLOG("gdtr base:limit %lx:%x", utcb->gdtr.base, utcb->gdtr.limit);
			PLOG("idtr base:limit %lx:%x", utcb->idtr.base, utcb->idtr.limit);

			PLOG("dr 7 %lx", utcb->dr7);

			PLOG("sysenter cs:eip:esp %lx %lx %lx", utcb->sysenter_cs,
			     utcb->sysenter_ip, utcb->sysenter_sp);

			PLOG("%x %x %x", utcb->intr_state, utcb->actv_state, utcb->mtd);
		}

		/**
		 * Execute the vCPU in hardware-accelerated mode until the next
		 * event that must be handled by the recompiler
		 *
		 * \param pVMR0  opaque VM handle (ring-0 representation)
		 * \param idCpu  index of the vCPU to run
		 *
		 * \return VINF_SUCCESS if the exit was a recall (state in sync),
		 *         VINF_EM_RAW_EMULATE_INSTR if the recompiler must take
		 *         over, or VERR_INTERNAL_ERROR on a state-transfer failure
		 */
		int run_hw(PVMR0 pVMR0, VMCPUID idCpu)
		{
			VM * pVM = reinterpret_cast<VM *>(pVMR0);
			PVMCPU pVCpu = &pVM->aCpus[idCpu];
			PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);

			Nova::Utcb *utcb = reinterpret_cast<Nova::Utcb *>(Thread_base::utcb());

			Assert(Thread_base::utcb() == Thread_base::myself()->utcb());

			/* take the utcb state prepared during the last exit */
			utcb->mtd = next_utcb.mtd;
			utcb->inj_info = IRQ_INJ_NONE;
			utcb->intr_state = next_utcb.intr_state;
			utcb->actv_state = ACTIVITY_STATE_ACTIVE;
			utcb->ctrl[0] = next_utcb.ctrl[0];
			utcb->ctrl[1] = next_utcb.ctrl[1];

			using namespace Nova;

			/* Transfer vCPU state from vBox to Nova format */
			if (!vbox_to_utcb(utcb, pVM, pVCpu) ||
			    !hw_load_state(utcb, pVM, pVCpu)) {

				PERR("loading vCPU state failed");
				return VERR_INTERNAL_ERROR;
			}

			/* check whether to request interrupt window for injection */
			_irq_win = check_to_request_irq_window(utcb, pVCpu);

			/*
			 * Flag vCPU to be "pokeable" by external events such as interrupts
			 * from virtual devices. Only if this flag is set, the
			 * 'vmR3HaltGlobal1NotifyCpuFF' function calls 'SUPR3CallVMMR0Ex'
			 * with VMMR0_DO_GVMM_SCHED_POKE as argument to indicate such
			 * events. This function, in turn, will recall the vCPU.
			 */
			VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);

			/* save current FPU state */
			fpu_save(reinterpret_cast<char *>(&_emt_fpu_state));
			/* write FPU state from pCtx to FPU registers */
			fpu_load(reinterpret_cast<char *>(&pCtx->fpu));
			/* tell kernel to transfer current fpu registers to vCPU */
			utcb->mtd |= Mtd::FPU;

			_current_vm = pVM;
			_current_vcpu = pVCpu;

			_last_exit_was_recall = false;

			/* switch to hardware accelerated mode */
			switch_to_hw();

			Assert(utcb->actv_state == ACTIVITY_STATE_ACTIVE);

			_current_vm = 0;
			_current_vcpu = 0;

			/* write FPU state of vCPU (in current FPU registers) to pCtx */
			Genode::memcpy(&pCtx->fpu, &_guest_fpu_state, sizeof(X86FXSTATE));

			/* load saved FPU state of EMT thread */
			fpu_load(reinterpret_cast<char *>(&_emt_fpu_state));

			CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);

			VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

			/* Transfer vCPU state from Nova to vBox format */
			if (!utcb_to_vbox(utcb, pVM, pVCpu) ||
			    !hw_save_state(utcb, pVM, pVCpu)) {

				PERR("saving vCPU state failed");
				return VERR_INTERNAL_ERROR;
			}

			/* reset message transfer descriptor for next invocation */
			Assert (!(utcb->inj_info & IRQ_INJ_VALID_MASK));
			/* Reset irq window next time if we are still requesting it */
			next_utcb.mtd = _irq_win ? Mtd::INJ : 0;

			next_utcb.intr_state = utcb->intr_state;
			next_utcb.ctrl[0] = utcb->ctrl[0];
			next_utcb.ctrl[1] = utcb->ctrl[1];

			/* clear any STI/MOV-SS blocking for the next round and flag it */
			if (next_utcb.intr_state & 3) {
				next_utcb.intr_state &= ~3U;
				next_utcb.mtd |= Mtd::STA;
			}

#ifdef VBOX_WITH_REM
			/* XXX see VMM/VMMR0/HMVMXR0.cpp - not necessary every time ! XXX */
			REMFlushTBs(pVM);
#endif

			return _last_exit_was_recall ? VINF_SUCCESS : VINF_EM_RAW_EMULATE_INSTR;
		}
};
|
|
|
|
|
|
|
|
|
|
#endif /* _VCPU_H__ */
|