[RFC 1/2] misc: Add vboxguest driver for Virtual Box Guest integration
From: Hans de Goede
Date: Fri Aug 11 2017 - 09:23:43 EST
This commit adds a driver for the VirtualBox Guest PCI device used in
VirtualBox virtual machines. Enabling this driver will add support for
VirtualBox Guest integration features such as copy-and-paste, seamless
mode and OpenGL pass-through.
This driver also offers vboxguest IPC functionality, which is needed
by the vboxfs driver to provide folder sharing support.
Signed-off-by: Hans de Goede <hdegoede@xxxxxxxxxx>
---
drivers/misc/Kconfig | 1 +
drivers/misc/Makefile | 1 +
drivers/misc/vboxguest/Kconfig | 16 +
drivers/misc/vboxguest/Makefile | 3 +
drivers/misc/vboxguest/vboxguest_core.c | 1818 ++++++++++++++++++++++++++++
drivers/misc/vboxguest/vboxguest_core.h | 194 +++
drivers/misc/vboxguest/vboxguest_linux.c | 464 +++++++
drivers/misc/vboxguest/vboxguest_utils.c | 1124 +++++++++++++++++
drivers/misc/vboxguest/vboxguest_version.h | 18 +
include/linux/vbox_err.h | 6 +
include/linux/vbox_ostypes.h | 6 +
include/linux/vbox_utils.h | 98 ++
include/linux/vbox_vmmdev.h | 128 ++
include/linux/vboxguest.h | 6 +
include/uapi/linux/vbox_err.h | 178 +++
include/uapi/linux/vbox_ostypes.h | 158 +++
include/uapi/linux/vbox_vmmdev.h | 1743 ++++++++++++++++++++++++++
include/uapi/linux/vboxguest.h | 374 ++++++
18 files changed, 6336 insertions(+)
create mode 100644 drivers/misc/vboxguest/Kconfig
create mode 100644 drivers/misc/vboxguest/Makefile
create mode 100644 drivers/misc/vboxguest/vboxguest_core.c
create mode 100644 drivers/misc/vboxguest/vboxguest_core.h
create mode 100644 drivers/misc/vboxguest/vboxguest_linux.c
create mode 100644 drivers/misc/vboxguest/vboxguest_utils.c
create mode 100644 drivers/misc/vboxguest/vboxguest_version.h
create mode 100644 include/linux/vbox_err.h
create mode 100644 include/linux/vbox_ostypes.h
create mode 100644 include/linux/vbox_utils.h
create mode 100644 include/linux/vbox_vmmdev.h
create mode 100644 include/linux/vboxguest.h
create mode 100644 include/uapi/linux/vbox_err.h
create mode 100644 include/uapi/linux/vbox_ostypes.h
create mode 100644 include/uapi/linux/vbox_vmmdev.h
create mode 100644 include/uapi/linux/vboxguest.h
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 8136dc7e863d..ca8bb2c4b62c 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -518,4 +518,5 @@ source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/vboxguest/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index b0b766416306..301b69224929 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
+obj-y += vboxguest/
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
diff --git a/drivers/misc/vboxguest/Kconfig b/drivers/misc/vboxguest/Kconfig
new file mode 100644
index 000000000000..e88ee46c31d4
--- /dev/null
+++ b/drivers/misc/vboxguest/Kconfig
@@ -0,0 +1,16 @@
+config VBOXGUEST
+ tristate "Virtual Box Guest integration support"
+ depends on X86 && PCI && INPUT
+ help
+ This is a driver for the Virtual Box Guest PCI device used in
+ Virtual Box virtual machines. Enabling this driver will add
+ support for Virtual Box Guest integration features such as
+ copy-and-paste, seamless mode and OpenGL pass-through.
+
+ This driver also offers vboxguest IPC functionality which is needed
+ for the vboxfs driver which offers folder sharing support.
+
+ Although it is possible to build this module in, it is advised
+ to build this driver as a module, so that it can be updated
+ independently of the kernel. Select M to build this driver as a
+ module.
diff --git a/drivers/misc/vboxguest/Makefile b/drivers/misc/vboxguest/Makefile
new file mode 100644
index 000000000000..203b8f465817
--- /dev/null
+++ b/drivers/misc/vboxguest/Makefile
@@ -0,0 +1,3 @@
+vboxguest-y := vboxguest_linux.o vboxguest_core.o vboxguest_utils.o
+
+obj-$(CONFIG_VBOXGUEST) += vboxguest.o
diff --git a/drivers/misc/vboxguest/vboxguest_core.c b/drivers/misc/vboxguest/vboxguest_core.c
new file mode 100644
index 000000000000..50d18a37222f
--- /dev/null
+++ b/drivers/misc/vboxguest/vboxguest_core.c
@@ -0,0 +1,1818 @@
+/*
+ * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
+ *
+ * Copyright (C) 2007-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include <linux/vmalloc.h>
+#include "vboxguest_core.h"
+#include "vboxguest_version.h"
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#define GUEST_MAPPINGS_TRIES 5
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker);
+static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT gdev,
+ u32 fFixedEvents);
+static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT gdev);
+static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ u32 fOrMask, u32 fNotMask,
+ bool fSessionTermination);
+static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ u32 fOrMask, u32 fNoMask,
+ bool fSessionTermination);
+
+/**
+ * Reserves memory in which the VMM can relocate any guest mappings
+ * that are floating around.
+ *
+ * This operation is a little bit tricky since the VMM might not accept
+ * just any address because of address clashes between the three contexts
+ * it operates in, so we try several times.
+ *
+ * Failure to reserve the guest mappings is ignored.
+ *
+ * @param gdev The Guest extension device.
+ */
+static void vgdrvInitFixateGuestMappings(PVBOXGUESTDEVEXT gdev)
+{
+ VMMDevReqHypervisorInfo *req = NULL;
+ void *guest_mappings[GUEST_MAPPINGS_TRIES];
+ struct page **pages = NULL;
+ u32 size, hypervisor_size;
+ int i, rc;
+
+ /* Query the required space. */
+ req = vbg_req_alloc(sizeof(*req), VMMDevReq_GetHypervisorInfo);
+ if (!req)
+ return;
+
+ req->hypervisorStart = 0;
+ req->hypervisorSize = 0;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0)
+ goto out;
+
+ /*
+ * The VMM will report back if there is nothing it wants to map, like
+ * for instance in VT-x and AMD-V mode.
+ */
+ if (req->hypervisorSize == 0)
+ goto out;
+
+ hypervisor_size = req->hypervisorSize;
+ /* Add 4M so that we can align the vmap to 4MiB as the host requires. */
+ size = PAGE_ALIGN(req->hypervisorSize) + SZ_4M;
+
+ pages = kmalloc(sizeof(*pages) * (size >> PAGE_SHIFT), GFP_KERNEL);
+ if (!pages)
+ goto out;
+
+ gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
+ if (!gdev->guest_mappings_dummy_page)
+ goto out;
+
+ for (i = 0; i < (size >> PAGE_SHIFT); i++)
+ pages[i] = gdev->guest_mappings_dummy_page;
+
+ /* Try several times, the host can be picky about certain addresses. */
+ for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
+ guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
+ VM_MAP, PAGE_KERNEL_RO);
+ if (!guest_mappings[i])
+ break;
+
+ req->header.requestType = VMMDevReq_SetHypervisorInfo;
+ req->header.rc = VERR_INTERNAL_ERROR;
+ req->hypervisorSize = hypervisor_size;
+ req->hypervisorStart =
+ (unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc >= 0) {
+ gdev->guest_mappings = guest_mappings[i];
+ break;
+ }
+ }
+
+ /* Free vmap's from failed attempts. */
+ while (--i >= 0)
+ vunmap(guest_mappings[i]);
+
+ /* On failure free the dummy-page backing the vmap */
+ if (!gdev->guest_mappings) {
+ __free_page(gdev->guest_mappings_dummy_page);
+ gdev->guest_mappings_dummy_page = NULL;
+ }
+
+out:
+ kfree(req);
+ kfree(pages);
+}
+
+/**
+ * Undo what vgdrvInitFixateGuestMappings did.
+ *
+ * @param gdev The Guest extension device.
+ */
+static void vgdrvTermUnfixGuestMappings(PVBOXGUESTDEVEXT gdev)
+{
+ VMMDevReqHypervisorInfo *req;
+ int rc;
+
+ if (!gdev->guest_mappings)
+ return;
+
+ /*
+ * Tell the host that we're going to free the memory we reserved for
+ * it, then free it up. (Leak the memory if anything goes wrong here.)
+ */
+ req = vbg_req_alloc(sizeof(*req), VMMDevReq_SetHypervisorInfo);
+ if (!req)
+ return;
+
+ req->hypervisorStart = 0;
+ req->hypervisorSize = 0;
+
+ rc = vbg_req_perform(gdev, req);
+
+ kfree(req);
+
+ if (rc < 0) {
+ vbg_err("vgdrvTermUnfixGuestMappings: vbg_req_perform error: %d\n",
+ rc);
+ return;
+ }
+
+ vunmap(gdev->guest_mappings);
+ gdev->guest_mappings = NULL;
+
+ __free_page(gdev->guest_mappings_dummy_page);
+ gdev->guest_mappings_dummy_page = NULL;
+}
+
+/**
+ * Report the guest information to the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ */
+static int vbg_report_guest_info(PVBOXGUESTDEVEXT gdev)
+{
+ /*
+ * Allocate and fill in the two guest info reports.
+ */
+ VMMDevReportGuestInfo *req1 = NULL;
+ VMMDevReportGuestInfo2 *req2 = NULL;
+ int rc, ret = -ENOMEM;
+
+ req1 = vbg_req_alloc(sizeof(*req1), VMMDevReq_ReportGuestInfo);
+ req2 = vbg_req_alloc(sizeof(*req2), VMMDevReq_ReportGuestInfo2);
+ if (!req1 || !req2)
+ goto out_free;
+
+ req1->guestInfo.interfaceVersion = VMMDEV_VERSION;
+#ifdef CONFIG_X86_64
+ req1->guestInfo.osType = VBOXOSTYPE_Linux26_x64;
+#else
+ req1->guestInfo.osType = VBOXOSTYPE_Linux26;
+#endif
+
+ req2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
+ req2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
+ req2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
+ req2->guestInfo.additionsRevision = VBOX_SVN_REV;
+ /* (no features defined yet) */
+ req2->guestInfo.additionsFeatures = 0;
+ strlcpy(req2->guestInfo.szName, VBOX_VERSION_STRING,
+ sizeof(req2->guestInfo.szName));
+
+ /*
+ * There are two protocols here:
+ * 1. Info2 + Info1. Supported by >=3.2.51.
+ * 2. Info1 and optionally Info2. The old protocol.
+ *
+ * We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED
+ * if not supported by the VMMDev (message ordering requirement).
+ */
+ rc = vbg_req_perform(gdev, req2);
+ if (rc >= 0) {
+ rc = vbg_req_perform(gdev, req1);
+ } else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
+ rc = vbg_req_perform(gdev, req1);
+ if (rc >= 0) {
+ rc = vbg_req_perform(gdev, req2);
+ if (rc == VERR_NOT_IMPLEMENTED)
+ rc = VINF_SUCCESS;
+ }
+ }
+ ret = -vbg_status_code_to_errno(rc);
+
+out_free:
+ kfree(req2);
+ kfree(req1);
+ return ret;
+}
+
+/**
+ * Report the guest driver status to the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ * @param active Flag whether the driver is now active or not.
+ */
+static int vgdrvReportDriverStatus(PVBOXGUESTDEVEXT gdev, bool active)
+{
+ VMMDevReportGuestStatus *req;
+ int rc;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDevReq_ReportGuestStatus);
+ if (!req)
+ return -ENOMEM;
+
+ req->guestStatus.facility = VBoxGuestFacilityType_VBoxGuestDriver;
+ req->guestStatus.status = active ? VBoxGuestFacilityStatus_Active :
+ VBoxGuestFacilityStatus_Inactive;
+ req->guestStatus.flags = 0;
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
+ rc = VINF_SUCCESS;
+
+ kfree(req);
+
+ return -vbg_status_code_to_errno(rc);
+}
+
+/** @name Memory Ballooning
+ * @{
+ */
+
+/**
+ * Inflate the balloon by one chunk.
+ *
+ * The caller owns the balloon mutex.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param chunk_idx Index of the chunk.
+ */
+static int vbg_balloon_inflate(PVBOXGUESTDEVEXT gdev, u32 chunk_idx)
+{
+ VMMDevChangeMemBalloon *req = gdev->mem_balloon.change_req;
+ struct page **pages;
+ int i, rc;
+
+ pages = kmalloc(sizeof(*pages) * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!pages)
+ return VERR_NO_MEMORY;
+
+ req->header.size = sizeof(*req);
+ req->inflate = true;
+ req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
+
+ for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
+ pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
+ if (!pages[i]) {
+ rc = VERR_NO_MEMORY;
+ goto out_error;
+ }
+
+ req->phys_page[i] = page_to_phys(pages[i]);
+ }
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0) {
+ vbg_err("vbg_balloon_inflate: vbg_req_perform error: %d\n", rc);
+ goto out_error;
+ }
+
+ gdev->mem_balloon.pages[chunk_idx] = pages;
+
+ return VINF_SUCCESS;
+
+out_error:
+ while (--i >= 0)
+ __free_page(pages[i]);
+ kfree(pages);
+
+ return rc;
+}
+
+/**
+ * Deflate the balloon by one chunk.
+ *
+ * The caller owns the balloon mutex.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param chunk_idx Index of the chunk.
+ */
+static int vbg_balloon_deflate(PVBOXGUESTDEVEXT gdev, u32 chunk_idx)
+{
+ VMMDevChangeMemBalloon *req = gdev->mem_balloon.change_req;
+ struct page **pages = gdev->mem_balloon.pages[chunk_idx];
+ int i, rc;
+
+ req->header.size = sizeof(*req);
+ req->inflate = false;
+ req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
+
+ for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
+ req->phys_page[i] = page_to_phys(pages[i]);
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0) {
+ vbg_err("vbg_balloon_deflate: vbg_req_perform error: %d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
+ __free_page(pages[i]);
+ kfree(pages);
+ gdev->mem_balloon.pages[chunk_idx] = NULL;
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * Cleanup the memory balloon of a session.
+ *
+ * Will request the balloon mutex, so it must be valid and the caller must not
+ * own it already.
+ *
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ */
+static void vbg_balloon_close(PVBOXGUESTDEVEXT gdev, PVBOXGUESTSESSION session)
+{
+ int i, rc;
+
+ mutex_lock(&gdev->mem_balloon.mutex);
+
+ if (gdev->mem_balloon.owner == session) {
+ for (i = gdev->mem_balloon.chunks - 1; i >= 0; i--) {
+ rc = vbg_balloon_deflate(gdev, i);
+ if (rc < 0)
+ break;
+
+ gdev->mem_balloon.chunks--;
+ }
+ gdev->mem_balloon.owner = NULL;
+ }
+
+ mutex_unlock(&gdev->mem_balloon.mutex);
+}
+
+/** @} */
+
+/** @name Heartbeat
+ * @{
+ */
+
+/**
+ * Callback for heartbeat timer.
+ */
+static void vbg_heartbeat_timer(unsigned long data)
+{
+ PVBOXGUESTDEVEXT gdev = (PVBOXGUESTDEVEXT)data;
+
+ vbg_req_perform(gdev, gdev->guest_heartbeat_req);
+ mod_timer(&gdev->heartbeat_timer,
+ msecs_to_jiffies(gdev->heartbeat_interval_ms));
+}
+
+/**
+ * Configure the host to check guest's heartbeat
+ * and get heartbeat interval from the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ * @param fEnabled Set true to enable guest heartbeat checks on host.
+ */
+static int vgdrvHeartbeatHostConfigure(PVBOXGUESTDEVEXT gdev, bool fEnabled)
+{
+ VMMDevReqHeartbeat *req;
+ int rc;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDevReq_HeartbeatConfigure);
+ if (!req)
+ return -ENOMEM;
+
+ req->fEnabled = fEnabled;
+ req->cNsInterval = 0;
+ rc = vbg_req_perform(gdev, req);
+ do_div(req->cNsInterval, 1000000); /* ns -> ms */
+ gdev->heartbeat_interval_ms = req->cNsInterval;
+ kfree(req);
+
+ return -vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Initializes the heartbeat timer.
+ *
+ * This feature may be disabled by the host.
+ *
+ * @returns 0 or negative errno value (ignored).
+ * @param gdev The Guest extension device.
+ */
+static int vgdrvHeartbeatInit(PVBOXGUESTDEVEXT gdev)
+{
+ int ret;
+
+ /* Make sure that heartbeat checking is disabled if we fail. */
+ ret = vgdrvHeartbeatHostConfigure(gdev, false);
+ if (ret < 0)
+ return ret;
+
+ ret = vgdrvHeartbeatHostConfigure(gdev, true);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Preallocate the request to use it from the timer callback because:
+ * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
+ * and the timer callback runs at DISPATCH_LEVEL;
+ * 2) avoid repeated allocations.
+ */
+ gdev->guest_heartbeat_req = vbg_req_alloc(
+ sizeof(*gdev->guest_heartbeat_req),
+ VMMDevReq_GuestHeartbeat);
+ if (!gdev->guest_heartbeat_req)
+ return -ENOMEM;
+
+ vbg_info("vgdrvHeartbeatInit: Setting up heartbeat to trigger every %d milliseconds\n",
+ gdev->heartbeat_interval_ms);
+ mod_timer(&gdev->heartbeat_timer, 0);
+
+ return 0;
+}
+
+/** @} */
+
+/**
+ * vbg_query_host_version tries to get the host feature mask and version information
+ * (vbg_host_version).
+ *
+ * @returns 0 or negative errno value (ignored).
+ * @param gdev The Guest extension device.
+ */
+static int vbg_query_host_version(PVBOXGUESTDEVEXT gdev)
+{
+ VMMDevReqHostVersion *req;
+ int rc, ret;
+
+ req = vbg_req_alloc(sizeof(*req), VMMDevReq_GetHostVersion);
+ if (!req)
+ return -ENOMEM;
+
+ rc = vbg_req_perform(gdev, req);
+ ret = -vbg_status_code_to_errno(rc);
+ if (ret)
+ goto out;
+
+ vbg_info("vboxguest: host-version: %u.%u.%ur%u %#x\n",
+ req->major, req->minor, req->build,
+ req->revision, req->features);
+
+ if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
+ vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
+ ret = -ENODEV;
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
+/**
+ * Initializes the VBoxGuest device extension when the
+ * device driver is loaded.
+ *
+ * The native code locates the VMMDev on the PCI bus and retrieve
+ * the MMIO and I/O port ranges, this function will take care of
+ * mapping the MMIO memory (if present). Upon successful return
+ * the native code should set up the interrupt handler.
+ *
+ * @returns 0 or negative errno value.
+ *
+ * @param gdev The Guest extension device.
+ * @param fixed_events Events that will be enabled upon init and no client
+ * will ever be allowed to mask.
+ */
+int vbg_core_init(PVBOXGUESTDEVEXT gdev, u32 fixed_events)
+{
+ int ret = -ENOMEM;
+
+ gdev->fFixedEvents = fixed_events | VMMDEV_EVENT_HGCM;
+ gdev->fEventFilterHost = U32_MAX; /* forces a report */
+ gdev->fGuestCapsHost = U32_MAX; /* forces a report */
+
+ init_waitqueue_head(&gdev->event_wq);
+ init_waitqueue_head(&gdev->hgcm_wq);
+ INIT_LIST_HEAD(&gdev->session_list);
+ spin_lock_init(&gdev->event_spinlock);
+ spin_lock_init(&gdev->session_spinlock);
+ mutex_init(&gdev->cancel_req_mutex);
+ mutex_init(&gdev->mem_balloon.mutex);
+ setup_timer(&gdev->heartbeat_timer, vbg_heartbeat_timer,
+ (unsigned long)gdev);
+
+ vgdrvBitUsageTrackerClear(&gdev->EventFilterTracker);
+ vgdrvBitUsageTrackerClear(&gdev->SetGuestCapsTracker);
+
+ gdev->mem_balloon.change_req =
+ vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
+ VMMDevReq_ChangeMemBalloon);
+ gdev->cancel_req =
+ vbg_req_alloc(sizeof(*(gdev->cancel_req)),
+ VMMDevReq_HGCMCancel2);
+ gdev->pIrqAckEvents =
+ vbg_req_alloc(sizeof(*gdev->pIrqAckEvents),
+ VMMDevReq_AcknowledgeEvents);
+ gdev->mouse_status_req =
+ vbg_req_alloc(sizeof(*gdev->mouse_status_req),
+ VMMDevReq_GetMouseStatus);
+
+ if (!gdev->mem_balloon.change_req || !gdev->cancel_req ||
+ !gdev->pIrqAckEvents || !gdev->mouse_status_req)
+ goto err_free_reqs;
+
+ ret = vbg_query_host_version(gdev);
+ if (ret)
+ goto err_free_reqs;
+
+ ret = vbg_report_guest_info(gdev);
+ if (ret) {
+ vbg_err("vboxguest: VBoxReportGuestInfo error: %d\n", ret);
+ goto err_free_reqs;
+ }
+
+ ret = vgdrvResetEventFilterOnHost(gdev, gdev->fFixedEvents);
+ if (ret) {
+ vbg_err("vboxguest: Error setting fixed event filter: %d\n", ret);
+ goto err_free_reqs;
+ }
+
+ ret = vgdrvResetCapabilitiesOnHost(gdev);
+ if (ret) {
+ vbg_err("vboxguest: Error clearing guest capabilities: %d\n", ret);
+ goto err_free_reqs;
+ }
+
+ ret = vbg_core_set_mouse_status(gdev, 0);
+ if (ret) {
+ vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
+ goto err_free_reqs;
+ }
+
+ /* These may fail without requiring the driver init to fail. */
+ vgdrvInitFixateGuestMappings(gdev);
+ vgdrvHeartbeatInit(gdev);
+
+ /* All Done! */
+ ret = vgdrvReportDriverStatus(gdev, true);
+ if (ret < 0)
+ vbg_err("vboxguest: VBoxReportGuestDriverStatus error: %d\n",
+ ret);
+
+ return 0;
+
+err_free_reqs:
+ kfree(gdev->mouse_status_req);
+ kfree(gdev->pIrqAckEvents);
+ kfree(gdev->cancel_req);
+ kfree(gdev->mem_balloon.change_req);
+ return ret;
+}
+
+/**
+ * Call this on exit to clean-up vboxguest-core managed resources.
+ *
+ * The native code should call this before the driver is unloaded,
+ * but don't call this on shutdown.
+ *
+ * @param gdev The Guest extension device.
+ */
+void vbg_core_exit(PVBOXGUESTDEVEXT gdev)
+{
+ /* Stop HB timer and disable host heartbeat checking. */
+ del_timer_sync(&gdev->heartbeat_timer);
+ vgdrvHeartbeatHostConfigure(gdev, false);
+ kfree(gdev->guest_heartbeat_req);
+
+ /* Clean up the bits that involves the host first. */
+ vgdrvTermUnfixGuestMappings(gdev);
+
+ /* Clear the host flags (mouse status etc). */
+ vgdrvResetEventFilterOnHost(gdev, 0);
+ vgdrvResetCapabilitiesOnHost(gdev);
+ vbg_core_set_mouse_status(gdev, 0);
+
+ kfree(gdev->pIrqAckEvents);
+ kfree(gdev->cancel_req);
+ kfree(gdev->mem_balloon.change_req);
+}
+
+/**
+ * Creates a VBoxGuest user session.
+ *
+ * vboxguest_linux.c calls this when userspace opens the char-device.
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ * @param session_ret Where to store the session on success.
+ * @param user_session Set if this is a session for the vboxuser device.
+ */
+int vbg_core_open_session(PVBOXGUESTDEVEXT gdev,
+ VBOXGUESTSESSION **session_ret, bool user_session)
+{
+ VBOXGUESTSESSION *session;
+ unsigned long flags;
+
+ session = kzalloc(sizeof(*session), GFP_KERNEL);
+ if (!session)
+ return -ENOMEM;
+
+ session->gdev = gdev;
+ session->user_session = user_session;
+
+ spin_lock_irqsave(&gdev->session_spinlock, flags);
+ list_add(&session->list_node, &gdev->session_list);
+ spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+ *session_ret = session;
+
+ return 0;
+}
+
+/**
+ * Closes a VBoxGuest session.
+ *
+ * @param session The session to close (and free).
+ */
+void vbg_core_close_session(VBOXGUESTSESSION *session)
+{
+ PVBOXGUESTDEVEXT gdev = session->gdev;
+ unsigned long flags;
+ unsigned i;
+
+ spin_lock_irqsave(&gdev->session_spinlock, flags);
+ list_del(&session->list_node);
+ spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+ vgdrvSetSessionCapabilities(gdev, session, 0 /*fOrMask */ ,
+ U32_MAX /*fNotMask */ ,
+ true /*fSessionTermination */ );
+ vgdrvSetSessionEventFilter(gdev, session, 0 /*fOrMask */ ,
+ U32_MAX /*fNotMask */ ,
+ true /*fSessionTermination */ );
+
+ for (i = 0; i < ARRAY_SIZE(session->aHGCMClientIds); i++) {
+ if (session->aHGCMClientIds[i])
+ vbg_hgcm_disconnect(gdev, session->aHGCMClientIds[i]);
+ }
+
+ vbg_balloon_close(gdev, session);
+ kfree(session);
+}
+
+/**
+ * Used by VGDrvCommonISR as well as the acquire guest capability code.
+ * The caller must hold the event_spinlock.
+ *
+ * @param gdev The VBoxGuest device extension.
+ * @param events The events to dispatch.
+ */
+static void vbg_dispatch_events_locked(PVBOXGUESTDEVEXT gdev, u32 events)
+{
+ gdev->f32PendingEvents |= events;
+
+ wake_up(&gdev->event_wq);
+}
+
+static bool vbg_wait_event_cond(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VBoxGuestWaitEventInfo * info)
+{
+ unsigned long flags;
+ bool wakeup;
+ u32 events;
+
+ spin_lock_irqsave(&gdev->event_spinlock, flags);
+
+ events = gdev->f32PendingEvents & info->u32EventMaskIn;
+ wakeup = events || session->cancel_waiters;
+
+ spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+ return wakeup;
+}
+
+/* Must be called with the event_lock held */
+static u32 vbg_consume_events_locked(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VBoxGuestWaitEventInfo * info)
+{
+ u32 events = gdev->f32PendingEvents & info->u32EventMaskIn;
+
+ gdev->f32PendingEvents &= ~events;
+ return events;
+}
+
+static int vbg_ioctl_wait_event(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VBoxGuestWaitEventInfo *info,
+ size_t *info_len_ret)
+{
+ unsigned long flags;
+ long timeout;
+ int rc = VINF_SUCCESS;
+
+ if (info->u32TimeoutIn == U32_MAX)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = msecs_to_jiffies(info->u32TimeoutIn);
+
+ info->u32Result = VBOXGUEST_WAITEVENT_OK;
+ info->u32EventFlagsOut = 0;
+
+ if (info_len_ret)
+ *info_len_ret = sizeof(*info);
+
+ do {
+ timeout = wait_event_interruptible_timeout(
+ gdev->event_wq,
+ vbg_wait_event_cond(gdev, session, info),
+ timeout);
+
+ spin_lock_irqsave(&gdev->event_spinlock, flags);
+
+ if (timeout < 0 || session->cancel_waiters) {
+ info->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
+ rc = VERR_INTERRUPTED;
+ } else if (timeout == 0) {
+ info->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
+ rc = VERR_TIMEOUT;
+ } else {
+ info->u32EventFlagsOut =
+ vbg_consume_events_locked(gdev, session, info);
+ }
+
+ spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+ /*
+ * Someone else may have consumed the event(s) first, in
+ * which case we go back to waiting.
+ */
+ } while (rc == VINF_SUCCESS && info->u32EventFlagsOut == 0);
+
+ return rc;
+}
+
+static int vbg_ioctl_cancel_all_wait_events(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gdev->event_spinlock, flags);
+ session->cancel_waiters = true;
+ spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+ return VINF_SUCCESS;
+}
+
+/**
+ * Checks if the VMM request is allowed in the context of the given session.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param session The calling session.
+ * @param req The request.
+ */
+static int vbg_req_allowed(PVBOXGUESTDEVEXT gdev, PVBOXGUESTSESSION session,
+ VMMDevRequestHeader const *req)
+{
+ const VMMDevReportGuestStatus *guest_status;
+ bool trusted_apps_only;
+
+ switch (req->requestType) {
+ /* Trusted users apps only. */
+ case VMMDevReq_QueryCredentials:
+ case VMMDevReq_ReportCredentialsJudgement:
+ case VMMDevReq_RegisterSharedModule:
+ case VMMDevReq_UnregisterSharedModule:
+ case VMMDevReq_WriteCoreDump:
+ case VMMDevReq_GetCpuHotPlugRequest:
+ case VMMDevReq_SetCpuHotPlugStatus:
+ case VMMDevReq_CheckSharedModules:
+ case VMMDevReq_GetPageSharingStatus:
+ case VMMDevReq_DebugIsPageShared:
+ case VMMDevReq_ReportGuestStats:
+ case VMMDevReq_ReportGuestUserState:
+ case VMMDevReq_GetStatisticsChangeRequest:
+ case VMMDevReq_ChangeMemBalloon:
+ trusted_apps_only = true;
+ break;
+
+ /* Anyone. */
+ case VMMDevReq_GetMouseStatus:
+ case VMMDevReq_SetMouseStatus:
+ case VMMDevReq_SetPointerShape:
+ case VMMDevReq_GetHostVersion:
+ case VMMDevReq_Idle:
+ case VMMDevReq_GetHostTime:
+ case VMMDevReq_SetPowerStatus:
+ case VMMDevReq_AcknowledgeEvents:
+ case VMMDevReq_CtlGuestFilterMask:
+ case VMMDevReq_ReportGuestStatus:
+ case VMMDevReq_GetDisplayChangeRequest:
+ case VMMDevReq_VideoModeSupported:
+ case VMMDevReq_GetHeightReduction:
+ case VMMDevReq_GetDisplayChangeRequest2:
+ case VMMDevReq_VideoModeSupported2:
+ case VMMDevReq_VideoAccelEnable:
+ case VMMDevReq_VideoAccelFlush:
+ case VMMDevReq_VideoSetVisibleRegion:
+ case VMMDevReq_GetDisplayChangeRequestEx:
+ case VMMDevReq_GetSeamlessChangeRequest:
+ case VMMDevReq_GetVRDPChangeRequest:
+ case VMMDevReq_LogString:
+ case VMMDevReq_GetSessionId:
+ trusted_apps_only = false;
+ break;
+
+ /**
+ * @todo this have to be changed into an I/O control and the facilities
+ * tracked in the session so they can automatically be failed when
+ * the session terminates without reporting the new status.
+ *
+ * The information presented by IGuest is not reliable without this!
+ */
+ /* Depends on the request parameters... */
+ case VMMDevReq_ReportGuestCapabilities:
+ guest_status = (const VMMDevReportGuestStatus *)req;
+ switch (guest_status->guestStatus.facility) {
+ case VBoxGuestFacilityType_All:
+ case VBoxGuestFacilityType_VBoxGuestDriver:
+ vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
+ guest_status->guestStatus.facility);
+ return VERR_PERMISSION_DENIED;
+ case VBoxGuestFacilityType_VBoxService:
+ trusted_apps_only = true;
+ break;
+ case VBoxGuestFacilityType_VBoxTrayClient:
+ case VBoxGuestFacilityType_Seamless:
+ case VBoxGuestFacilityType_Graphics:
+ default:
+ trusted_apps_only = false;
+ break;
+ }
+ break;
+
+ /* Anything else is not allowed. */
+ default:
+ vbg_err("Denying userspace vmm call type %#08x\n",
+ req->requestType);
+ return VERR_PERMISSION_DENIED;
+ }
+
+ if (trusted_apps_only && session->user_session) {
+ vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
+ req->requestType);
+ return VERR_PERMISSION_DENIED;
+ }
+
+ return VINF_SUCCESS;
+}
+
+static int vgdrvIoCtl_VMMRequest(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VMMDevRequestHeader *req, size_t req_size,
+ size_t *req_size_ret)
+{
+ int rc;
+
+ rc = vbg_req_verify(req, req_size);
+ if (rc < 0)
+ return rc;
+
+ rc = vbg_req_allowed(gdev, session, req);
+ if (rc < 0)
+ return rc;
+
+ rc = vbg_req_perform(gdev, req);
+ if (rc >= 0) {
+ WARN_ON(rc == VINF_HGCM_ASYNC_EXECUTE);
+ *req_size_ret = req->size;
+ }
+
+ return rc;
+}
+
+static int vgdrvIoCtl_HGCMConnect(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VBoxGuestHGCMConnectInfo * pInfo,
+ size_t *info_size_ret)
+{
+ unsigned long flags;
+ u32 client_id;
+ int i, rc;
+
+ /* Find a free place in the sessions clients array and claim it */
+ spin_lock_irqsave(&gdev->session_spinlock, flags);
+ for (i = 0; i < ARRAY_SIZE(session->aHGCMClientIds); i++) {
+ if (!session->aHGCMClientIds[i]) {
+ session->aHGCMClientIds[i] = U32_MAX;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+ if (i >= ARRAY_SIZE(session->aHGCMClientIds))
+ return VERR_TOO_MANY_OPEN_FILES;
+
+ rc = vbg_hgcm_connect(gdev, &pInfo->Loc, &client_id);
+
+ spin_lock_irqsave(&gdev->session_spinlock, flags);
+ if (rc >= 0) {
+ pInfo->result = VINF_SUCCESS;
+ pInfo->u32ClientID = client_id;
+ *info_size_ret = sizeof(*pInfo);
+
+ session->aHGCMClientIds[i] = client_id;
+ } else {
+ session->aHGCMClientIds[i] = 0;
+ }
+ spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+ return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_HGCM_DISCONNECT: disconnect an HGCM client id
+ * owned by this session.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ * @param pInfo Identifies the client id to disconnect; result written back.
+ * @param info_size_ret Where to store the amount of returned data.
+ */
+static int vgdrvIoCtl_HGCMDisconnect(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VBoxGuestHGCMDisconnectInfo * pInfo,
+ size_t *info_size_ret)
+{
+ u32 client_id = pInfo->u32ClientID;
+ unsigned long flags;
+ int i, rc;
+
+ /* 0 means free slot, U32_MAX is the reservation placeholder. */
+ if (client_id == 0 || client_id == U32_MAX)
+ return VERR_INVALID_HANDLE;
+
+ spin_lock_irqsave(&gdev->session_spinlock, flags);
+ for (i = 0; i < ARRAY_SIZE(session->aHGCMClientIds); i++) {
+ if (session->aHGCMClientIds[i] == client_id) {
+ /*
+ * Reserve the slot (U32_MAX) so no concurrent caller can
+ * reuse or double-disconnect it while we drop the lock.
+ */
+ session->aHGCMClientIds[i] = U32_MAX;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+ if (i >= ARRAY_SIZE(session->aHGCMClientIds))
+ return VERR_INVALID_HANDLE;
+
+ rc = vbg_hgcm_disconnect(gdev, client_id);
+
+ /* Free the slot on success, otherwise restore the client id. */
+ spin_lock_irqsave(&gdev->session_spinlock, flags);
+ if (rc >= 0) {
+ pInfo->result = VINF_SUCCESS;
+ *info_size_ret = sizeof(*pInfo);
+
+ session->aHGCMClientIds[i] = 0;
+ } else {
+ session->aHGCMClientIds[i] = client_id;
+ }
+ spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+
+ return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_HGCM_CALL (normal, timed, and 32-bit variants):
+ * validate the buffer layout and client id, then perform the HGCM call.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ * @param pInfo The call info header (followed by cParms parameters).
+ * @param cMillies Timeout in milliseconds (U32_MAX for no timeout).
+ * @param f32bit Whether this is a 32-bit call from a 64-bit kernel.
+ * @param cbExtra Bytes preceding pInfo in the user buffer (timed variant).
+ * @param cbData Total size of the user buffer.
+ * @param info_size_ret Where to store the amount of returned data.
+ */
+static int vgdrvIoCtl_HGCMCall(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VBoxGuestHGCMCallInfo *pInfo, u32 cMillies,
+ bool f32bit, size_t cbExtra, size_t cbData,
+ size_t *info_size_ret)
+{
+ u32 cbInfo, client_id = pInfo->u32ClientID;
+ unsigned long flags;
+ size_t cbActual;
+ unsigned i;
+ int rc;
+
+ if (pInfo->cParms > VBOX_HGCM_MAX_PARMS)
+ return VERR_INVALID_PARAMETER;
+
+ /* 0 is an unused slot, U32_MAX a connect/disconnect placeholder. */
+ if (client_id == 0 || client_id == U32_MAX)
+ return VERR_INVALID_HANDLE;
+
+ /*
+ * The buffer must hold the extra prefix, the info header, and the
+ * parameter array; the parameter size depends on the ABI (32/64 bit).
+ */
+ cbActual = cbExtra + sizeof(*pInfo);
+ if (f32bit)
+ cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
+ else
+ cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
+ if (cbData < cbActual) {
+ vbg_debug("VBOXGUEST_IOCTL_HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
+ cbData, cbData, cbActual, cbActual);
+ return VERR_INVALID_PARAMETER;
+ }
+
+ /*
+ * Validate the client id.
+ * NOTE(review): the lock is dropped before the call is made, so a
+ * concurrent disconnect of the same id seems possible - confirm this
+ * is handled (or tolerated) by the HGCM layer.
+ */
+ spin_lock_irqsave(&gdev->session_spinlock, flags);
+ for (i = 0; i < ARRAY_SIZE(session->aHGCMClientIds); i++)
+ if (session->aHGCMClientIds[i] == client_id)
+ break;
+ spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+ if (i >= ARRAY_SIZE(session->aHGCMClientIds)) {
+ vbg_debug("VBOXGUEST_IOCTL_HGCM_CALL: Invalid handle. u32Client=%#08x\n",
+ client_id);
+ return VERR_INVALID_HANDLE;
+ }
+
+ cbInfo = (u32) (cbData - cbExtra);
+
+ if (f32bit)
+ rc = vbg_hgcm_call32(gdev, pInfo, cbInfo, cMillies, true);
+ else
+ rc = vbg_hgcm_call(gdev, pInfo, cbInfo, cMillies, true);
+
+ if (rc >= 0) {
+ *info_size_ret = cbActual;
+ } else if (rc == VERR_INTERRUPTED || rc == VERR_TIMEOUT ||
+ rc == VERR_OUT_OF_RANGE) {
+ /* Expected failure modes, only log at debug level. */
+ vbg_debug("VBOXGUEST_IOCTL_HGCM_CALL%s error: %d\n",
+ f32bit ? "32" : "64", rc);
+ } else {
+ vbg_err("VBOXGUEST_IOCTL_HGCM_CALL%s error: %d\n",
+ f32bit ? "32" : "64", rc);
+ }
+ return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
+ *
+ * Ask the host for the size of the balloon and try to set it accordingly,
+ * inflating or deflating chunk by chunk.
+ *
+ * @returns VBox status code
+ *
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ * @param info The output buffer.
+ * @param info_size_ret Where to store the amount of returned data.
+ */
+static int vgdrvIoCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT gdev,
+					 PVBOXGUESTSESSION session,
+					 VBoxGuestCheckBalloonInfo *info,
+					 size_t *info_size_ret)
+{
+	VMMDevGetMemBalloonChangeRequest *req = NULL;
+	u32 i, chunks;
+	int rc;
+
+	mutex_lock(&gdev->mem_balloon.mutex);
+
+	/*
+	 * The first user trying to query/change the balloon becomes the
+	 * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
+	 */
+	if (gdev->mem_balloon.owner && gdev->mem_balloon.owner != session) {
+		rc = VERR_PERMISSION_DENIED;
+		goto out;
+	}
+
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_GetMemBalloonChangeRequest);
+	if (!req) {
+		rc = VERR_NO_MEMORY;
+		goto out;
+	}
+
+	/*
+	 * Setting this bit means that we request the value from the host and
+	 * change the guest memory balloon according to the returned value.
+	 */
+	req->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0)
+		goto out;
+
+	/*
+	 * The host always returns the same maximum amount of chunks, so
+	 * we do this once.
+	 */
+	if (!gdev->mem_balloon.max_chunks) {
+		gdev->mem_balloon.pages =
+			devm_kcalloc(gdev->dev, req->cPhysMemChunks,
+				     sizeof(struct page **), GFP_KERNEL);
+		if (!gdev->mem_balloon.pages) {
+			rc = VERR_NO_MEMORY;
+			goto out;
+		}
+		gdev->mem_balloon.max_chunks = req->cPhysMemChunks;
+	}
+
+	chunks = req->cBalloonChunks;
+	if (chunks > gdev->mem_balloon.max_chunks) {
+		vbg_err("VBOXGUEST_IOCTL_CHECK_BALLOON: illegal balloon size %u (max=%u)\n",
+			chunks, gdev->mem_balloon.max_chunks);
+		rc = VERR_INVALID_PARAMETER;
+		goto out;
+	}
+
+	/*
+	 * Use the range-checked local 'chunks' below (the original mixed
+	 * req->cBalloonChunks and chunks; they are the same value, but only
+	 * the local has been validated against max_chunks above).
+	 */
+	if (chunks > gdev->mem_balloon.chunks) {
+		/* inflate */
+		for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
+			rc = vbg_balloon_inflate(gdev, i);
+			if (rc < 0) {
+				/* Failure to alloc memory is not an error */
+				if (rc == VERR_NO_MEMORY)
+					rc = VINF_SUCCESS;
+				break;
+			}
+			gdev->mem_balloon.chunks++;
+		}
+	} else {
+		/* deflate */
+		for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
+			rc = vbg_balloon_deflate(gdev, i);
+			if (rc < 0)
+				break;
+
+			gdev->mem_balloon.chunks--;
+		}
+	}
+
+	/* Report the size we actually managed to reach back to userspace. */
+	info->cBalloonChunks = gdev->mem_balloon.chunks;
+	/* Under Linux we always handle the balloon in R0 / in the kernel */
+	info->fHandleInR3 = false;
+	*info_size_ret = sizeof(VBoxGuestCheckBalloonInfo);
+
+	gdev->mem_balloon.owner = session;
+out:
+	kfree(req);
+	mutex_unlock(&gdev->mem_balloon.mutex);
+	return rc;
+}
+
+/**
+ * Handle a request for writing a core dump of the guest on the host.
+ *
+ * @returns VBox status code
+ *
+ * @param gdev The Guest extension device.
+ * @param pInfo The input buffer holding the dump flags.
+ */
+static int vgdrvIoCtl_WriteCoreDump(PVBOXGUESTDEVEXT gdev,
+ VBoxGuestWriteCoreDump * pInfo)
+{
+	VMMDevReqWriteCoreDump *core_req;
+	int rc;
+
+	core_req = vbg_req_alloc(sizeof(*core_req), VMMDevReq_WriteCoreDump);
+	if (!core_req)
+		return VERR_NO_MEMORY;
+
+	/* Copy the caller's flags into the host request and submit it. */
+	core_req->fFlags = pInfo->fFlags;
+	rc = vbg_req_perform(gdev, core_req);
+	kfree(core_req);
+
+	return rc;
+}
+
+/** @name Guest Capabilities, Mouse Status and Event Filter
+ * @{
+ */
+
+/**
+ * Resets a bit usage tracker to its empty state (init time).
+ *
+ * @param pTracker The tracker to clear.
+ */
+static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker)
+{
+	u32 i;
+
+	pTracker->fMask = 0;
+	for (i = 0; i < ARRAY_SIZE(pTracker->acPerBitUsage); i++)
+		pTracker->acPerBitUsage[i] = 0;
+}
+
+/**
+ * Applies a change to the bit usage tracker.
+ *
+ * Each set bit in @a changed flips between "used by this session" and
+ * "not used"; the per-bit reference counts and the aggregate mask are
+ * updated accordingly.
+ *
+ * @returns true if the aggregate mask changed, false if not.
+ * @param pTracker The bit usage tracker.
+ * @param changed The bits to change.
+ * @param previous The previous value of the bits.
+ */
+static bool vgdrvBitUsageTrackerChange(PVBOXGUESTBITUSAGETRACER pTracker,
+				       u32 changed, u32 previous)
+{
+	bool global_change = false;
+	u32 bit, mask;
+
+	while (changed) {
+		bit = ffs(changed) - 1;
+		mask = BIT(bit);
+		changed &= ~mask;
+
+		if (previous & mask) {
+			/* The bit was set before: drop one reference. */
+			if (--pTracker->acPerBitUsage[bit] == 0) {
+				pTracker->fMask &= ~mask;
+				global_change = true;
+			}
+		} else {
+			/* The bit was clear before: add one reference. */
+			if (++pTracker->acPerBitUsage[bit] == 1) {
+				pTracker->fMask |= mask;
+				global_change = true;
+			}
+		}
+	}
+
+	return global_change;
+}
+
+/**
+ * Init and termination worker for resetting the (host) event filter on the host
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ * @param fFixedEvents Fixed events (init time).
+ */
+static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT gdev,
+				       u32 fFixedEvents)
+{
+	VMMDevCtlGuestFilterMask *req;
+	int rc;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_CtlGuestFilterMask);
+	if (!req)
+		return -ENOMEM;
+
+	/* Keep only the fixed events: or them in, mask everything else out. */
+	req->u32OrMask = fFixedEvents;
+	req->u32NotMask = ~fFixedEvents;
+
+	rc = vbg_req_perform(gdev, req);
+	kfree(req);
+	if (rc < 0)
+		vbg_err("vgdrvResetEventFilterOnHost error: %d\n", rc);
+
+	return -vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Changes the event filter mask for the given session.
+ *
+ * This is called in response to VBOXGUEST_IOCTL_CTL_FILTER_MASK as well as to
+ * do session cleanup.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ * @param fOrMask The events to add.
+ * @param fNotMask The events to remove.
+ * @param fSessionTermination Set if we're called by the session cleanup code.
+ * This tweaks the error handling so we perform
+ * proper session cleanup even if the host
+ * misbehaves.
+ *
+ * @remarks Takes the session spinlock.
+ */
+static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ u32 fOrMask, u32 fNotMask,
+ bool fSessionTermination)
+{
+ VMMDevCtlGuestFilterMask *req;
+ u32 changed, previous;
+ unsigned long flags;
+ bool global_change;
+ int rc = VINF_SUCCESS;
+
+ /* Allocate a request buffer before taking the spinlock */
+ req = vbg_req_alloc(sizeof(*req), VMMDevReq_CtlGuestFilterMask);
+ if (!req) {
+ if (!fSessionTermination)
+ return VERR_NO_MEMORY;
+ /* Ignore failure, we must do session cleanup. */
+ }
+
+ spin_lock_irqsave(&gdev->session_spinlock, flags);
+
+ /* Apply the changes to the session mask. */
+ previous = session->fEventFilter;
+ session->fEventFilter |= fOrMask;
+ session->fEventFilter &= ~fNotMask;
+
+ /* If anything actually changed, update the global usage counters. */
+ changed = previous ^ session->fEventFilter;
+ if (!changed)
+ goto out;
+
+ global_change = vgdrvBitUsageTrackerChange(&gdev->EventFilterTracker,
+ changed, previous);
+
+ /*
+ * Only talk to the host when the aggregate mask changed or when the
+ * host-side state is unknown (U32_MAX after an earlier failure), and
+ * only if we actually have a request buffer.
+ */
+ if (!(global_change || gdev->fEventFilterHost == U32_MAX) || !req)
+ goto out;
+
+ req->u32OrMask = gdev->fFixedEvents | gdev->EventFilterTracker.fMask;
+ if (req->u32OrMask == gdev->fEventFilterHost)
+ goto out;
+
+ gdev->fEventFilterHost = req->u32OrMask;
+ req->u32NotMask = ~req->u32OrMask;
+ rc = vbg_req_perform(gdev, req);
+ if (rc < 0) {
+ /* Failed, roll back (unless it's session termination time). */
+ /* U32_MAX marks the host-side filter state as unknown. */
+ gdev->fEventFilterHost = U32_MAX;
+ if (fSessionTermination)
+ goto out;
+
+ /* Undo both the tracker update and the session mask change. */
+ vgdrvBitUsageTrackerChange(&gdev->EventFilterTracker, changed,
+ session->fEventFilter);
+ session->fEventFilter = previous;
+ }
+
+out:
+ spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+ kfree(req);
+
+ return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_CTL_FILTER_MASK.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ * @param pInfo The request.
+ */
+static int vgdrvIoCtl_CtlFilterMask(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VBoxGuestFilterMaskInfo * pInfo)
+{
+	u32 requested = pInfo->u32OrMask | pInfo->u32NotMask;
+
+	/* Only bits inside the valid event mask may be touched. */
+	if (requested & ~VMMDEV_EVENT_VALID_EVENT_MASK)
+		return VERR_INVALID_PARAMETER;
+
+	return vgdrvSetSessionEventFilter(gdev, session, pInfo->u32OrMask,
+					  pInfo->u32NotMask,
+					  false /*fSessionTermination */ );
+}
+
+/**
+ * Report guest supported mouse-features to the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ * @param features The set of features to report to the host.
+ */
+int vbg_core_set_mouse_status(PVBOXGUESTDEVEXT gdev, u32 features)
+{
+	VMMDevReqMouseStatus *req;
+	int rc;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_SetMouseStatus);
+	if (!req)
+		return -ENOMEM;
+
+	/* No position is being reported, only the feature mask. */
+	req->pointerXPos = 0;
+	req->pointerYPos = 0;
+	req->mouseFeatures = features;
+
+	rc = vbg_req_perform(gdev, req);
+	kfree(req);
+	if (rc < 0)
+		vbg_err("vbg_core_set_mouse_status error: %d\n", rc);
+
+	return -vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Init and termination worker for set guest capabilities to zero on the host.
+ *
+ * @returns 0 or negative errno value.
+ * @param gdev The Guest extension device.
+ */
+static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT gdev)
+{
+	VMMDevReqGuestCapabilities2 *req;
+	int rc;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_SetGuestCapabilities);
+	if (!req)
+		return -ENOMEM;
+
+	/* Clear every capability: or-in nothing, mask out everything. */
+	req->u32OrMask = 0;
+	req->u32NotMask = U32_MAX;
+
+	rc = vbg_req_perform(gdev, req);
+	kfree(req);
+	if (rc < 0)
+		vbg_err("vgdrvResetCapabilitiesOnHost error: %d\n", rc);
+
+	return -vbg_status_code_to_errno(rc);
+}
+
+/**
+ * Sets the guest capabilities to the host while holding the lock.
+ *
+ * This will ASSUME that we're the ones in charge of the mask, so
+ * we'll simply clear all bits we don't set.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param req The request.
+ */
+static int vgdrvUpdateCapabilitiesOnHostWithReqAndLock(PVBOXGUESTDEVEXT gdev,
+						       VMMDevReqGuestCapabilities2 *req)
+{
+	int rc;
+
+	/* Skip the host round-trip when nothing would change. */
+	req->u32OrMask = gdev->SetGuestCapsTracker.fMask;
+	if (req->u32OrMask == gdev->fGuestCapsHost)
+		return VINF_SUCCESS;
+
+	req->u32NotMask = ~req->u32OrMask;
+	gdev->fGuestCapsHost = req->u32OrMask;
+
+	rc = vbg_req_perform(gdev, req);
+	if (rc < 0) {
+		/* U32_MAX flags the host-side state as unknown. */
+		gdev->fGuestCapsHost = U32_MAX;
+	}
+
+	return rc;
+}
+
+/**
+ * Sets the guest capabilities for a session.
+ *
+ * Restructured with early-exit gotos to match the parallel
+ * vgdrvSetSessionEventFilter(); behavior is unchanged.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ * @param fOrMask The capabilities to add.
+ * @param fNotMask The capabilities to remove.
+ * @param fSessionTermination Set if we're called by the session cleanup code.
+ * This tweaks the error handling so we perform
+ * proper session cleanup even if the host
+ * misbehaves.
+ *
+ * @remarks Takes the session spinlock.
+ */
+static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT gdev,
+				       PVBOXGUESTSESSION session,
+				       u32 fOrMask, u32 fNotMask,
+				       bool fSessionTermination)
+{
+	VMMDevReqGuestCapabilities2 *req;
+	unsigned long flags;
+	bool global_change;
+	u32 changed, previous;
+	int rc = VINF_SUCCESS;
+
+	/* Allocate a request buffer before taking the spinlock */
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_SetGuestCapabilities);
+	if (!req) {
+		if (!fSessionTermination)
+			return VERR_NO_MEMORY;
+		/* Ignore failure, we must do session cleanup. */
+	}
+
+	spin_lock_irqsave(&gdev->session_spinlock, flags);
+
+	/* Apply the changes to the session mask. */
+	previous = session->fCapabilities;
+	session->fCapabilities |= fOrMask;
+	session->fCapabilities &= ~fNotMask;
+
+	/* If anything actually changed, update the global usage counters. */
+	changed = previous ^ session->fCapabilities;
+	if (!changed)
+		goto out;
+
+	global_change = vgdrvBitUsageTrackerChange(&gdev->SetGuestCapsTracker,
+						   changed, previous);
+
+	/*
+	 * Only talk to the host when the aggregate mask changed or when the
+	 * host-side state is unknown (U32_MAX after an earlier failure), and
+	 * only if we actually have a request buffer.
+	 */
+	if (!(global_change || gdev->fGuestCapsHost == U32_MAX) || !req)
+		goto out;
+
+	rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(gdev, req);
+	if (rc < 0 && !fSessionTermination) {
+		/* On failure, roll back the tracker and the session mask. */
+		vgdrvBitUsageTrackerChange(&gdev->SetGuestCapsTracker, changed,
+					   session->fCapabilities);
+		session->fCapabilities = previous;
+	}
+
+out:
+	spin_unlock_irqrestore(&gdev->session_spinlock, flags);
+	kfree(req);
+
+	return rc;
+}
+
+/**
+ * Handle VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param session The session.
+ * @param pInfo The request.
+ */
+static int vgdrvIoCtl_SetCapabilities(PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session,
+ VBoxGuestSetCapabilitiesInfo * pInfo)
+{
+	u32 touched = pInfo->u32OrMask | pInfo->u32NotMask;
+
+	/* Only bits inside the valid capabilities mask may be changed. */
+	if (touched & ~VMMDEV_GUEST_CAPABILITIES_MASK)
+		return VERR_INVALID_PARAMETER;
+
+	return vgdrvSetSessionCapabilities(gdev, session, pInfo->u32OrMask,
+					   pInfo->u32NotMask, false);
+}
+
+/** @} */
+
+/**
+ * Common IOCtl for user to kernel communication.
+ *
+ * This function only does the basic validation and then invokes
+ * worker functions that take care of each specific function.
+ *
+ * @returns VBox status code
+ * @param iFunction The requested function.
+ * @param gdev The Guest extension device.
+ * @param session The client session.
+ * @param pvData The input/output data buffer. Can be NULL depending on the function.
+ * @param cbData The max size of the data buffer.
+ * @param data_size_ret Where to store the amount of returned data.
+ */
+int VGDrvCommonIoCtl(unsigned iFunction, PVBOXGUESTDEVEXT gdev,
+		     PVBOXGUESTSESSION session, void *pvData, size_t cbData,
+		     size_t *data_size_ret)
+{
+	int rc;
+
+	*data_size_ret = 0;
+
+	/*
+	 * Deal with variably sized requests first.
+	 */
+	if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) ==
+	    VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0))) {
+		rc = vgdrvIoCtl_VMMRequest(gdev, session,
+					   (VMMDevRequestHeader *)pvData,
+					   cbData, data_size_ret);
+	}
+	/*
+	 * These ones are a bit tricky.
+	 */
+	else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) ==
+		 VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0))) {
+		if (cbData < sizeof(VBoxGuestHGCMCallInfo))
+			return VERR_BUFFER_OVERFLOW;
+		rc = vgdrvIoCtl_HGCMCall(gdev, session,
+					 (VBoxGuestHGCMCallInfo *)pvData,
+					 U32_MAX, false, 0,
+					 cbData, data_size_ret);
+	} else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) ==
+		   VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0))) {
+		VBoxGuestHGCMCallInfoTimed *pInfo = pvData;
+
+		if (cbData < sizeof(VBoxGuestHGCMCallInfoTimed))
+			return VERR_BUFFER_OVERFLOW;
+		rc = vgdrvIoCtl_HGCMCall(gdev, session, &pInfo->info,
+					 pInfo->u32Timeout, false,
+					 offsetof(VBoxGuestHGCMCallInfoTimed, info),
+					 cbData, data_size_ret);
+	}
+#ifdef CONFIG_X86_64
+	else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) ==
+		 VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0))) {
+		if (cbData < sizeof(VBoxGuestHGCMCallInfo))
+			return VERR_BUFFER_OVERFLOW;
+		rc = vgdrvIoCtl_HGCMCall(gdev, session,
+					 (VBoxGuestHGCMCallInfo *)pvData,
+					 U32_MAX, true, 0,
+					 cbData, data_size_ret);
+	} else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) ==
+		   VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0))) {
+		VBoxGuestHGCMCallInfoTimed *pInfo = pvData;
+
+		if (cbData < sizeof(VBoxGuestHGCMCallInfoTimed))
+			return VERR_BUFFER_OVERFLOW;
+		rc = vgdrvIoCtl_HGCMCall(gdev, session, &pInfo->info,
+					 pInfo->u32Timeout, true,
+					 offsetof(VBoxGuestHGCMCallInfoTimed, info),
+					 cbData, data_size_ret);
+	}
+#endif
+	else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) ==
+		 VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0))) {
+		vbg_info("%.*s", (int)cbData, (char *)pvData);
+		/*
+		 * Bugfix: rc was previously left uninitialized on this path,
+		 * so an undefined value was returned to the caller.
+		 */
+		rc = VINF_SUCCESS;
+	} else {
+		switch (iFunction) {
+		case VBOXGUEST_IOCTL_WAITEVENT:
+			if (cbData < sizeof(VBoxGuestWaitEventInfo))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vbg_ioctl_wait_event(gdev, session,
+						  (VBoxGuestWaitEventInfo *)pvData,
+						  data_size_ret);
+			break;
+
+		case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
+			if (cbData != 0)
+				return VERR_BUFFER_OVERFLOW;
+			rc = vbg_ioctl_cancel_all_wait_events(gdev, session);
+			break;
+
+		case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
+			if (cbData < sizeof(VBoxGuestFilterMaskInfo))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vgdrvIoCtl_CtlFilterMask(gdev, session,
+						      (VBoxGuestFilterMaskInfo *)pvData);
+			break;
+
+		case VBOXGUEST_IOCTL_HGCM_CONNECT:
+#ifdef CONFIG_X86_64 /* Needed because these are identical on 32 bit builds */
+		case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
+#endif
+			if (cbData < sizeof(VBoxGuestHGCMConnectInfo))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vgdrvIoCtl_HGCMConnect(gdev, session,
+						    (VBoxGuestHGCMConnectInfo *)pvData,
+						    data_size_ret);
+			break;
+
+		case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
+#ifdef CONFIG_X86_64
+		case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
+#endif
+			if (cbData < sizeof(VBoxGuestHGCMDisconnectInfo))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vgdrvIoCtl_HGCMDisconnect(gdev, session,
+						       (VBoxGuestHGCMDisconnectInfo *)pvData,
+						       data_size_ret);
+			break;
+
+		case VBOXGUEST_IOCTL_CHECK_BALLOON:
+			if (cbData < sizeof(VBoxGuestCheckBalloonInfo))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vgdrvIoCtl_CheckMemoryBalloon(gdev, session,
+							   (VBoxGuestCheckBalloonInfo *)pvData,
+							   data_size_ret);
+			break;
+
+		case VBOXGUEST_IOCTL_CHANGE_BALLOON:
+			/* Under Linux we always handle the balloon in R0. */
+			rc = VERR_PERMISSION_DENIED;
+			break;
+
+		case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
+			if (cbData < sizeof(VBoxGuestWriteCoreDump))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vgdrvIoCtl_WriteCoreDump(gdev,
+						      (VBoxGuestWriteCoreDump *)pvData);
+			break;
+
+		case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
+			vbg_err("VGDrvCommonIoCtl: VBOXGUEST_IOCTL_SET_MOUSE_STATUS should not be used under Linux\n");
+			rc = VERR_NOT_SUPPORTED;
+			break;
+
+		case VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE:
+			vbg_err("VGDrvCommonIoCtl: VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE should not be used under Linux\n");
+			rc = VERR_NOT_SUPPORTED;
+			break;
+
+		case VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES:
+			if (cbData < sizeof(VBoxGuestSetCapabilitiesInfo))
+				return VERR_BUFFER_OVERFLOW;
+			rc = vgdrvIoCtl_SetCapabilities(gdev, session,
+							(VBoxGuestSetCapabilitiesInfo *)pvData);
+			break;
+
+		default:
+			vbg_debug("VGDrvCommonIoCtl: Unknown request %#08x\n",
+				  VBOXGUEST_IOCTL_STRIP_SIZE(iFunction));
+			rc = VERR_NOT_SUPPORTED;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * Core interrupt service routine.
+ *
+ * Checks the shared VMMDev memory to see whether the interrupt is ours,
+ * acknowledges pending events with the pre-allocated ack request (done
+ * under event_spinlock so it cannot race with waiters), and dispatches
+ * them to the HGCM wait queue resp. the generic event waiters.
+ *
+ * @returns IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise.
+ */
+irqreturn_t vbg_core_isr(int irq, void *dev_id)
+{
+ PVBOXGUESTDEVEXT gdev = dev_id;
+ VMMDevEvents *req = gdev->pIrqAckEvents;
+ bool fMousePositionChanged = false;
+ bool fOurIrq;
+ unsigned long flags;
+ int rc;
+
+ /*
+ * Enter the spinlock and check if it's our IRQ or not.
+ */
+ spin_lock_irqsave(&gdev->event_spinlock, flags);
+ fOurIrq = gdev->pVMMDevMemory->V.V1_04.fHaveEvents;
+ if (fOurIrq) {
+ /* Acknowledge events (the host fills req->events). */
+ req->header.rc = VERR_INTERNAL_ERROR;
+ req->events = 0;
+ rc = vbg_req_perform(gdev, req);
+ if (rc >= 0) {
+ u32 fEvents = req->events;
+
+ /*
+ * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
+ */
+ if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
+ fMousePositionChanged = true;
+ fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
+ }
+
+ /*
+ * The HGCM event/list is kind of different in that we evaluate all entries.
+ */
+ if (fEvents & VMMDEV_EVENT_HGCM) {
+ wake_up(&gdev->hgcm_wq);
+ fEvents &= ~VMMDEV_EVENT_HGCM;
+ }
+
+ /*
+ * Normal FIFO waiter evaluation.
+ */
+ vbg_dispatch_events_locked(gdev, fEvents);
+ }
+ }
+
+ spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+ /* Reported outside the spinlock; only used by the input device. */
+ if (fMousePositionChanged)
+ VGDrvNativeISRMousePollEvent(gdev);
+
+ return fOurIrq ? IRQ_HANDLED : IRQ_NONE;
+}
diff --git a/drivers/misc/vboxguest/vboxguest_core.h b/drivers/misc/vboxguest/vboxguest_core.h
new file mode 100644
index 000000000000..81a36c6627d2
--- /dev/null
+++ b/drivers/misc/vboxguest/vboxguest_core.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2010-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __VBOXGUEST_CORE_H__
+#define __VBOXGUEST_CORE_H__
+
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/vbox_vmmdev.h>
+#include <linux/vboxguest.h>
+
+/** Pointer to the VBoxGuest per session data. */
+typedef struct VBOXGUESTSESSION *PVBOXGUESTSESSION;
+
+/** VBox guest memory balloon. */
+typedef struct VBOXGUESTMEMBALLOON {
+ /** Mutex protecting the members below from concurrent access. */
+ struct mutex mutex;
+ /** Pre-allocated VMMDevChangeMemBalloon req for inflate / deflate */
+ VMMDevChangeMemBalloon *change_req;
+ /** The current number of chunks in the balloon. */
+ u32 chunks;
+ /** The maximum number of chunks in the balloon.
+ * Set once from the host's cPhysMemChunks on first use; 0 until then. */
+ u32 max_chunks;
+ /** The current owner of the balloon.
+ * The first session to query/change the balloon; reset on close. */
+ PVBOXGUESTSESSION owner;
+ /**
+ * Array of pointers to page arrays. A page * array is allocated for
+ * each chunk when inflating, and freed when the deflating.
+ */
+ struct page ***pages;
+} VBOXGUESTMEMBALLOON;
+
+/**
+ * Per bit usage tracker for a u32 mask.
+ *
+ * Used for optimal handling of guest capabilities and the host event
+ * filter (see EventFilterTracker / SetGuestCapsTracker below).
+ */
+typedef struct VBOXGUESTBITUSAGETRACER {
+ /** Per bit usage counters. */
+ u32 acPerBitUsage[32];
+ /** The current mask according to acPerBitUsage. */
+ u32 fMask;
+} VBOXGUESTBITUSAGETRACER;
+/** Pointer to a per bit usage tracker. */
+typedef VBOXGUESTBITUSAGETRACER *PVBOXGUESTBITUSAGETRACER;
+/** Pointer to a const per bit usage tracker. */
+typedef VBOXGUESTBITUSAGETRACER const *PCVBOXGUESTBITUSAGETRACER;
+
+/**
+ * VBox guest device (data) extension.
+ */
+typedef struct VBOXGUESTDEVEXT {
+ struct device *dev;
+ /** The base of the adapter I/O ports. */
+ u16 IOPortBase;
+ /** Pointer to the mapping of the VMMDev adapter memory. */
+ VMMDevMemory volatile *pVMMDevMemory;
+ /**
+ * Dummy page and vmap address for reserved kernel virtual-address
+ * space for the guest mappings, only used on hosts lacking vtx.
+ */
+ struct page *guest_mappings_dummy_page;
+ void *guest_mappings;
+ /** Spinlock protecting the signaling and resetting of the wait-for-event
+ * semaphores as well as the event acking in the ISR. */
+ spinlock_t event_spinlock;
+ /** Preallocated VMMDevEvents for the IRQ handler. */
+ VMMDevEvents *pIrqAckEvents;
+ /** Wait-for-event list for threads waiting for multiple events. */
+ wait_queue_head_t event_wq;
+ /** Mask of pending events. */
+ u32 f32PendingEvents;
+ /** Wait-for-event list for threads waiting on HGCM async completion. */
+ wait_queue_head_t hgcm_wq;
+ /** Pre-allocated hgcm cancel2 req. for cancellation on timeout */
+ VMMDevHGCMCancel2 *cancel_req;
+ /** Mutex protecting cancel_req accesses */
+ struct mutex cancel_req_mutex;
+ /** Pre-allocated mouse-status request for the input-device handling. */
+ VMMDevReqMouseStatus *mouse_status_req;
+ /** Input device for reporting abs mouse coordinates to the guest. */
+ struct input_dev *input;
+
+ /** Spinlock protecting various items in the VBOXGUESTSESSION. */
+ spinlock_t session_spinlock;
+ /** List of guest sessions (VBOXGUESTSESSION). We currently traverse this
+ * but do not search it, so a list data type should be fine. Use under the
+ * session_spinlock lock. */
+ struct list_head session_list;
+ /** Memory balloon information. */
+ VBOXGUESTMEMBALLOON mem_balloon;
+
+ /** @name Host Event Filtering
+ * @{ */
+ /** Events we won't permit anyone to filter out. */
+ u32 fFixedEvents;
+ /** Usage counters for the host events. (Fixed events are not included.) */
+ VBOXGUESTBITUSAGETRACER EventFilterTracker;
+ /** The event filter last reported to the host (U32_MAX on failure). */
+ u32 fEventFilterHost;
+ /** @} */
+
+ /** @name Guest Capabilities
+ * @{ */
+ /** Usage counters for guest capabilities in "set" mode. Indexed by
+ * capability bit number, one count per session using a capability. */
+ VBOXGUESTBITUSAGETRACER SetGuestCapsTracker;
+ /** The guest capabilities last reported to the host (U32_MAX on failure). */
+ u32 fGuestCapsHost;
+ /** @} */
+
+ /** Heartbeat timer which fires with interval
+ * heartbeat_interval_ms and its handler sends
+ * VMMDevReq_GuestHeartbeat to VMMDev. */
+ struct timer_list heartbeat_timer;
+ /** Heartbeat timer interval in ms. */
+ int heartbeat_interval_ms;
+ /** Preallocated VMMDevReq_GuestHeartbeat request. */
+ VMMDevRequestHeader *guest_heartbeat_req;
+
+ /** "vboxguest" char-device */
+ struct miscdevice misc_device;
+ /** "vboxuser" char-device */
+ struct miscdevice misc_device_user;
+} VBOXGUESTDEVEXT;
+/** Pointer to the VBoxGuest driver data. */
+typedef VBOXGUESTDEVEXT *PVBOXGUESTDEVEXT;
+
+/**
+ * The VBoxGuest per session data.
+ */
+typedef struct VBOXGUESTSESSION {
+ /** The list node. */
+ struct list_head list_node;
+ /** Pointer to the device extension. */
+ PVBOXGUESTDEVEXT gdev;
+
+ /** Array containing HGCM client IDs associated with this session.
+ * This will be automatically disconnected when the session is closed.
+ * 0 means a free slot, U32_MAX a slot reserved by an in-flight
+ * connect/disconnect. */
+ u32 volatile aHGCMClientIds[64];
+ /** Host events requested by the session.
+ * An event type requested in any guest session will be added to the host
+ * filter. Protected by VBOXGUESTDEVEXT::session_spinlock. */
+ u32 fEventFilter;
+ /** Guest capabilities in "set" mode for this session.
+ * These are accumulated for all sessions via
+ * VBOXGUESTDEVEXT::SetGuestCapsTracker and reported to the host.
+ * Protected by VBOXGUESTDEVEXT::session_spinlock. */
+ u32 fCapabilities;
+ /** Does this session belong to a root process or a user one? */
+ bool user_session;
+
+ /** Set on CANCEL_ALL_WAITEVENTS, protected by the event_spinlock. */
+ bool cancel_waiters;
+} VBOXGUESTSESSION;
+
+/** One-time driver init / exit of the core (OS independent) state. */
+int vbg_core_init(PVBOXGUESTDEVEXT gdev, u32 fixed_events);
+void vbg_core_exit(PVBOXGUESTDEVEXT gdev);
+/** Open / close a session; user_session selects the unprivileged node. */
+int vbg_core_open_session(PVBOXGUESTDEVEXT gdev,
+ VBOXGUESTSESSION **session_ret, bool user_session);
+void vbg_core_close_session(VBOXGUESTSESSION *session);
+/** Interrupt service routine, registered by the OS specific code. */
+irqreturn_t vbg_core_isr(int irq, void *dev_id);
+
+/** OS independent ioctl dispatcher, returns a VBox status code. */
+int VGDrvCommonIoCtl(unsigned iFunction, PVBOXGUESTDEVEXT gdev,
+ PVBOXGUESTSESSION session, void *data, size_t data_size,
+ size_t *data_size_ret);
+
+/** Report supported mouse features to the host (0 or negative errno). */
+int vbg_core_set_mouse_status(PVBOXGUESTDEVEXT gdev, u32 features);
+/** Called by the ISR on VMMDEV_EVENT_MOUSE_POSITION_CHANGED. */
+void VGDrvNativeISRMousePollEvent(PVBOXGUESTDEVEXT gdev);
+
+#endif
diff --git a/drivers/misc/vboxguest/vboxguest_linux.c b/drivers/misc/vboxguest/vboxguest_linux.c
new file mode 100644
index 000000000000..8468c7139b98
--- /dev/null
+++ b/drivers/misc/vboxguest/vboxguest_linux.c
@@ -0,0 +1,464 @@
+/*
+ * vboxguest linux pci driver, char-dev and input-device code,
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/vbox_utils.h>
+#include "vboxguest_core.h"
+
+/** The device name. */
+#define DEVICE_NAME "vboxguest"
+/** The device name for the device node open to everyone. */
+#define DEVICE_NAME_USER "vboxuser"
+/** VirtualBox PCI vendor ID. */
+#define VBOX_VENDORID 0x80ee
+/** VMMDev PCI card product ID. */
+#define VMMDEV_DEVICEID 0xcafe
+
+/** Mutex protecting the global vbg_gdev pointer used by vbg_get/put_gdev. */
+static DEFINE_MUTEX(vbg_gdev_mutex);
+/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
+static PVBOXGUESTDEVEXT vbg_gdev;
+
+static long vgdrvLinuxIOCtl(struct file *pFilp, unsigned int uCmd,
+ unsigned long ulArg);
+
+/**
+ * Open of /dev/vboxguest, creates a new (non-user, i.e. trusted) session.
+ *
+ * @returns 0 on success, negated errno from vbg_core_open_session on failure.
+ */
+static int vbg_misc_device_open(struct inode *inode, struct file *filp)
+{
+	PVBOXGUESTSESSION session;
+	PVBOXGUESTDEVEXT gdev;
+	int ret;
+
+	/* misc_open sets filp->private_data to our misc device */
+	gdev = container_of(filp->private_data, VBOXGUESTDEVEXT, misc_device);
+
+	ret = vbg_core_open_session(gdev, &session, false);
+	if (ret)
+		return ret; /* propagate the real error, not a blanket -ENOMEM */
+
+	filp->private_data = session;
+	return 0;
+}
+
+/**
+ * Open of /dev/vboxuser, creates a new restricted user session.
+ *
+ * @returns 0 on success, negated errno on failure.
+ */
+static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
+{
+	PVBOXGUESTDEVEXT gdev;
+	PVBOXGUESTSESSION session;
+	int ret;
+
+	/*
+	 * misc_open stores the miscdevice pointer in filp->private_data,
+	 * which lets us recover the device extension via container_of.
+	 */
+	gdev = container_of(filp->private_data, VBOXGUESTDEVEXT,
+			    misc_device_user);
+
+	ret = vbg_core_open_session(gdev, &session, true);
+	if (ret == 0)
+		filp->private_data = session;
+
+	return ret;
+}
+
+/**
+ * Close device, tearing down the session created at open time.
+ *
+ * @param inode Pointer to inode info structure.
+ * @param filp Associated file pointer.
+ */
+static int vbg_misc_device_close(struct inode *inode, struct file *filp)
+{
+	PVBOXGUESTSESSION session = filp->private_data;
+
+	filp->private_data = NULL;
+	vbg_core_close_session(session);
+	return 0;
+}
+
+/** The file_operations structures. */
+/* Operations for /dev/vboxguest; sessions opened here are non-user (trusted). */
+static const struct file_operations vbg_misc_device_fops = {
+	.owner			= THIS_MODULE,
+	.open			= vbg_misc_device_open,
+	.release		= vbg_misc_device_close,
+	.unlocked_ioctl		= vgdrvLinuxIOCtl,
+};
+/* Operations for /dev/vboxuser; only the open callback differs. */
+static const struct file_operations vbg_misc_device_user_fops = {
+	.owner			= THIS_MODULE,
+	.open			= vbg_misc_device_user_open,
+	.release		= vbg_misc_device_close,
+	.unlocked_ioctl		= vgdrvLinuxIOCtl,
+};
+
+/**
+ * Called when the input device is first opened.
+ *
+ * Asks the host to enable absolute mouse reporting (including the "new
+ * protocol" flag), so VMMDEV_EVENT_MOUSE_POSITION_CHANGED events arrive.
+ *
+ * @returns 0 on success, negated errno on failure.
+ */
+static int vbg_input_open(struct input_dev *input)
+{
+	PVBOXGUESTDEVEXT gdev = input_get_drvdata(input);
+	u32 feat = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_NEW_PROTOCOL;
+
+	/* Propagate the result directly; the old ret/if/return 0 was redundant. */
+	return vbg_core_set_mouse_status(gdev, feat);
+}
+
+/**
+ * Called when the last open handle to the input device is closed.
+ *
+ * Tells the host to stop absolute mouse reporting again.
+ */
+static void vbg_input_close(struct input_dev *input)
+{
+	vbg_core_set_mouse_status(input_get_drvdata(input), 0);
+}
+
+/**
+ * Allocates and registers the kernel input device through which absolute
+ * mouse positions reported by the host are delivered.
+ *
+ * @returns 0 on success, negated errno on failure.
+ */
+static int vbg_create_input_device(PVBOXGUESTDEVEXT gdev)
+{
+	struct input_dev *idev = devm_input_allocate_device(gdev->dev);
+
+	if (!idev)
+		return -ENOMEM;
+
+	idev->name = "VirtualBox mouse integration";
+	idev->dev.parent = gdev->dev;
+	idev->id.bustype = BUS_PCI;
+	idev->id.vendor = VBOX_VENDORID;
+	idev->id.product = VMMDEV_DEVICEID;
+	idev->open = vbg_input_open;
+	idev->close = vbg_input_close;
+
+	input_set_capability(idev, EV_KEY, BTN_MOUSE);
+	input_set_abs_params(idev, ABS_X, VMMDEV_MOUSE_RANGE_MIN,
+			     VMMDEV_MOUSE_RANGE_MAX, 0, 0);
+	input_set_abs_params(idev, ABS_Y, VMMDEV_MOUSE_RANGE_MIN,
+			     VMMDEV_MOUSE_RANGE_MAX, 0, 0);
+	input_set_drvdata(idev, gdev);
+
+	gdev->input = idev;
+
+	return input_register_device(gdev->input);
+}
+
+/**
+ * Does the PCI detection and init of the device.
+ *
+ * Claims BAR0 (request I/O ports) and BAR1 (the shared VMMDev memory),
+ * initializes the core driver state, creates the input device and the two
+ * misc char-devices, and publishes the device for vbg_get_gdev() users.
+ *
+ * @returns 0 on success, negated errno on failure.
+ */
+static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+	struct device *dev = &pci->dev;
+	resource_size_t io, io_len, mmio, mmio_len;
+	volatile VMMDevMemory *vmmdev;
+	PVBOXGUESTDEVEXT gdev;
+	int ret;
+
+	gdev = devm_kzalloc(dev, sizeof(*gdev), GFP_KERNEL);
+	if (!gdev)
+		return -ENOMEM;
+
+	ret = pci_enable_device(pci);
+	if (ret != 0) {
+		vbg_err("vboxguest: Error enabling device: %d\n", ret);
+		return ret;
+	}
+
+	ret = -ENODEV;
+
+	/* BAR0: I/O ports used to submit VMMDev requests. */
+	io = pci_resource_start(pci, 0);
+	io_len = pci_resource_len(pci, 0);
+	if (!io || !io_len) {
+		vbg_err("vboxguest: Error IO-port resource (0) is missing\n");
+		goto err_disable_pcidev;
+	}
+	if (devm_request_region(dev, io, io_len, DEVICE_NAME) == NULL) {
+		vbg_err("vboxguest: Error could not claim IO resource\n");
+		ret = -EBUSY;
+		goto err_disable_pcidev;
+	}
+
+	/* BAR1: the shared VMMDev memory region. */
+	mmio = pci_resource_start(pci, 1);
+	mmio_len = pci_resource_len(pci, 1);
+	if (!mmio || !mmio_len) {
+		vbg_err("vboxguest: Error MMIO resource (1) is missing\n");
+		goto err_disable_pcidev;
+	}
+
+	if (devm_request_mem_region(dev, mmio, mmio_len, DEVICE_NAME) == NULL) {
+		vbg_err("vboxguest: Error could not claim MMIO resource\n");
+		ret = -EBUSY;
+		goto err_disable_pcidev;
+	}
+
+	vmmdev = devm_ioremap(dev, mmio, mmio_len);
+	if (!vmmdev) {
+		vbg_err("vboxguest: Error ioremap failed; MMIO addr=%p size=%d\n",
+			(void *)mmio, (int)mmio_len);
+		goto err_disable_pcidev;
+	}
+
+	/* Validate MMIO region version and size. */
+	if (vmmdev->u32Version != VMMDEV_MEMORY_VERSION ||
+	    vmmdev->u32Size < 32 || vmmdev->u32Size > mmio_len) {
+		vbg_err("vboxguest: Bogus VMMDev memory; u32Version=%08x (expected %08x) u32Size=%d (expected <= %d)\n",
+			vmmdev->u32Version, VMMDEV_MEMORY_VERSION,
+			vmmdev->u32Size, (int)mmio_len);
+		goto err_disable_pcidev;
+	}
+
+	gdev->IOPortBase = io;
+	gdev->pVMMDevMemory = vmmdev;
+	gdev->dev = dev;
+	gdev->misc_device.minor = MISC_DYNAMIC_MINOR;
+	gdev->misc_device.name = DEVICE_NAME;
+	gdev->misc_device.fops = &vbg_misc_device_fops;
+	gdev->misc_device_user.minor = MISC_DYNAMIC_MINOR;
+	gdev->misc_device_user.name = DEVICE_NAME_USER;
+	gdev->misc_device_user.fops = &vbg_misc_device_user_fops;
+
+	ret = vbg_core_init(gdev, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
+	if (ret)
+		goto err_disable_pcidev;
+
+	ret = vbg_create_input_device(gdev);
+	if (ret) {
+		vbg_err("vboxguest: Error creating input device: %d\n", ret);
+		goto err_vbg_core_exit;
+	}
+
+	ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED,
+			       DEVICE_NAME, gdev);
+	if (ret) {
+		vbg_err("vboxguest: Error requesting irq: %d\n", ret);
+		goto err_vbg_core_exit;
+	}
+
+	ret = misc_register(&gdev->misc_device);
+	if (ret) {
+		vbg_err("vboxguest: Error misc_register %s failed: %d\n",
+			DEVICE_NAME, ret);
+		goto err_vbg_core_exit;
+	}
+
+	ret = misc_register(&gdev->misc_device_user);
+	if (ret) {
+		vbg_err("vboxguest: Error misc_register %s failed: %d\n",
+			DEVICE_NAME_USER, ret);
+		goto err_unregister_misc_device;
+	}
+
+	/* Only one VMMDev PCI device per VM is supported. */
+	mutex_lock(&vbg_gdev_mutex);
+	if (!vbg_gdev)
+		vbg_gdev = gdev;
+	else
+		ret = -EBUSY;
+	mutex_unlock(&vbg_gdev_mutex);
+
+	if (ret) {
+		vbg_err("vboxguest: Error more than 1 vbox guest pci device\n");
+		goto err_unregister_misc_device_user;
+	}
+
+	pci_set_drvdata(pci, gdev);
+	/* Note: "0x%d" fixed to "%#x" so the size really prints as hex. */
+	vbg_info("vboxguest: misc device minor %d, IRQ %d, I/O port %x, MMIO at %p (size %#x)\n",
+		 gdev->misc_device.minor, pci->irq, gdev->IOPortBase,
+		 (void *)mmio, (u32)mmio_len);
+
+	return 0;
+
+err_unregister_misc_device_user:
+	misc_deregister(&gdev->misc_device_user);
+err_unregister_misc_device:
+	misc_deregister(&gdev->misc_device);
+err_vbg_core_exit:
+	vbg_core_exit(gdev);
+err_disable_pcidev:
+	pci_disable_device(pci);
+
+	return ret;
+}
+
+static void vbg_pci_remove(struct pci_dev *pci)
+{
+	PVBOXGUESTDEVEXT gdev = pci_get_drvdata(pci);
+
+	/*
+	 * Clear the global pointer first. vbg_get_gdev() keeps
+	 * vbg_gdev_mutex held while a vboxsf reference is outstanding, so
+	 * taking the mutex here also waits for any such user to call
+	 * vbg_put_gdev() before we tear the device down.
+	 */
+	mutex_lock(&vbg_gdev_mutex);
+	vbg_gdev = NULL;
+	mutex_unlock(&vbg_gdev_mutex);
+
+	misc_deregister(&gdev->misc_device_user);
+	misc_deregister(&gdev->misc_device);
+	vbg_core_exit(gdev);
+	pci_disable_device(pci);
+}
+
+/**
+ * Helper for the vboxsf driver to get a reference to the guest pci device.
+ *
+ * On success this returns with vbg_gdev_mutex HELD; the matching
+ * vbg_put_gdev() call releases it. This deliberately unbalanced locking is
+ * what pins the device against vbg_pci_remove().
+ *
+ * @returns a pointer to the gdev; or a ERR_PTR value on error.
+ */
+PVBOXGUESTDEVEXT vbg_get_gdev(void)
+{
+	mutex_lock(&vbg_gdev_mutex);
+
+	/*
+	 * Note on success we keep the mutex locked until vbg_put_gdev(),
+	 * this stops vbg_pci_remove from removing the device from underneath
+	 * vboxsf. vboxsf will only hold a reference for a short while.
+	 */
+	if (vbg_gdev)
+		return vbg_gdev;
+
+	mutex_unlock(&vbg_gdev_mutex);
+	return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL(vbg_get_gdev);
+
+/**
+ * Helper for the vboxsf driver to put a guest pci device reference.
+ *
+ * Releases vbg_gdev_mutex, which a successful vbg_get_gdev() left held.
+ *
+ * @param gdev Reference returned by vbg_get_gdev to put.
+ */
+void vbg_put_gdev(PVBOXGUESTDEVEXT gdev)
+{
+	WARN_ON(gdev != vbg_gdev);
+	mutex_unlock(&vbg_gdev_mutex);
+}
+EXPORT_SYMBOL(vbg_put_gdev);
+
+/**
+ * Device I/O Control entry point.
+ *
+ * Copies the ioctl payload into a kernel buffer (stack for small non-VMM
+ * requests, heap otherwise), dispatches to VGDrvCommonIoCtl and copies any
+ * output back to user space.
+ *
+ * @returns -ENOMEM or -EFAULT for errors inside the ioctl callback; 0
+ * on success, or a positive VBox status code on vbox guest-device errors.
+ *
+ * @param pInode Associated inode pointer.
+ * @param pFilp Associated file pointer.
+ * @param uCmd The function specified to ioctl().
+ * @param ulArg The argument specified to ioctl().
+ */
+static long vgdrvLinuxIOCtl(struct file *pFilp, unsigned int uCmd,
+			    unsigned long ulArg)
+{
+	PVBOXGUESTSESSION session = (PVBOXGUESTSESSION) pFilp->private_data;
+	u32 cbData = _IOC_SIZE(uCmd);
+	void *pvBufFree;
+	void *pvBuf;
+	int rc, ret = 0;
+	u64 au64Buf[32 / sizeof(u64)];
+
+	/*
+	 * For small amounts of data being passed we use a stack based buffer
+	 * except for VMMREQUESTs where the data must not be on the stack.
+	 */
+	if (cbData <= sizeof(au64Buf) &&
+	    VBOXGUEST_IOCTL_STRIP_SIZE(uCmd) !=
+	    VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0))) {
+		pvBufFree = NULL;
+		pvBuf = &au64Buf[0];
+	} else {
+		/* __GFP_DMA32 for VBOXGUEST_IOCTL_VMMREQUEST */
+		pvBufFree = pvBuf = kmalloc(cbData, GFP_KERNEL | __GFP_DMA32);
+		if (!pvBuf)
+			return -ENOMEM;
+	}
+	/* NOTE(review): cast lacks a __user annotation - sparse will warn. */
+	if (copy_from_user(pvBuf, (void *)ulArg, cbData) == 0) {
+		/*
+		 * Process the IOCtl.
+		 */
+		size_t returned_data_size;
+		rc = VGDrvCommonIoCtl(uCmd, session->gdev, session, pvBuf, cbData,
+				      &returned_data_size);
+
+		/*
+		 * Copy ioctl data and output buffer back to user space.
+		 *
+		 * NOTE(review): this trusts VGDrvCommonIoCtl to set
+		 * returned_data_size to only bytes it actually wrote;
+		 * otherwise uninitialized buffer contents could be copied
+		 * out - confirm against the core implementation.
+		 */
+		if (rc >= 0) {
+			if (returned_data_size > cbData) {
+				vbg_debug("vgdrvLinuxIOCtl: too much output data %zu > %u\n",
+					  returned_data_size, cbData);
+				returned_data_size = cbData;
+			}
+			if (returned_data_size > 0) {
+				if (copy_to_user((void *)ulArg, pvBuf,
+						 returned_data_size) != 0)
+					ret = -EFAULT;
+			}
+		} else {
+			/* Negate the Vbox status code to make it positive. */
+			ret = -rc;
+		}
+	} else {
+		ret = -EFAULT;
+	}
+
+	kfree(pvBufFree);
+
+	return ret;
+}
+
+/**
+ * ISR callback for mouse events.
+ *
+ * Called at the end of the ISR, after the event spinlock was dropped,
+ * whenever the host raised VMMDEV_EVENT_MOUSE_POSITION_CHANGED.
+ *
+ * @param gdev The device extension.
+ */
+void VGDrvNativeISRMousePollEvent(PVBOXGUESTDEVEXT gdev)
+{
+	/* Query the current absolute pointer position from the host. */
+	gdev->mouse_status_req->mouseFeatures = 0;
+	gdev->mouse_status_req->pointerXPos = 0;
+	gdev->mouse_status_req->pointerYPos = 0;
+	if (vbg_req_perform(gdev, gdev->mouse_status_req) < 0)
+		return;
+
+	/* Forward the position to the kernel input device. */
+	input_report_abs(gdev->input, ABS_X,
+			 gdev->mouse_status_req->pointerXPos);
+	input_report_abs(gdev->input, ABS_Y,
+			 gdev->mouse_status_req->pointerYPos);
+	input_sync(gdev->input);
+}
+
+/* PCI id table: match the VirtualBox VMMDev PCI card. */
+static const struct pci_device_id vbg_pci_ids[] = {
+	{ .vendor = VBOX_VENDORID, .device = VMMDEV_DEVICEID },
+	{}
+};
+MODULE_DEVICE_TABLE(pci, vbg_pci_ids);
+
+static struct pci_driver vbg_pci_driver = {
+	.name		= DEVICE_NAME,
+	.id_table	= vbg_pci_ids,
+	.probe		= vbg_pci_probe,
+	.remove		= vbg_pci_remove,
+};
+
+/* Generates module init/exit that simply (un)register the pci driver. */
+module_pci_driver(vbg_pci_driver);
+
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_DESCRIPTION("Oracle VM VirtualBox Guest Additions for Linux Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/vboxguest/vboxguest_utils.c b/drivers/misc/vboxguest/vboxguest_utils.c
new file mode 100644
index 000000000000..560d550312a1
--- /dev/null
+++ b/drivers/misc/vboxguest/vboxguest_utils.c
@@ -0,0 +1,1124 @@
+/*
+ * vboxguest vmm-req and hgcm-call code, GenericRequest.c and HGCMInternal.c
+ * in vbox upstream svn.
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/vbox_err.h>
+#include <linux/vbox_utils.h>
+#include "vboxguest_core.h"
+
+/** The max parameter buffer size for a user request. */
+#define VBGLR0_MAX_HGCM_USER_PARM	(24 * SZ_1M)
+/** The max parameter buffer size for a kernel request. */
+#define VBGLR0_MAX_HGCM_KERNEL_PARM	(16 * SZ_1M)
+
+/* I/O port to which log bytes are written (presumably captured by the host). */
+#define VBG_DEBUG_PORT			0x504
+
+/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
+static DEFINE_SPINLOCK(vbg_log_lock);
+static char vbg_log_buf[128];
+
+/*
+ * Generates a printf-like log function which formats into the single shared
+ * vbg_log_buf (under vbg_log_lock), sends the result byte-by-byte to the
+ * debug port and also to the kernel log via pr_func.
+ */
+#define VBG_LOG(name, pr_func) \
+void name(const char *fmt, ...) \
+{ \
+	unsigned long flags; \
+	va_list args; \
+	int i, count; \
+	\
+	va_start(args, fmt); \
+	spin_lock_irqsave(&vbg_log_lock, flags); \
+	\
+	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
+	for (i = 0; i < count; i++) \
+		outb(vbg_log_buf[i], VBG_DEBUG_PORT); \
+	\
+	pr_func("%s", vbg_log_buf); \
+	\
+	spin_unlock_irqrestore(&vbg_log_lock, flags); \
+	va_end(args); \
+} \
+EXPORT_SYMBOL(name);
+
+VBG_LOG(vbg_info, pr_info)
+VBG_LOG(vbg_warn, pr_warn)
+VBG_LOG(vbg_err, pr_err)
+/* vbg_debug is a macro around pr_debug unless a real function is needed. */
+#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
+VBG_LOG(vbg_debug, pr_debug)
+#endif
+
+/**
+ * Helper to determine the minimum request size for the given request.
+ * Returns 0 if the given operation is not handled and/or supported.
+ *
+ * @returns Size.
+ * @param req The VMMDev request to get the size for.
+ */
+static size_t vbg_req_get_min_size(const VMMDevRequestHeader *req)
+{
+	switch (req->requestType) {
+	case VMMDevReq_GetMouseStatus:
+	case VMMDevReq_SetMouseStatus:
+		return sizeof(VMMDevReqMouseStatus);
+	case VMMDevReq_SetPointerShape:
+		return sizeof(VMMDevReqMousePointer);
+	case VMMDevReq_GetHostVersion:
+		return sizeof(VMMDevReqHostVersion);
+	case VMMDevReq_Idle:
+		return sizeof(VMMDevReqIdle);
+	case VMMDevReq_GetHostTime:
+		return sizeof(VMMDevReqHostTime);
+	case VMMDevReq_GetHypervisorInfo:
+	case VMMDevReq_SetHypervisorInfo:
+		return sizeof(VMMDevReqHypervisorInfo);
+	case VMMDevReq_RegisterPatchMemory:
+	case VMMDevReq_DeregisterPatchMemory:
+		return sizeof(VMMDevReqPatchMemory);
+	case VMMDevReq_SetPowerStatus:
+		return sizeof(VMMDevPowerStateRequest);
+	case VMMDevReq_AcknowledgeEvents:
+		return sizeof(VMMDevEvents);
+	case VMMDevReq_ReportGuestInfo:
+		return sizeof(VMMDevReportGuestInfo);
+	case VMMDevReq_ReportGuestInfo2:
+		return sizeof(VMMDevReportGuestInfo2);
+	case VMMDevReq_ReportGuestStatus:
+		return sizeof(VMMDevReportGuestStatus);
+	case VMMDevReq_ReportGuestUserState:
+		return sizeof(VMMDevReportGuestUserState);
+	case VMMDevReq_GetDisplayChangeRequest:
+		return sizeof(VMMDevDisplayChangeRequest);
+	case VMMDevReq_GetDisplayChangeRequest2:
+		return sizeof(VMMDevDisplayChangeRequest2);
+	case VMMDevReq_GetDisplayChangeRequestEx:
+		return sizeof(VMMDevDisplayChangeRequestEx);
+	case VMMDevReq_VideoModeSupported:
+		return sizeof(VMMDevVideoModeSupportedRequest);
+	case VMMDevReq_GetHeightReduction:
+		return sizeof(VMMDevGetHeightReductionRequest);
+	case VMMDevReq_ReportGuestCapabilities:
+		return sizeof(VMMDevReqGuestCapabilities);
+	case VMMDevReq_SetGuestCapabilities:
+		return sizeof(VMMDevReqGuestCapabilities2);
+	/* 32 and 64 bit HGCM calls share the same request struct. */
+	case VMMDevReq_HGCMConnect:
+		return sizeof(VMMDevHGCMConnect);
+	case VMMDevReq_HGCMDisconnect:
+		return sizeof(VMMDevHGCMDisconnect);
+	case VMMDevReq_HGCMCall32:
+		return sizeof(VMMDevHGCMCall);
+	case VMMDevReq_HGCMCall64:
+		return sizeof(VMMDevHGCMCall);
+	case VMMDevReq_HGCMCancel:
+		return sizeof(VMMDevHGCMCancel);
+	case VMMDevReq_VideoAccelEnable:
+		return sizeof(VMMDevVideoAccelEnable);
+	case VMMDevReq_VideoAccelFlush:
+		return sizeof(VMMDevVideoAccelFlush);
+	case VMMDevReq_VideoSetVisibleRegion:
+		/*
+		 * The original protocol didn't consider a guest with NO visible
+		 * windows.
+		 */
+		return sizeof(VMMDevVideoSetVisibleRegion) - sizeof(RTRECT);
+	case VMMDevReq_GetSeamlessChangeRequest:
+		return sizeof(VMMDevSeamlessChangeRequest);
+	case VMMDevReq_QueryCredentials:
+		return sizeof(VMMDevCredentials);
+	case VMMDevReq_ReportGuestStats:
+		return sizeof(VMMDevReportGuestStats);
+	case VMMDevReq_GetMemBalloonChangeRequest:
+		return sizeof(VMMDevGetMemBalloonChangeRequest);
+	case VMMDevReq_GetStatisticsChangeRequest:
+		return sizeof(VMMDevGetStatisticsChangeRequest);
+	case VMMDevReq_ChangeMemBalloon:
+		return sizeof(VMMDevChangeMemBalloon);
+	case VMMDevReq_GetVRDPChangeRequest:
+		return sizeof(VMMDevVRDPChangeRequest);
+	case VMMDevReq_LogString:
+		return sizeof(VMMDevReqLogString);
+	case VMMDevReq_CtlGuestFilterMask:
+		return sizeof(VMMDevCtlGuestFilterMask);
+	case VMMDevReq_GetCpuHotPlugRequest:
+		return sizeof(VMMDevGetCpuHotPlugRequest);
+	case VMMDevReq_SetCpuHotPlugStatus:
+		return sizeof(VMMDevCpuHotPlugStatusRequest);
+	case VMMDevReq_RegisterSharedModule:
+		return sizeof(VMMDevSharedModuleRegistrationRequest);
+	case VMMDevReq_UnregisterSharedModule:
+		return sizeof(VMMDevSharedModuleUnregistrationRequest);
+	case VMMDevReq_CheckSharedModules:
+		return sizeof(VMMDevSharedModuleCheckRequest);
+	case VMMDevReq_GetPageSharingStatus:
+		return sizeof(VMMDevPageSharingStatusRequest);
+	case VMMDevReq_DebugIsPageShared:
+		return sizeof(VMMDevPageIsSharedRequest);
+	case VMMDevReq_GetSessionId:
+		return sizeof(VMMDevReqSessionId);
+	case VMMDevReq_HeartbeatConfigure:
+		return sizeof(VMMDevReqHeartbeat);
+	case VMMDevReq_GuestHeartbeat:
+		return sizeof(VMMDevRequestHeader);
+	default:
+		break;
+	}
+
+	/* Unknown/unsupported request type. */
+	return 0;
+}
+
+/**
+ * Verifies a VMMDev request against the size of the buffer holding it and
+ * against the minimum size for its request type.
+ *
+ * @returns VBox status code (VINF_SUCCESS or VERR_*).
+ * @param req The request to verify (may be NULL).
+ * @param buffer_size Size of the buffer req lives in.
+ */
+int vbg_req_verify(const VMMDevRequestHeader *req, size_t buffer_size)
+{
+	size_t min_size;
+
+	if (!req || buffer_size < sizeof(VMMDevRequestHeader)) {
+		vbg_debug("VbglGRVerify: Invalid parameter: req = %p, buffer_size = %zu\n",
+			  req, buffer_size);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	if (req->size > buffer_size) {
+		vbg_debug("VbglGRVerify: request size %u > buffer size %zu\n",
+			  req->size, buffer_size);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	/* The request size must correspond to the request type. */
+	min_size = vbg_req_get_min_size(req);
+
+	if (buffer_size < min_size) {
+		vbg_debug("VbglGRVerify: buffer size %zu < expected size %zu\n",
+			  buffer_size, min_size);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	if (req->size < min_size) {
+		vbg_debug("VbglGRVerify: header size %u < expected size %zu\n",
+			  req->size, min_size);
+		return VERR_INVALID_PARAMETER;
+	}
+
+	if (buffer_size == min_size) {
+		/*
+		 * This is most likely a fixed size request, and in this case the
+		 * request size must be also equal to the expected size.
+		 */
+		if (req->size != min_size) {
+			vbg_debug("VbglGRVerify: request size %u != expected size %zu\n",
+				  req->size, min_size);
+			return VERR_INVALID_PARAMETER;
+		}
+
+		return VINF_SUCCESS;
+	}
+
+	/*
+	 * This can be a variable size request. Check the request type and limit
+	 * the size to VMMDEV_MAX_VMMDEVREQ_SIZE, which is max size supported by
+	 * the host.
+	 *
+	 * Note: Keep this list sorted for easier human lookup!
+	 */
+	if (req->requestType == VMMDevReq_ChangeMemBalloon ||
+	    req->requestType == VMMDevReq_HGCMCall32 ||
+	    req->requestType == VMMDevReq_HGCMCall64 ||
+	    req->requestType == VMMDevReq_RegisterSharedModule ||
+	    req->requestType == VMMDevReq_ReportGuestUserState ||
+	    req->requestType == VMMDevReq_LogString ||
+	    req->requestType == VMMDevReq_SetPointerShape ||
+	    req->requestType == VMMDevReq_VideoSetVisibleRegion) {
+		if (buffer_size > VMMDEV_MAX_VMMDEVREQ_SIZE) {
+			/*
+			 * The old message named VMMDevReq_LogString for every
+			 * request type here; log the actual type instead.
+			 */
+			vbg_debug("VbglGRVerify: req %#08x: buffer size %zu too big\n",
+				  req->requestType, buffer_size);
+			return VERR_BUFFER_OVERFLOW;
+		}
+	} else {
+		vbg_debug("VbglGRVerify: unknown request-type %#08x\n",
+			  req->requestType);
+		return VERR_IO_BAD_LENGTH; /* ??? */
+	}
+
+	return VINF_SUCCESS;
+}
+
+/**
+ * Allocates a VMMDev request buffer and initializes its header.
+ *
+ * __GFP_DMA32 is used because the device is handed a 32-bit physical
+ * address; the body is poisoned with 0xaa so stale fields stand out.
+ *
+ * @returns the new request, or NULL on allocation failure.
+ */
+void *vbg_req_alloc(size_t len, VMMDevRequestType req_type)
+{
+	VMMDevRequestHeader *hdr = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
+
+	if (!hdr)
+		return NULL;
+
+	memset(hdr, 0xaa, len);
+
+	hdr->size = len;
+	hdr->version = VMMDEV_REQUEST_HEADER_VERSION;
+	hdr->requestType = req_type;
+	hdr->rc = VERR_GENERAL_FAILURE;
+	hdr->reserved1 = 0;
+	hdr->reserved2 = 0;
+
+	return hdr;
+}
+
+/* Note this function returns a VBox status code, not a negative errno!! */
+int vbg_req_perform(VBOXGUESTDEVEXT *gdev, void *req)
+{
+	unsigned long phys_req = virt_to_phys(req);
+
+	/*
+	 * Submit the request by writing its physical address to the VMMDev
+	 * request port; the host fills in req->rc (assumes the device
+	 * processes the request during the port write - VMMDev protocol).
+	 */
+	outl(phys_req, gdev->IOPortBase + VMMDEV_PORT_OFF_REQUEST);
+	/* Make sure req->rc is re-read after the port write. */
+	mb();
+
+	return ((VMMDevRequestHeader *)req)->rc;
+}
+
+/* Checks, under the event spinlock, if the host completed the HGCM request. */
+static bool hgcm_req_done(VBOXGUESTDEVEXT *gdev,
+			  VMMDevHGCMRequestHeader * header)
+{
+	unsigned long flags;
+	bool completed;
+
+	spin_lock_irqsave(&gdev->event_spinlock, flags);
+	completed = (header->fu32Flags & VBOX_HGCM_REQ_DONE) != 0;
+	spin_unlock_irqrestore(&gdev->event_spinlock, flags);
+
+	return completed;
+}
+
+/**
+ * Connects to a HGCM service on the host.
+ *
+ * @returns VBox status code; on success *client_id holds the new client id.
+ */
+int vbg_hgcm_connect(VBOXGUESTDEVEXT *gdev, HGCMServiceLocation * loc,
+		     u32 * client_id)
+{
+	VMMDevHGCMConnect *req;
+	int rc;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_HGCMConnect);
+	if (!req)
+		return VERR_NO_MEMORY;
+
+	req->header.fu32Flags = 0;
+	req->u32ClientID = 0;
+	memcpy(&req->loc, loc, sizeof(*loc));
+
+	rc = vbg_req_perform(gdev, req);
+
+	/* The host handles HGCM connects asynchronously; wait for completion. */
+	if (rc == VINF_HGCM_ASYNC_EXECUTE)
+		wait_event(gdev->hgcm_wq, hgcm_req_done(gdev, &req->header));
+
+	if (rc >= 0) {
+		*client_id = req->u32ClientID;
+		rc = req->header.result;
+	}
+
+	kfree(req);
+
+	return rc;
+}
+EXPORT_SYMBOL(vbg_hgcm_connect);
+
+/**
+ * Disconnects a HGCM client from its host service.
+ *
+ * @returns VBox status code.
+ */
+int vbg_hgcm_disconnect(VBOXGUESTDEVEXT *gdev, u32 client_id)
+{
+	VMMDevHGCMDisconnect *req;
+	int rc;
+
+	req = vbg_req_alloc(sizeof(*req), VMMDevReq_HGCMDisconnect);
+	if (!req)
+		return VERR_NO_MEMORY;
+
+	req->header.fu32Flags = 0;
+	req->u32ClientID = client_id;
+
+	rc = vbg_req_perform(gdev, req);
+
+	/* Disconnects are also executed asynchronously by the host. */
+	if (rc == VINF_HGCM_ASYNC_EXECUTE)
+		wait_event(gdev->hgcm_wq, hgcm_req_done(gdev, &req->header));
+
+	if (rc >= 0)
+		rc = req->header.result;
+
+	kfree(req);
+
+	return rc;
+}
+EXPORT_SYMBOL(vbg_hgcm_disconnect);
+
+/* Number of pages spanned by a buffer of @len bytes starting at @buf. */
+static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
+{
+	u32 offset_in_page = (unsigned long)buf & ~PAGE_MASK;
+
+	return PAGE_ALIGN(offset_in_page + len) >> PAGE_SHIFT;
+}
+
+/* Adds the page-list-info size needed for @buf/@len to *pcb_extra. */
+static void hgcm_call_inc_pcb_extra(void *buf, u32 len, size_t * pcb_extra)
+{
+	*pcb_extra += offsetof(HGCMPageListInfo,
+			       aPages[hgcm_call_buf_size_in_pages(buf, len)]);
+}
+
+/* Kernel mode use only, use WARN_ON for sanity checks. */
+static int hgcm_call_check_pagelist(const HGCMFunctionParameter *src_parm,
+	const VBoxGuestHGCMCallInfo *callinfo, u32 callinfo_size,
+	size_t *pcb_extra)
+{
+	HGCMPageListInfo *pg_lst;
+	u32 u, offset, size;
+
+	offset = src_parm->u.PageList.offset;
+	size = src_parm->u.PageList.size;
+	/* An empty page list needs no validation and no extra space. */
+	if (!size)
+		return VINF_SUCCESS;
+
+	if (WARN_ON(size > VBGLR0_MAX_HGCM_KERNEL_PARM))
+		return VERR_OUT_OF_RANGE;
+
+	/* The page-list info must live after the parameters, inside callinfo. */
+	if (WARN_ON(offset < callinfo->cParms * sizeof(HGCMFunctionParameter) ||
+		    offset > callinfo_size - sizeof(HGCMPageListInfo)))
+		return VERR_INVALID_PARAMETER;
+
+	pg_lst = (HGCMPageListInfo *)((u8 *)callinfo + offset);
+
+	/* The variable-length aPages array must also fit inside callinfo. */
+	u = offset + offsetof(HGCMPageListInfo, aPages[pg_lst->cPages]);
+	if (WARN_ON(u > callinfo_size))
+		return VERR_INVALID_PARAMETER;
+
+	if (WARN_ON(pg_lst->offFirstPage >= PAGE_SIZE))
+		return VERR_INVALID_PARAMETER;
+
+	/* cPages must match the page count implied by offFirstPage + size. */
+	u = PAGE_ALIGN(pg_lst->offFirstPage + size) >> PAGE_SHIFT;
+	if (WARN_ON(u != pg_lst->cPages))
+		return VERR_INVALID_PARAMETER;
+
+	if (WARN_ON(!VBOX_HGCM_F_PARM_ARE_VALID(pg_lst->flags)))
+		return VERR_INVALID_PARAMETER;
+
+	/*
+	 * Entries must be page aligned; the high-bits mask presumably
+	 * rejects physical addresses beyond what the host supports.
+	 */
+	for (u = 0; u < pg_lst->cPages; u++) {
+		if (WARN_ON(pg_lst->aPages[u] &
+			    (0xfff0000000000000ULL | ~PAGE_MASK)))
+			return VERR_INVALID_PARAMETER;
+	}
+
+	*pcb_extra += offsetof(HGCMPageListInfo, aPages[pg_lst->cPages]);
+
+	return VINF_SUCCESS;
+}
+
+/**
+ * Preprocesses one linear-address parameter: bounds checks it and, for user
+ * requests, allocates (and optionally fills) a bounce buffer for it.
+ *
+ * @returns VBox status code.
+ * @param src_parm The parameter to preprocess.
+ * @param is_user Whether this is a user (vs kernel) request.
+ * @param bounce_buf_ret Slot which receives the bounce buffer (user only).
+ * @param pcb_extra Accumulator for extra page-list space needed.
+ */
+static int hgcm_call_preprocess_linaddr(const HGCMFunctionParameter *src_parm,
+					bool is_user, void **bounce_buf_ret,
+					size_t *pcb_extra)
+{
+	void *buf, *bounce_buf;
+	bool copy_in;
+	u32 len;
+	int ret;
+
+	buf = (void *)src_parm->u.Pointer.u.linearAddr;
+	len = src_parm->u.Pointer.size;
+	copy_in = src_parm->type != VMMDevHGCMParmType_LinAddr_Out;
+
+	if (!is_user) {
+		/* Kernel buffers are used in place, no bounce buffer. */
+		if (WARN_ON(len > VBGLR0_MAX_HGCM_KERNEL_PARM))
+			return VERR_OUT_OF_RANGE;
+
+		hgcm_call_inc_pcb_extra(buf, len, pcb_extra);
+		return VINF_SUCCESS;
+	}
+
+	if (len > VBGLR0_MAX_HGCM_USER_PARM)
+		return VERR_OUT_OF_RANGE;
+
+	if (len <= PAGE_SIZE * 2)
+		bounce_buf = kmalloc(len, GFP_KERNEL);
+	else
+		bounce_buf = vmalloc(len);
+
+	if (!bounce_buf)
+		return VERR_NO_MEMORY;
+
+	/*
+	 * Publish the bounce buffer before anything can fail so the caller's
+	 * cleanup path can free it; previously it was leaked when
+	 * copy_from_user() failed below.
+	 */
+	*bounce_buf_ret = bounce_buf;
+	hgcm_call_inc_pcb_extra(bounce_buf, len, pcb_extra);
+
+	if (copy_in) {
+		ret = copy_from_user(bounce_buf, (void __user *)buf, len);
+		if (ret)
+			return VERR_ACCESS_DENIED;
+	} else {
+		memset(bounce_buf, 0, len);
+	}
+
+	return VINF_SUCCESS;
+}
+
+/**
+ * Preprocesses the HGCM call, validate parameters, alloc bounce buffers and
+ * figure out how much extra storage we need for page lists.
+ *
+ * @returns VBox status code
+ *
+ * @param call The call info.
+ * @param call_size The size of the call info structure.
+ * @param is_user Is it a user request or kernel request.
+ * @param bounce_bufs_ret Where to return the allocated bouncebuffer array
+ * @param pcb_extra Where to return the extra request space needed for
+ * physical page lists.
+ */
+static int hgcm_call_preprocess(const VBoxGuestHGCMCallInfo *call,
+	u32 call_size, bool is_user, void ***bounce_bufs_ret, size_t *pcb_extra)
+{
+	const HGCMFunctionParameter *src_parm = VBOXGUEST_HGCM_CALL_PARMS(call);
+	u32 i, parms = call->cParms;
+	void **bounce_bufs = NULL;
+	int rc;
+
+	*bounce_bufs_ret = NULL;
+	*pcb_extra = 0;
+
+	for (i = 0; i < parms; i++, src_parm++) {
+		switch (src_parm->type) {
+		case VMMDevHGCMParmType_32bit:
+		case VMMDevHGCMParmType_64bit:
+			/* Plain value parameters need no preprocessing. */
+			break;
+
+		case VMMDevHGCMParmType_PageList:
+			/* Page lists are only trusted from kernel callers. */
+			if (is_user)
+				return VERR_INVALID_PARAMETER;
+
+			rc = hgcm_call_check_pagelist(src_parm, call,
+						      call_size, pcb_extra);
+			if (rc)
+				return rc;
+
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_In:
+		case VMMDevHGCMParmType_LinAddr_Out:
+		case VMMDevHGCMParmType_LinAddr:
+			/* Lazily allocate the bounce buffer array (user only). */
+			if (is_user && !bounce_bufs) {
+				bounce_bufs =
+				    (void **)kcalloc(parms, sizeof(void *),
+						     GFP_KERNEL);
+				if (!bounce_bufs)
+					return VERR_NO_MEMORY;
+
+				*bounce_bufs_ret = bounce_bufs;
+			}
+
+			/*
+			 * For kernel calls bounce_bufs is NULL, so
+			 * &bounce_bufs[i] is a bogus address, but
+			 * hgcm_call_preprocess_linaddr() only writes through
+			 * it when is_user is true, so it is never
+			 * dereferenced in that case.
+			 */
+			rc = hgcm_call_preprocess_linaddr(src_parm, is_user,
+							  &bounce_bufs[i],
+							  pcb_extra);
+			if (rc)
+				return rc;
+
+			break;
+
+		default:
+			return VERR_INVALID_PARAMETER;
+		}
+	}
+
+	return VINF_SUCCESS;
+}
+
+/**
+ * Translates linear address types to page list direction flags.
+ *
+ * @returns page list flags.
+ * @param enmType The type.
+ */
+static u32
+vbglR0HGCMInternalLinAddrTypeToPageListFlags(HGCMFunctionParameterType enmType)
+{
+	switch (enmType) {
+	case VMMDevHGCMParmType_LinAddr_In:
+		return VBOX_HGCM_F_PARM_DIRECTION_TO_HOST;
+
+	case VMMDevHGCMParmType_LinAddr_Out:
+		return VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST;
+
+	default:
+		WARN_ON(1);
+		/* fallthrough - treat unknown types as bi-directional */
+	case VMMDevHGCMParmType_LinAddr:
+		return VBOX_HGCM_F_PARM_DIRECTION_BOTH;
+	}
+}
+
+/*
+ * Converts a linear-address parameter into a physical page list stored in
+ * the extra space at *off_extra of the host request, advancing *off_extra.
+ */
+static void hgcm_call_init_linaddr(VMMDevHGCMCall *call,
+				   HGCMFunctionParameter *dst_parm,
+				   void *buf, u32 len,
+				   HGCMFunctionParameterType type,
+				   u32 *off_extra)
+{
+	HGCMPageListInfo *dst_pg_lst;
+	struct page *page;
+	bool is_vmalloc;
+	u32 i, pages;
+
+	dst_parm->type = type;
+
+	/* Empty buffers are passed as a zero-length linear address. */
+	if (len == 0) {
+		dst_parm->u.Pointer.size = 0;
+		dst_parm->u.Pointer.u.linearAddr = 0;
+		return;
+	}
+
+	dst_pg_lst = (void *)call + *off_extra;
+	pages = hgcm_call_buf_size_in_pages(buf, len);
+	is_vmalloc = is_vmalloc_addr(buf);
+
+	dst_parm->type = VMMDevHGCMParmType_PageList;
+	dst_parm->u.PageList.size = len;
+	dst_parm->u.PageList.offset = *off_extra;
+	dst_pg_lst->flags = vbglR0HGCMInternalLinAddrTypeToPageListFlags(type);
+	dst_pg_lst->offFirstPage = (unsigned long)buf & ~PAGE_MASK;
+	dst_pg_lst->cPages = pages;
+
+	/* vmalloc memory is not physically contiguous, look up each page. */
+	for (i = 0; i < pages; i++) {
+		if (is_vmalloc)
+			page = vmalloc_to_page(buf);
+		else
+			page = virt_to_page(buf);
+
+		dst_pg_lst->aPages[i] = page_to_phys(page);
+		buf += PAGE_SIZE;
+	}
+
+	*off_extra += offsetof(HGCMPageListInfo, aPages[pages]);
+}
+
+/**
+ * Initializes the call request that we're sending to the host.
+ *
+ * Copies the header fields and all parameters from the guest call info into
+ * the host request, translating linear addresses into page lists placed in
+ * the extra space after the parameter array (tracked by offExtra).
+ *
+ * @param pHGCMCall The call to initialize.
+ * @param pCallInfo The call info.
+ * @param cbCallInfo The size of the call info structure.
+ * @param bounce_bufs The bouncebuffer array (NULL for kernel requests).
+ */
+static void vbglR0HGCMInternalInitCall(VMMDevHGCMCall * pHGCMCall,
+				       VBoxGuestHGCMCallInfo const *pCallInfo,
+				       u32 cbCallInfo, void **bounce_bufs)
+{
+	HGCMFunctionParameter const *pSrcParm =
+	    VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
+	HGCMFunctionParameter *pDstParm = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
+	u32 cParms = pCallInfo->cParms;
+	/* offExtra starts right after the destination parameter array. */
+	u32 offExtra =
+	    (u32) ((uintptr_t) (pDstParm + cParms) -
+		   (uintptr_t) pHGCMCall);
+	u32 iParm;
+	void *buf;
+
+	/*
+	 * The call request headers.
+	 */
+	pHGCMCall->header.fu32Flags = 0;
+	pHGCMCall->header.result = VINF_SUCCESS;
+
+	pHGCMCall->u32ClientID = pCallInfo->u32ClientID;
+	pHGCMCall->u32Function = pCallInfo->u32Function;
+	pHGCMCall->cParms = cParms;
+
+	/*
+	 * The parameters.
+	 */
+	for (iParm = 0; iParm < pCallInfo->cParms;
+	     iParm++, pSrcParm++, pDstParm++) {
+		switch (pSrcParm->type) {
+		case VMMDevHGCMParmType_32bit:
+		case VMMDevHGCMParmType_64bit:
+			/* Plain values are copied verbatim. */
+			*pDstParm = *pSrcParm;
+			break;
+
+		case VMMDevHGCMParmType_PageList:
+			/*
+			 * Copy the caller-supplied (already validated) page
+			 * list into the extra space of the host request.
+			 */
+			pDstParm->type = VMMDevHGCMParmType_PageList;
+			pDstParm->u.PageList.size = pSrcParm->u.PageList.size;
+			if (pSrcParm->u.PageList.size) {
+				HGCMPageListInfo const *pSrcPgLst =
+				    (HGCMPageListInfo *) ((u8 *) pCallInfo
+							  +
+							  pSrcParm->u.PageList.
+							  offset);
+				HGCMPageListInfo *pDstPgLst =
+				    (HGCMPageListInfo *) ((u8 *) pHGCMCall
+							  + offExtra);
+				u32 const cPages = pSrcPgLst->cPages;
+				u32 iPage;
+
+				pDstParm->u.PageList.offset = offExtra;
+				pDstPgLst->flags = pSrcPgLst->flags;
+				pDstPgLst->offFirstPage =
+				    pSrcPgLst->offFirstPage;
+				pDstPgLst->cPages = cPages;
+				for (iPage = 0; iPage < cPages; iPage++)
+					pDstPgLst->aPages[iPage] =
+					    pSrcPgLst->aPages[iPage];
+
+				offExtra +=
+				    offsetof(HGCMPageListInfo,
+					     aPages[cPages]);
+			} else
+				pDstParm->u.PageList.offset = 0;
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_In:
+		case VMMDevHGCMParmType_LinAddr_Out:
+		case VMMDevHGCMParmType_LinAddr:
+			/* Use the bounce buffer (user requests) when present. */
+			if (bounce_bufs && bounce_bufs[iParm])
+				buf = bounce_bufs[iParm];
+			else
+				buf = (void *)pSrcParm->u.Pointer.u.linearAddr;
+
+			hgcm_call_init_linaddr(pHGCMCall, pDstParm, buf,
+					       pSrcParm->u.Pointer.size,
+					       pSrcParm->type, &offExtra);
+			break;
+
+		default:
+			/* Should have been rejected by hgcm_call_preprocess. */
+			WARN_ON(1);
+			pDstParm->type = VMMDevHGCMParmType_Invalid;
+		}
+	}
+}
+
+/*
+ * Cancel a pending, asynchronously executing HGCM call.
+ *
+ * Note this function returns a VBox status code, not a negative errno!!
+ */
+static int hgcm_cancel_call(VBOXGUESTDEVEXT *gdev, VMMDevHGCMCall * call)
+{
+	int rc;
+
+	/*
+	 * We use a pre-allocated request for cancellations, which is
+	 * protected by cancel_req_mutex. This means that all cancellations
+	 * get serialized, this should be fine since they should be rare.
+	 */
+	mutex_lock(&gdev->cancel_req_mutex);
+	gdev->cancel_req->physReqToCancel = virt_to_phys(call);
+	rc = vbg_req_perform(gdev, gdev->cancel_req);
+	mutex_unlock(&gdev->cancel_req_mutex);
+
+	/* Fallback for older hosts without the cancel2 request. */
+	/** @todo ADDVER: Remove this on next minor version change. */
+	if (rc == VERR_NOT_IMPLEMENTED) {
+		call->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
+		call->header.header.requestType = VMMDevReq_HGCMCancel;
+
+		rc = vbg_req_perform(gdev, call);
+		if (rc == VERR_INVALID_PARAMETER)
+			rc = VERR_NOT_FOUND;
+	}
+
+	/* Cancellation succeeded, mark the call as cancelled. */
+	if (rc >= 0)
+		call->header.fu32Flags |= VBOX_HGCM_REQ_CANCELLED;
+
+	return rc;
+}
+
+/**
+ * Performs the call and waits for its completion.
+ *
+ * @returns VBox status code
+ *
+ * @param gdev       The VBoxGuest device extension.
+ * @param call       The HGCM call info.
+ * @param timeout_ms Timeout in ms, U32_MAX means wait without a timeout.
+ * @param is_user    Is this an in kernel call or from userspace ?
+ * @param leak_it    Where to return the leak it / free it indicator; set
+ *                   to true when the call could neither be completed nor
+ *                   cancelled, in which case the caller must NOT free
+ *                   the request.
+ */
+static int vbg_hgcm_do_call(VBOXGUESTDEVEXT *gdev, VMMDevHGCMCall *call,
+			    u32 timeout_ms, bool is_user, bool *leak_it)
+{
+	long timeout;
+	int rc, cancel_rc;
+
+	*leak_it = false;
+
+	rc = vbg_req_perform(gdev, call);
+
+	/*
+	 * If the call failed, then pretend success.
+	 * Upper layers will interpret the result code in the packet.
+	 */
+	if (rc < 0) {
+		WARN_ON(!(call->header.fu32Flags & VBOX_HGCM_REQ_DONE));
+		return VINF_SUCCESS;
+	}
+
+	/* Any other success status means the call completed synchronously */
+	if (rc != VINF_HGCM_ASYNC_EXECUTE)
+		return rc;
+
+	/* Host decided to process the request asynchronously, wait for it */
+	if (timeout_ms == U32_MAX)
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	else
+		timeout = msecs_to_jiffies(timeout_ms);
+
+	/* Only waits on behalf of userspace may be interrupted by signals */
+	if (is_user) {
+		timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
+							   hgcm_req_done
+							   (gdev,
+							    &call->header),
+							   timeout);
+	} else {
+		timeout = wait_event_timeout(gdev->hgcm_wq,
+					     hgcm_req_done(gdev,
+							   &call->header),
+					     timeout);
+	}
+
+	/* timeout > 0 means hgcm_req_done has returned true, so success */
+	if (timeout > 0)
+		return VINF_SUCCESS;
+
+	if (timeout == 0)
+		rc = VERR_TIMEOUT;
+	else
+		rc = VERR_INTERRUPTED;
+
+	/* Cancel the request */
+	cancel_rc = hgcm_cancel_call(gdev, call);
+	if (cancel_rc >= 0)
+		return rc;
+
+	/*
+	 * Failed to cancel, this should mean that the cancel has lost the
+	 * race with normal completion, wait while the host completes it.
+	 */
+	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
+		timeout = msecs_to_jiffies(500);
+	else
+		timeout = msecs_to_jiffies(2000);
+
+	timeout = wait_event_timeout(gdev->hgcm_wq,
+				     hgcm_req_done(gdev, &call->header),
+				     timeout);
+
+	if (WARN_ON(timeout == 0)) {
+		/* We really should never get here */
+		vbg_err("vbg_hgcm_do_call: Call timedout and cancelation failed, leaking the request\n");
+		*leak_it = true;
+		return rc;
+	}
+
+	/* The call has completed normally after all */
+	return VINF_SUCCESS;
+}
+
+/**
+ * Copies the result of the call back to the caller info structure and user
+ * buffers.
+ *
+ * @returns VBox status code
+ * @param pCallInfo   Call info structure to update.
+ * @param pHGCMCall   HGCM call request.
+ * @param bounce_bufs The bouncebuffer array.
+ */
+static int vbglR0HGCMInternalCopyBackResult(VBoxGuestHGCMCallInfo * pCallInfo,
+					    VMMDevHGCMCall const *pHGCMCall,
+					    void **bounce_bufs)
+{
+	HGCMFunctionParameter const *src = VMMDEV_HGCM_CALL_PARMS(pHGCMCall);
+	HGCMFunctionParameter *dst = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo);
+	u32 parm_count = pCallInfo->cParms;
+	u32 i;
+
+	/* The call result. */
+	pCallInfo->result = pHGCMCall->header.result;
+
+	/* Copy back parameters. */
+	for (i = 0; i < parm_count; i++, src++, dst++) {
+		switch (dst->type) {
+		case VMMDevHGCMParmType_32bit:
+		case VMMDevHGCMParmType_64bit:
+			*dst = *src;
+			break;
+
+		case VMMDevHGCMParmType_PageList:
+			dst->u.PageList.size = src->u.PageList.size;
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_In:
+			/* Input-only buffer, only the size is copied back */
+			dst->u.Pointer.size = src->u.Pointer.size;
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_Out:
+		case VMMDevHGCMParmType_LinAddr:
+			/*
+			 * If a bounce buffer was used the host wrote into
+			 * it, copy its contents back to the user buffer.
+			 */
+			if (bounce_bufs && bounce_bufs[i]) {
+				void __user *user_buf = (void __user *)
+					dst->u.Pointer.u.linearAddr;
+				u32 count = min(src->u.Pointer.size,
+						dst->u.Pointer.size);
+
+				if (copy_to_user(user_buf, bounce_bufs[i],
+						 count))
+					return VERR_ACCESS_DENIED;
+			}
+			dst->u.Pointer.size = src->u.Pointer.size;
+			break;
+
+		default:
+			WARN_ON(1);
+			return VERR_INTERNAL_ERROR_4;
+		}
+	}
+
+	return VINF_SUCCESS;
+}
+
+/**
+ * Performs an HGCM call to the host.
+ *
+ * @returns VBox status code
+ * @param gdev       The VBoxGuest device extension.
+ * @param pCallInfo  The call info, the parameters follow it in memory.
+ * @param cbCallInfo The size of the call info structure incl. parameters.
+ * @param timeout_ms Timeout in ms, use U32_MAX for no timeout.
+ * @param is_user    Is this an in kernel call or from userspace ?
+ */
+int vbg_hgcm_call(VBOXGUESTDEVEXT *gdev, VBoxGuestHGCMCallInfo *pCallInfo,
+		  u32 cbCallInfo, u32 timeout_ms, bool is_user)
+{
+	VMMDevHGCMCall *pHGCMCall;
+	void **bounce_bufs;
+	size_t cbExtra;
+	bool leak_it;
+	/* u32 to match pCallInfo->cParms, avoids signed/unsigned compare */
+	u32 i;
+	int rc;
+
+	/*
+	 * Validate, lock and buffer the parameters for the call.
+	 * This will calculate the amount of extra space for physical page list.
+	 */
+	rc = hgcm_call_preprocess(pCallInfo, cbCallInfo, is_user,
+				  &bounce_bufs, &cbExtra);
+	if (rc) {
+		/* Even on error bounce bufs may still have been allocated */
+		goto free_bounce_bufs;
+	}
+
+	pHGCMCall = vbg_req_alloc(sizeof(VMMDevHGCMCall) + pCallInfo->cParms *
+				  sizeof(HGCMFunctionParameter) + cbExtra,
+				  VMMDevReq_HGCMCall);
+	if (!pHGCMCall) {
+		rc = VERR_NO_MEMORY;
+		goto free_bounce_bufs;
+	}
+
+	vbglR0HGCMInternalInitCall(pHGCMCall, pCallInfo, cbCallInfo, bounce_bufs);
+
+	rc = vbg_hgcm_do_call(gdev, pHGCMCall, timeout_ms, is_user, &leak_it);
+	if (rc >= 0)
+		rc = vbglR0HGCMInternalCopyBackResult(pCallInfo, pHGCMCall,
+						      bounce_bufs);
+
+	/* do_call sets leak_it when the host may still own the request */
+	if (!leak_it)
+		kfree(pHGCMCall);
+
+free_bounce_bufs:
+	if (bounce_bufs) {
+		for (i = 0; i < pCallInfo->cParms; i++) {
+			if (is_vmalloc_addr(bounce_bufs[i]))
+				vfree(bounce_bufs[i]);
+			else
+				kfree(bounce_bufs[i]);
+		}
+		kfree(bounce_bufs);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(vbg_hgcm_call);
+
+#ifdef CONFIG_X86_64
+/**
+ * 32-bit layout compatibility version of vbg_hgcm_call.
+ *
+ * Converts the 32-bit parameter structures (HGCMFunctionParameter32) to
+ * their 64-bit equivalents, performs the call through vbg_hgcm_call and
+ * copies the results back into the 32-bit structures.
+ *
+ * @returns VBox status code
+ * @param gdev       The VBoxGuest device extension.
+ * @param pCallInfo  The call info, 32-bit parameters follow it in memory.
+ * @param cbCallInfo The size of the call info structure (unused here, the
+ *                   size is recomputed for the 64-bit layout).
+ * @param timeout_ms Timeout in ms, passed through to vbg_hgcm_call.
+ * @param is_user    Is this an in kernel call or from userspace ?
+ */
+int vbg_hgcm_call32(VBOXGUESTDEVEXT *gdev, VBoxGuestHGCMCallInfo * pCallInfo,
+		    u32 cbCallInfo, u32 timeout_ms, bool is_user)
+{
+	VBoxGuestHGCMCallInfo *pCallInfo64 = NULL;
+	HGCMFunctionParameter *pParm64 = NULL;
+	HGCMFunctionParameter32 *pParm32 = NULL;
+	u32 cParms = pCallInfo->cParms;
+	u32 iParm;
+	int rc = VINF_SUCCESS;
+
+	/*
+	 * The simple approach, allocate a temporary request and convert the parameters.
+	 */
+	pCallInfo64 = kzalloc(sizeof(*pCallInfo64) +
+			      cParms * sizeof(HGCMFunctionParameter),
+			      GFP_KERNEL);
+	if (!pCallInfo64)
+		return VERR_NO_MEMORY;
+
+	*pCallInfo64 = *pCallInfo;
+	pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
+	pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
+	for (iParm = 0; iParm < cParms; iParm++, pParm32++, pParm64++) {
+		switch (pParm32->type) {
+		case VMMDevHGCMParmType_32bit:
+			pParm64->type = VMMDevHGCMParmType_32bit;
+			pParm64->u.value32 = pParm32->u.value32;
+			break;
+
+		case VMMDevHGCMParmType_64bit:
+			pParm64->type = VMMDevHGCMParmType_64bit;
+			pParm64->u.value64 = pParm32->u.value64;
+			break;
+
+		case VMMDevHGCMParmType_LinAddr_Out:
+		case VMMDevHGCMParmType_LinAddr:
+		case VMMDevHGCMParmType_LinAddr_In:
+			pParm64->type = pParm32->type;
+			pParm64->u.Pointer.size = pParm32->u.Pointer.size;
+			pParm64->u.Pointer.u.linearAddr =
+			    pParm32->u.Pointer.u.linearAddr;
+			break;
+
+		default:
+			rc = VERR_INVALID_PARAMETER;
+		}
+		if (rc < 0)
+			goto out_free;
+	}
+
+	rc = vbg_hgcm_call(gdev, pCallInfo64,
+			   sizeof(*pCallInfo64) +
+			   cParms * sizeof(HGCMFunctionParameter),
+			   timeout_ms, is_user);
+	if (rc >= 0) {
+		*pCallInfo = *pCallInfo64;
+
+		/*
+		 * Copy back the results into the 32-bit parameters.
+		 */
+		pParm32 = VBOXGUEST_HGCM_CALL_PARMS32(pCallInfo);
+		pParm64 = VBOXGUEST_HGCM_CALL_PARMS(pCallInfo64);
+		for (iParm = 0; iParm < cParms;
+		     iParm++, pParm32++, pParm64++) {
+			switch (pParm64->type) {
+			case VMMDevHGCMParmType_32bit:
+				pParm32->u.value32 = pParm64->u.value32;
+				break;
+
+			case VMMDevHGCMParmType_64bit:
+				pParm32->u.value64 = pParm64->u.value64;
+				break;
+
+			case VMMDevHGCMParmType_LinAddr_Out:
+			case VMMDevHGCMParmType_LinAddr:
+			case VMMDevHGCMParmType_LinAddr_In:
+				pParm32->u.Pointer.size =
+				    pParm64->u.Pointer.size;
+				break;
+			default:
+				WARN_ON(1);
+				rc = VERR_INTERNAL_ERROR_3;
+			}
+		}
+	}
+
+out_free:
+	kfree(pCallInfo64);
+	return rc;
+}
+#endif
+
+/*
+ * Convert a VBox status code to a standard Unix errno value.
+ *
+ * Note: this returns the errno as a POSITIVE value (e.g. EPERM, not
+ * -EPERM); callers needing the usual kernel negative-errno convention
+ * must negate the result. VBox success codes (rc >= 0) map to 0.
+ */
+int vbg_status_code_to_errno(int rc)
+{
+	if (rc >= 0)
+		return 0;
+
+	switch (rc) {
+	case VERR_ACCESS_DENIED:                    return EPERM;
+	case VERR_FILE_NOT_FOUND:                   return ENOENT;
+	case VERR_PROCESS_NOT_FOUND:                return ESRCH;
+	case VERR_INTERRUPTED:                      return EINTR;
+	case VERR_DEV_IO_ERROR:                     return EIO;
+	case VERR_TOO_MUCH_DATA:                    return E2BIG;
+	case VERR_BAD_EXE_FORMAT:                   return ENOEXEC;
+	case VERR_INVALID_HANDLE:                   return EBADF;
+	case VERR_TRY_AGAIN:                        return EAGAIN;
+	case VERR_NO_MEMORY:                        return ENOMEM;
+	case VERR_INVALID_POINTER:                  return EFAULT;
+	case VERR_RESOURCE_BUSY:                    return EBUSY;
+	case VERR_ALREADY_EXISTS:                   return EEXIST;
+	case VERR_NOT_SAME_DEVICE:                  return EXDEV;
+	case VERR_NOT_A_DIRECTORY:
+	case VERR_PATH_NOT_FOUND:                   return ENOTDIR;
+	case VERR_IS_A_DIRECTORY:                   return EISDIR;
+	case VERR_INVALID_PARAMETER:                return EINVAL;
+	case VERR_TOO_MANY_OPEN_FILES:              return ENFILE;
+	case VERR_INVALID_FUNCTION:                 return ENOTTY;
+	case VERR_SHARING_VIOLATION:                return ETXTBSY;
+	case VERR_FILE_TOO_BIG:                     return EFBIG;
+	case VERR_DISK_FULL:                        return ENOSPC;
+	case VERR_SEEK_ON_DEVICE:                   return ESPIPE;
+	case VERR_WRITE_PROTECT:                    return EROFS;
+	case VERR_BROKEN_PIPE:                      return EPIPE;
+	case VERR_DEADLOCK:                         return EDEADLK;
+	case VERR_FILENAME_TOO_LONG:                return ENAMETOOLONG;
+	case VERR_FILE_LOCK_FAILED:                 return ENOLCK;
+	case VERR_NOT_IMPLEMENTED:
+	case VERR_NOT_SUPPORTED:                    return ENOSYS;
+	case VERR_DIR_NOT_EMPTY:                    return ENOTEMPTY;
+	case VERR_TOO_MANY_SYMLINKS:                return ELOOP;
+	case VERR_NO_DATA:                          return ENODATA;
+	case VERR_NET_NO_NETWORK:                   return ENONET;
+	case VERR_NET_NOT_UNIQUE_NAME:              return ENOTUNIQ;
+	case VERR_NO_TRANSLATION:                   return EILSEQ;
+	case VERR_NET_NOT_SOCKET:                   return ENOTSOCK;
+	case VERR_NET_DEST_ADDRESS_REQUIRED:        return EDESTADDRREQ;
+	case VERR_NET_MSG_SIZE:                     return EMSGSIZE;
+	case VERR_NET_PROTOCOL_TYPE:                return EPROTOTYPE;
+	case VERR_NET_PROTOCOL_NOT_AVAILABLE:       return ENOPROTOOPT;
+	case VERR_NET_PROTOCOL_NOT_SUPPORTED:       return EPROTONOSUPPORT;
+	case VERR_NET_SOCKET_TYPE_NOT_SUPPORTED:    return ESOCKTNOSUPPORT;
+	case VERR_NET_OPERATION_NOT_SUPPORTED:      return EOPNOTSUPP;
+	case VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED: return EPFNOSUPPORT;
+	case VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED: return EAFNOSUPPORT;
+	case VERR_NET_ADDRESS_IN_USE:               return EADDRINUSE;
+	case VERR_NET_ADDRESS_NOT_AVAILABLE:        return EADDRNOTAVAIL;
+	case VERR_NET_DOWN:                         return ENETDOWN;
+	case VERR_NET_UNREACHABLE:                  return ENETUNREACH;
+	case VERR_NET_CONNECTION_RESET:             return ENETRESET;
+	case VERR_NET_CONNECTION_ABORTED:           return ECONNABORTED;
+	case VERR_NET_CONNECTION_RESET_BY_PEER:     return ECONNRESET;
+	case VERR_NET_NO_BUFFER_SPACE:              return ENOBUFS;
+	case VERR_NET_ALREADY_CONNECTED:            return EISCONN;
+	case VERR_NET_NOT_CONNECTED:                return ENOTCONN;
+	case VERR_NET_SHUTDOWN:                     return ESHUTDOWN;
+	case VERR_NET_TOO_MANY_REFERENCES:          return ETOOMANYREFS;
+	case VERR_TIMEOUT:                          return ETIMEDOUT;
+	case VERR_NET_CONNECTION_REFUSED:           return ECONNREFUSED;
+	case VERR_NET_HOST_DOWN:                    return EHOSTDOWN;
+	case VERR_NET_HOST_UNREACHABLE:             return EHOSTUNREACH;
+	case VERR_NET_ALREADY_IN_PROGRESS:          return EALREADY;
+	case VERR_NET_IN_PROGRESS:                  return EINPROGRESS;
+	case VERR_MEDIA_NOT_PRESENT:                return ENOMEDIUM;
+	case VERR_MEDIA_NOT_RECOGNIZED:             return EMEDIUMTYPE;
+	default:
+		vbg_warn("vbg_status_code_to_errno: Unhandled err %d\n", rc);
+		return EPROTO;
+	}
+}
+EXPORT_SYMBOL(vbg_status_code_to_errno);
diff --git a/drivers/misc/vboxguest/vboxguest_version.h b/drivers/misc/vboxguest/vboxguest_version.h
new file mode 100644
index 000000000000..47a53151fcd0
--- /dev/null
+++ b/drivers/misc/vboxguest/vboxguest_version.h
@@ -0,0 +1,18 @@
+/*
+ * VBox Guest additions version info, this is used by the host to determine
+ * supported guest-addition features in some cases. So this will need to be
+ * synced with vbox upstreams versioning scheme when we implement / port
+ * new features from the upstream out-of-tree vboxguest driver.
+ */
+
+#ifndef __VBOX_VERSION_H__
+#define __VBOX_VERSION_H__
+
+/* Last synced July 12th 2017 */
+#define VBOX_VERSION_MAJOR 5
+#define VBOX_VERSION_MINOR 1
+#define VBOX_VERSION_BUILD 51
+#define VBOX_SVN_REV 67325
+#define VBOX_VERSION_STRING "5.1.51"
+
+#endif
diff --git a/include/linux/vbox_err.h b/include/linux/vbox_err.h
new file mode 100644
index 000000000000..906ff7d2585d
--- /dev/null
+++ b/include/linux/vbox_err.h
@@ -0,0 +1,6 @@
+#ifndef __VBOX_ERR_H__
+#define __VBOX_ERR_H__
+
+#include <uapi/linux/vbox_err.h>
+
+#endif
diff --git a/include/linux/vbox_ostypes.h b/include/linux/vbox_ostypes.h
new file mode 100644
index 000000000000..ea2a391f135f
--- /dev/null
+++ b/include/linux/vbox_ostypes.h
@@ -0,0 +1,6 @@
+#ifndef __VBOX_OSTYPES_H__
+#define __VBOX_OSTYPES_H__
+
+#include <uapi/linux/vbox_ostypes.h>
+
+#endif
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
new file mode 100644
index 000000000000..fcb77249dfe7
--- /dev/null
+++ b/include/linux/vbox_utils.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __VBOX_UTILS_H__
+#define __VBOX_UTILS_H__
+
+#include <linux/printk.h>
+#include <linux/vbox_vmmdev.h>
+#include <linux/vboxguest.h>
+
+struct VBOXGUESTDEVEXT;
+
+/**
+ * vboxguest logging functions, these log both to the backdoor and call
+ * the equivalent kernel pr_foo function.
+ */
+__printf(1, 2) void vbg_info(const char *fmt, ...);
+__printf(1, 2) void vbg_warn(const char *fmt, ...);
+__printf(1, 2) void vbg_err(const char *fmt, ...);
+
+/* Only use backdoor logging for non-dynamic debug builds */
+#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
+__printf(1, 2) void vbg_debug(const char *fmt, ...);
+#else
+#define vbg_debug pr_debug
+#endif
+
+/** @name Generic request functions.
+ * @{
+ */
+
+/**
+ * Allocate memory for generic request and initialize the request header.
+ *
+ * @returns the allocated memory
+ * @param len Size of memory block required for the request.
+ * @param req_type The generic request type.
+ */
+void *vbg_req_alloc(size_t len, VMMDevRequestType req_type);
+
+/**
+ * Perform a generic request.
+ *
+ * @returns VBox status code
+ * @param gdev The Guest extension device.
+ * @param req Pointer the request structure.
+ */
+int vbg_req_perform(struct VBOXGUESTDEVEXT *gdev, void *req);
+
+/**
+ * Verify the generic request header.
+ *
+ * @returns VBox status code
+ * @param req pointer the request header structure.
+ * @param buffer_size size of the request memory block. It should be equal to
+ * the request size for fixed size requests. It can be
+ * greater than the request size for variable size requests.
+ */
+int vbg_req_verify(const VMMDevRequestHeader *req, size_t buffer_size);
+/** @} */
+
+int vbg_hgcm_connect(struct VBOXGUESTDEVEXT *gdev, HGCMServiceLocation *loc,
+ u32 *client_id);
+
+int vbg_hgcm_disconnect(struct VBOXGUESTDEVEXT *gdev, u32 client_id);
+
+int vbg_hgcm_call(struct VBOXGUESTDEVEXT *gdev,
+ VBoxGuestHGCMCallInfo *pCallInfo, u32 cbCallInfo,
+ u32 timeout_ms, bool is_user);
+
+int vbg_hgcm_call32(struct VBOXGUESTDEVEXT *gdev,
+ VBoxGuestHGCMCallInfo *pCallInfo, u32 cbCallInfo,
+ u32 timeout_ms, bool is_user);
+
+int vbg_status_code_to_errno(int rc);
+
+struct VBOXGUESTDEVEXT *vbg_get_gdev(void);
+void vbg_put_gdev(struct VBOXGUESTDEVEXT *gdev);
+
+#endif
diff --git a/include/linux/vbox_vmmdev.h b/include/linux/vbox_vmmdev.h
new file mode 100644
index 000000000000..8867c1f4eb08
--- /dev/null
+++ b/include/linux/vbox_vmmdev.h
@@ -0,0 +1,128 @@
+/*
+ * Virtual Device for Guest <-> VMM/Host communication (ADD,DEV).
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __VBOX_VMMDEV_H__
+#define __VBOX_VMMDEV_H__
+
+#include <linux/sizes.h>
+#include <uapi/linux/vbox_vmmdev.h>
+
+/**
+ * @name VBVA ring defines.
+ *
+ * The VBVA ring buffer is suitable for transferring large (< 2GB) amount of
+ * data. For example big bitmaps which do not fit to the buffer.
+ *
+ * Guest starts writing to the buffer by initializing a record entry in the
+ * aRecords queue. VBVA_F_RECORD_PARTIAL indicates that the record is being
+ * written. As data is written to the ring buffer, the guest increases off32End
+ * for the record.
+ *
+ * The host reads the aRecords on flushes and processes all completed records.
+ * When the host encounters a situation where only a partial record is
+ * present and cbRecord & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE -
+ * VBVA_RING_BUFFER_THRESHOLD, the host fetches all record data and updates
+ * off32Head. After that, on each flush, the host continues fetching the data
+ * until the record is completed.
+ *
+ * @{
+ */
+#define VMMDEV_VBVA_RING_BUFFER_SIZE (SZ_4M - SZ_1K)
+#define VMMDEV_VBVA_RING_BUFFER_THRESHOLD (SZ_4K)
+
+#define VMMDEV_VBVA_MAX_RECORDS (64)
+/** @} */
+
+/** VBVA record. */
+typedef struct VMMDEVVBVARECORD {
+ /** The length of the record. Changed by guest. */
+ u32 cbRecord;
+} VMMDEVVBVARECORD;
+VMMDEV_ASSERT_SIZE(VMMDEVVBVARECORD, 4);
+
+/**
+ * VBVA memory layout.
+ *
+ * This is a subsection of the VMMDevMemory structure.
+ */
+typedef struct VBVAMEMORY {
+ /** VBVA_F_MODE_*. */
+ u32 fu32ModeFlags;
+
+ /** The offset where the data start in the buffer. */
+ u32 off32Data;
+ /** The offset where next data must be placed in the buffer. */
+ u32 off32Free;
+
+ /** The ring buffer for data. */
+ u8 au8RingBuffer[VMMDEV_VBVA_RING_BUFFER_SIZE];
+
+ /** The queue of record descriptions. */
+ VMMDEVVBVARECORD aRecords[VMMDEV_VBVA_MAX_RECORDS];
+ u32 indexRecordFirst;
+ u32 indexRecordFree;
+
+ /**
+ * RDP orders supported by the client. The guest reports only them
+ * and falls back to DIRTY rects for not supported ones.
+ *
+ * (1 << VBVA_VRDP_*)
+ */
+ u32 fu32SupportedOrders;
+
+} VBVAMEMORY;
+VMMDEV_ASSERT_SIZE(VBVAMEMORY, 12 + (SZ_4M-SZ_1K) + 4*64 + 12);
+
+/**
+ * The layout of VMMDEV RAM region that contains information for guest.
+ */
+typedef struct VMMDevMemory {
+ /** The size of this structure. */
+ u32 u32Size;
+ /** The structure version. (VMMDEV_MEMORY_VERSION) */
+ u32 u32Version;
+
+ union {
+ struct {
+ /** Flag telling that VMMDev has events pending. */
+ bool fHaveEvents;
+ } V1_04;
+
+ struct {
+ /** Pending events flags, set by host. */
+ u32 u32HostEvents;
+ /** Mask of events the guest wants, set by guest. */
+ u32 u32GuestEventMask;
+ } V1_03;
+ } V;
+
+ VBVAMEMORY vbvaMemory;
+
+} VMMDevMemory;
+VMMDEV_ASSERT_SIZE(VMMDevMemory, 8 + 8 + sizeof(VBVAMEMORY));
+VMMDEV_ASSERT_MEMBER_OFFSET(VMMDevMemory, vbvaMemory, 16);
+
+/** Version of VMMDevMemory structure (VMMDevMemory::u32Version). */
+#define VMMDEV_MEMORY_VERSION (1)
+
+#endif
diff --git a/include/linux/vboxguest.h b/include/linux/vboxguest.h
new file mode 100644
index 000000000000..fca5d199a884
--- /dev/null
+++ b/include/linux/vboxguest.h
@@ -0,0 +1,6 @@
+#ifndef __VBOXGUEST_H__
+#define __VBOXGUEST_H__
+
+#include <uapi/linux/vboxguest.h>
+
+#endif
diff --git a/include/uapi/linux/vbox_err.h b/include/uapi/linux/vbox_err.h
new file mode 100644
index 000000000000..e6e7ba835e36
--- /dev/null
+++ b/include/uapi/linux/vbox_err.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __UAPI_VBOX_ERR_H__
+#define __UAPI_VBOX_ERR_H__
+
+/**
+ * @name VirtualBox virtual-hardware error macros
+ * @{
+ */
+
+#define VINF_SUCCESS 0
+#define VERR_GENERAL_FAILURE (-1)
+#define VERR_INVALID_PARAMETER (-2)
+#define VERR_INVALID_MAGIC (-3)
+#define VERR_INVALID_HANDLE (-4)
+#define VERR_LOCK_FAILED (-5)
+#define VERR_INVALID_POINTER (-6)
+#define VERR_IDT_FAILED (-7)
+#define VERR_NO_MEMORY (-8)
+#define VERR_ALREADY_LOADED (-9)
+#define VERR_PERMISSION_DENIED (-10)
+#define VERR_VERSION_MISMATCH (-11)
+#define VERR_NOT_IMPLEMENTED (-12)
+#define VERR_INVALID_FLAGS (-13)
+
+#define VERR_NOT_EQUAL (-18)
+#define VERR_NOT_SYMLINK (-19)
+#define VERR_NO_TMP_MEMORY (-20)
+#define VERR_INVALID_FMODE (-21)
+#define VERR_WRONG_ORDER (-22)
+#define VERR_NO_TLS_FOR_SELF (-23)
+#define VERR_FAILED_TO_SET_SELF_TLS (-24)
+#define VERR_NO_CONT_MEMORY (-26)
+#define VERR_NO_PAGE_MEMORY (-27)
+#define VERR_THREAD_IS_DEAD (-29)
+#define VERR_THREAD_NOT_WAITABLE (-30)
+#define VERR_PAGE_TABLE_NOT_PRESENT (-31)
+#define VERR_INVALID_CONTEXT (-32)
+#define VERR_TIMER_BUSY (-33)
+#define VERR_ADDRESS_CONFLICT (-34)
+#define VERR_UNRESOLVED_ERROR (-35)
+#define VERR_INVALID_FUNCTION (-36)
+#define VERR_NOT_SUPPORTED (-37)
+#define VERR_ACCESS_DENIED (-38)
+#define VERR_INTERRUPTED (-39)
+#define VERR_TIMEOUT (-40)
+#define VERR_BUFFER_OVERFLOW (-41)
+#define VERR_TOO_MUCH_DATA (-42)
+#define VERR_MAX_THRDS_REACHED (-43)
+#define VERR_MAX_PROCS_REACHED (-44)
+#define VERR_SIGNAL_REFUSED (-45)
+#define VERR_SIGNAL_PENDING (-46)
+#define VERR_SIGNAL_INVALID (-47)
+#define VERR_STATE_CHANGED (-48)
+#define VERR_INVALID_UUID_FORMAT (-49)
+#define VERR_PROCESS_NOT_FOUND (-50)
+#define VERR_PROCESS_RUNNING (-51)
+#define VERR_TRY_AGAIN (-52)
+#define VERR_PARSE_ERROR (-53)
+#define VERR_OUT_OF_RANGE (-54)
+#define VERR_NUMBER_TOO_BIG (-55)
+#define VERR_NO_DIGITS (-56)
+#define VERR_NEGATIVE_UNSIGNED (-57)
+#define VERR_NO_TRANSLATION (-58)
+
+#define VERR_NOT_FOUND (-78)
+#define VERR_INVALID_STATE (-79)
+#define VERR_OUT_OF_RESOURCES (-80)
+
+#define VERR_FILE_NOT_FOUND (-102)
+#define VERR_PATH_NOT_FOUND (-103)
+#define VERR_INVALID_NAME (-104)
+#define VERR_ALREADY_EXISTS (-105)
+#define VERR_TOO_MANY_OPEN_FILES (-106)
+#define VERR_SEEK (-107)
+#define VERR_NEGATIVE_SEEK (-108)
+#define VERR_SEEK_ON_DEVICE (-109)
+#define VERR_EOF (-110)
+#define VERR_READ_ERROR (-111)
+#define VERR_WRITE_ERROR (-112)
+#define VERR_WRITE_PROTECT (-113)
+#define VERR_SHARING_VIOLATION (-114)
+#define VERR_FILE_LOCK_FAILED (-115)
+#define VERR_FILE_LOCK_VIOLATION (-116)
+#define VERR_CANT_CREATE (-117)
+#define VERR_CANT_DELETE_DIRECTORY (-118)
+#define VERR_NOT_SAME_DEVICE (-119)
+#define VERR_FILENAME_TOO_LONG (-120)
+#define VERR_MEDIA_NOT_PRESENT (-121)
+#define VERR_MEDIA_NOT_RECOGNIZED (-122)
+#define VERR_FILE_NOT_LOCKED (-123)
+#define VERR_FILE_LOCK_LOST (-124)
+#define VERR_DIR_NOT_EMPTY (-125)
+#define VERR_NOT_A_DIRECTORY (-126)
+#define VERR_IS_A_DIRECTORY (-127)
+#define VERR_FILE_TOO_BIG (-128)
+
+#define VERR_NET_IO_ERROR (-400)
+#define VERR_NET_OUT_OF_RESOURCES (-401)
+#define VERR_NET_HOST_NOT_FOUND (-402)
+#define VERR_NET_PATH_NOT_FOUND (-403)
+#define VERR_NET_PRINT_ERROR (-404)
+#define VERR_NET_NO_NETWORK (-405)
+#define VERR_NET_NOT_UNIQUE_NAME (-406)
+
+#define VERR_NET_IN_PROGRESS (-436)
+#define VERR_NET_ALREADY_IN_PROGRESS (-437)
+#define VERR_NET_NOT_SOCKET (-438)
+#define VERR_NET_DEST_ADDRESS_REQUIRED (-439)
+#define VERR_NET_MSG_SIZE (-440)
+#define VERR_NET_PROTOCOL_TYPE (-441)
+#define VERR_NET_PROTOCOL_NOT_AVAILABLE (-442)
+#define VERR_NET_PROTOCOL_NOT_SUPPORTED (-443)
+#define VERR_NET_SOCKET_TYPE_NOT_SUPPORTED (-444)
+#define VERR_NET_OPERATION_NOT_SUPPORTED (-445)
+#define VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED (-446)
+#define VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED (-447)
+#define VERR_NET_ADDRESS_IN_USE (-448)
+#define VERR_NET_ADDRESS_NOT_AVAILABLE (-449)
+#define VERR_NET_DOWN (-450)
+#define VERR_NET_UNREACHABLE (-451)
+#define VERR_NET_CONNECTION_RESET (-452)
+#define VERR_NET_CONNECTION_ABORTED (-453)
+#define VERR_NET_CONNECTION_RESET_BY_PEER (-454)
+#define VERR_NET_NO_BUFFER_SPACE (-455)
+#define VERR_NET_ALREADY_CONNECTED (-456)
+#define VERR_NET_NOT_CONNECTED (-457)
+#define VERR_NET_SHUTDOWN (-458)
+#define VERR_NET_TOO_MANY_REFERENCES (-459)
+#define VERR_NET_CONNECTION_TIMED_OUT (-460)
+#define VERR_NET_CONNECTION_REFUSED (-461)
+#define VERR_NET_HOST_DOWN (-464)
+#define VERR_NET_HOST_UNREACHABLE (-465)
+#define VERR_NET_PROTOCOL_ERROR (-466)
+#define VERR_NET_INCOMPLETE_TX_PACKET (-467)
+
+/* misc. unsorted codes */
+#define VERR_RESOURCE_BUSY (-138)
+#define VERR_DISK_FULL (-152)
+#define VERR_TOO_MANY_SYMLINKS (-156)
+#define VERR_NO_MORE_FILES (-201)
+#define VERR_INTERNAL_ERROR (-225)
+#define VERR_INTERNAL_ERROR_2 (-226)
+#define VERR_INTERNAL_ERROR_3 (-227)
+#define VERR_INTERNAL_ERROR_4 (-228)
+#define VERR_DEV_IO_ERROR (-250)
+#define VERR_IO_BAD_LENGTH (-255)
+#define VERR_BROKEN_PIPE (-301)
+#define VERR_NO_DATA (-304)
+#define VERR_SEM_DESTROYED (-363)
+#define VERR_DEADLOCK (-365)
+#define VERR_BAD_EXE_FORMAT (-608)
+#define VINF_HGCM_ASYNC_EXECUTE (2903)
+
+#define RT_SUCCESS(rc) ((rc) >= 0)
+#define RT_FAILURE(rc) ((rc) < 0)
+
+#endif
diff --git a/include/uapi/linux/vbox_ostypes.h b/include/uapi/linux/vbox_ostypes.h
new file mode 100644
index 000000000000..abe9a38ebfbd
--- /dev/null
+++ b/include/uapi/linux/vbox_ostypes.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * VirtualBox - Global Guest Operating System definition.
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __UAPI_VBOX_OSTYPES_H__
+#define __UAPI_VBOX_OSTYPES_H__
+
+/** The bit number which indicates 64-bit or 32-bit. */
+#define VBOXOSTYPE_x64_BIT 8
+
+/**
+ * Global list of guest operating system types.
+ *
+ * They are grouped into families. A family identifier always has
+ * mod 0x10000 == 0. New entries can be added, however other components
+ * depend on the values (e.g. the Qt GUI and guest additions) so the
+ * existing values MUST stay the same.
+ *
+ * Note: distinguish between 32 & 64 bit guest OSes by checking bit 8.
+ */
+typedef enum VBOXOSTYPE {
+ VBOXOSTYPE_Unknown = 0,
+ VBOXOSTYPE_Unknown_x64 = 0x00100,
+ VBOXOSTYPE_DOS = 0x10000,
+ VBOXOSTYPE_Win31 = 0x15000,
+ VBOXOSTYPE_Win9x = 0x20000,
+ VBOXOSTYPE_Win95 = 0x21000,
+ VBOXOSTYPE_Win98 = 0x22000,
+ VBOXOSTYPE_WinMe = 0x23000,
+ VBOXOSTYPE_WinNT = 0x30000,
+ VBOXOSTYPE_WinNT_x64 = 0x30100,
+ VBOXOSTYPE_WinNT4 = 0x31000,
+ VBOXOSTYPE_Win2k = 0x32000,
+ VBOXOSTYPE_WinXP = 0x33000,
+ VBOXOSTYPE_WinXP_x64 = 0x33100,
+ VBOXOSTYPE_Win2k3 = 0x34000,
+ VBOXOSTYPE_Win2k3_x64 = 0x34100,
+ VBOXOSTYPE_WinVista = 0x35000,
+ VBOXOSTYPE_WinVista_x64 = 0x35100,
+ VBOXOSTYPE_Win2k8 = 0x36000,
+ VBOXOSTYPE_Win2k8_x64 = 0x36100,
+ VBOXOSTYPE_Win7 = 0x37000,
+ VBOXOSTYPE_Win7_x64 = 0x37100,
+ VBOXOSTYPE_Win8 = 0x38000,
+ VBOXOSTYPE_Win8_x64 = 0x38100,
+ VBOXOSTYPE_Win2k12_x64 = 0x39100,
+ VBOXOSTYPE_Win81 = 0x3A000,
+ VBOXOSTYPE_Win81_x64 = 0x3A100,
+ VBOXOSTYPE_Win10 = 0x3B000,
+ VBOXOSTYPE_Win10_x64 = 0x3B100,
+ VBOXOSTYPE_Win2k16_x64 = 0x3C100,
+ VBOXOSTYPE_OS2 = 0x40000,
+ VBOXOSTYPE_OS2Warp3 = 0x41000,
+ VBOXOSTYPE_OS2Warp4 = 0x42000,
+ VBOXOSTYPE_OS2Warp45 = 0x43000,
+ VBOXOSTYPE_ECS = 0x44000,
+ VBOXOSTYPE_OS21x = 0x48000,
+ VBOXOSTYPE_Linux = 0x50000,
+ VBOXOSTYPE_Linux_x64 = 0x50100,
+ VBOXOSTYPE_Linux22 = 0x51000,
+ VBOXOSTYPE_Linux24 = 0x52000,
+ VBOXOSTYPE_Linux24_x64 = 0x52100,
+ VBOXOSTYPE_Linux26 = 0x53000,
+ VBOXOSTYPE_Linux26_x64 = 0x53100,
+ VBOXOSTYPE_ArchLinux = 0x54000,
+ VBOXOSTYPE_ArchLinux_x64 = 0x54100,
+ VBOXOSTYPE_Debian = 0x55000,
+ VBOXOSTYPE_Debian_x64 = 0x55100,
+ VBOXOSTYPE_OpenSUSE = 0x56000,
+ VBOXOSTYPE_OpenSUSE_x64 = 0x56100,
+ VBOXOSTYPE_FedoraCore = 0x57000,
+ VBOXOSTYPE_FedoraCore_x64 = 0x57100,
+ VBOXOSTYPE_Gentoo = 0x58000,
+ VBOXOSTYPE_Gentoo_x64 = 0x58100,
+ VBOXOSTYPE_Mandriva = 0x59000,
+ VBOXOSTYPE_Mandriva_x64 = 0x59100,
+ VBOXOSTYPE_RedHat = 0x5A000,
+ VBOXOSTYPE_RedHat_x64 = 0x5A100,
+ VBOXOSTYPE_Turbolinux = 0x5B000,
+ VBOXOSTYPE_Turbolinux_x64 = 0x5B100,
+ VBOXOSTYPE_Ubuntu = 0x5C000,
+ VBOXOSTYPE_Ubuntu_x64 = 0x5C100,
+ VBOXOSTYPE_Xandros = 0x5D000,
+ VBOXOSTYPE_Xandros_x64 = 0x5D100,
+ VBOXOSTYPE_Oracle = 0x5E000,
+ VBOXOSTYPE_Oracle_x64 = 0x5E100,
+ VBOXOSTYPE_FreeBSD = 0x60000,
+ VBOXOSTYPE_FreeBSD_x64 = 0x60100,
+ VBOXOSTYPE_OpenBSD = 0x61000,
+ VBOXOSTYPE_OpenBSD_x64 = 0x61100,
+ VBOXOSTYPE_NetBSD = 0x62000,
+ VBOXOSTYPE_NetBSD_x64 = 0x62100,
+ VBOXOSTYPE_Netware = 0x70000,
+ VBOXOSTYPE_Solaris = 0x80000,
+ VBOXOSTYPE_Solaris_x64 = 0x80100,
+ VBOXOSTYPE_OpenSolaris = 0x81000,
+ VBOXOSTYPE_OpenSolaris_x64 = 0x81100,
+ VBOXOSTYPE_Solaris11_x64 = 0x82100,
+ VBOXOSTYPE_L4 = 0x90000,
+ VBOXOSTYPE_QNX = 0xA0000,
+ VBOXOSTYPE_MacOS = 0xB0000,
+ VBOXOSTYPE_MacOS_x64 = 0xB0100,
+ VBOXOSTYPE_MacOS106 = 0xB2000,
+ VBOXOSTYPE_MacOS106_x64 = 0xB2100,
+ VBOXOSTYPE_MacOS107_x64 = 0xB3100,
+ VBOXOSTYPE_MacOS108_x64 = 0xB4100,
+ VBOXOSTYPE_MacOS109_x64 = 0xB5100,
+ VBOXOSTYPE_MacOS1010_x64 = 0xB6100,
+ VBOXOSTYPE_MacOS1011_x64 = 0xB7100,
+ VBOXOSTYPE_JRockitVE = 0xC0000,
+ VBOXOSTYPE_Haiku = 0xD0000,
+ VBOXOSTYPE_Haiku_x64 = 0xD0100,
+ VBOXOSTYPE_VBoxBS_x64 = 0xE0100,
+ /** The mask which indicates 64-bit. */
+ VBOXOSTYPE_x64 = 1 << VBOXOSTYPE_x64_BIT,
+ /** The usual 32-bit hack. */
+ VBOXOSTYPE_32BIT_HACK = 0x7fffffff
+} VBOXOSTYPE;
+
+/**
+ * Global list of guest OS families.
+ */
+typedef enum VBOXOSFAMILY {
+ VBOXOSFAMILY_Unknown = 0,
+ VBOXOSFAMILY_Windows32 = 1,
+ VBOXOSFAMILY_Windows64 = 2,
+ VBOXOSFAMILY_Linux32 = 3,
+ VBOXOSFAMILY_Linux64 = 4,
+ VBOXOSFAMILY_FreeBSD32 = 5,
+ VBOXOSFAMILY_FreeBSD64 = 6,
+ VBOXOSFAMILY_Solaris32 = 7,
+ VBOXOSFAMILY_Solaris64 = 8,
+ VBOXOSFAMILY_MacOSX32 = 9,
+ VBOXOSFAMILY_MacOSX64 = 10,
+ /** The usual 32-bit hack. */
+ VBOXOSFAMILY_32BIT_HACK = 0x7fffffff
+} VBOXOSFAMILY;
+
+#endif
diff --git a/include/uapi/linux/vbox_vmmdev.h b/include/uapi/linux/vbox_vmmdev.h
new file mode 100644
index 000000000000..d19265dbf906
--- /dev/null
+++ b/include/uapi/linux/vbox_vmmdev.h
@@ -0,0 +1,1743 @@
+/*
+ * Virtual Device for Guest <-> VMM/Host communication (ADD,DEV).
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __UAPI_VBOX_VMMDEV_H__
+#define __UAPI_VBOX_VMMDEV_H__
+
+#include <asm/bitsperlong.h>
+#include <linux/types.h>
+#include <linux/vbox_ostypes.h>
+
+/*
+ * We cannot use linux' compiletime_assert here because it expects to be used
+ * inside a function only. Use a typedef to a char array with a negative size.
+ */
+#define VMMDEV_ASSERT_SIZE(type, size) \
+ typedef char type ## _assert_size[1 - 2*!!(sizeof(type) != (size))]
+#define VMMDEV_ASSERT_MEMBER_OFFSET(type, member, offset) \
+ typedef char type ## _ ## member ## _assert_member_offset \
+ [1 - 2*!!(offsetof(type, member) != (offset))]
+
+/*
+ * The host expects dwords / 32 bit packing. Using __aligned(4)
+ * everywhere is not really practical and also does not seem to work.
+ * Specifically I've been unable to get structs using bools and
+ * HGCMFunctionParameter32 / 64 to compile to the right size using __aligned(),
+ * so we're sticking with pragma pack(4) here.
+ */
+#pragma pack(4)
+
+/**
+ * @defgroup grp_vmmdev VMM Device
+ *
+ * @note This interface cannot be changed, it can only be extended!
+ *
+ * @{
+ */
+
+/** Port for generic request interface (relative offset). */
+#define VMMDEV_PORT_OFF_REQUEST 0
+
+/**
+ * @name VMMDev events.
+ *
+ * Used mainly by VMMDevReq_AcknowledgeEvents/VMMDevEvents and version 1.3 of
+ * VMMDevMemory.
+ *
+ * @{
+ */
+/** Host mouse capabilities have been changed. */
+#define VMMDEV_EVENT_MOUSE_CAPABILITIES_CHANGED BIT(0)
+/** HGCM event. */
+#define VMMDEV_EVENT_HGCM BIT(1)
+/** A display change request has been issued. */
+#define VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST BIT(2)
+/** Credentials are available for judgement. */
+#define VMMDEV_EVENT_JUDGE_CREDENTIALS BIT(3)
+/** The guest has been restored. */
+#define VMMDEV_EVENT_RESTORED BIT(4)
+/** Seamless mode state changed. */
+#define VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST BIT(5)
+/** Memory balloon size changed. */
+#define VMMDEV_EVENT_BALLOON_CHANGE_REQUEST BIT(6)
+/** Statistics interval changed. */
+#define VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST BIT(7)
+/** VRDP status changed. */
+#define VMMDEV_EVENT_VRDP BIT(8)
+/** New mouse position data available. */
+#define VMMDEV_EVENT_MOUSE_POSITION_CHANGED BIT(9)
+/** CPU hotplug event occurred. */
+#define VMMDEV_EVENT_CPU_HOTPLUG BIT(10)
+/** The mask of valid events, for sanity checking. */
+#define VMMDEV_EVENT_VALID_EVENT_MASK 0x000007ffU
+/** @} */
+
+/** @defgroup grp_vmmdev_req VMMDev Generic Request Interface
+ * @{
+ */
+
+/** @name Current version of the VMMDev interface.
+ *
+ * Additions are allowed to work only if
+ * additions_major == vmmdev_current && additions_minor <= vmmdev_current.
+ * Additions version is reported to host (VMMDev) by VMMDevReq_ReportGuestInfo.
+ *
+ * @remarks These defines also live in the 16-bit and assembly versions of this
+ * header.
+ */
+#define VMMDEV_VERSION 0x00010004
+#define VMMDEV_VERSION_MAJOR (VMMDEV_VERSION >> 16)
+#define VMMDEV_VERSION_MINOR (VMMDEV_VERSION & 0xffff)
+/** @} */
+
+/** Maximum request packet size. */
+#define VMMDEV_MAX_VMMDEVREQ_SIZE 1048576
+/** Maximum number of HGCM parameters. */
+#define VMMDEV_MAX_HGCM_PARMS 1024
+/** Maximum total size of hgcm buffers in one call. */
+#define VMMDEV_MAX_HGCM_DATA_SIZE 0x7fffffffU
+
+/**
+ * VMMDev request types.
+ * @note when updating this, adjust vmmdevGetRequestSize() as well
+ */
+typedef enum {
+ VMMDevReq_InvalidRequest = 0,
+ VMMDevReq_GetMouseStatus = 1,
+ VMMDevReq_SetMouseStatus = 2,
+ VMMDevReq_SetPointerShape = 3,
+ VMMDevReq_GetHostVersion = 4,
+ VMMDevReq_Idle = 5,
+ VMMDevReq_GetHostTime = 10,
+ VMMDevReq_GetHypervisorInfo = 20,
+ VMMDevReq_SetHypervisorInfo = 21,
+ VMMDevReq_RegisterPatchMemory = 22, /* since version 3.0.6 */
+ VMMDevReq_DeregisterPatchMemory = 23, /* since version 3.0.6 */
+ VMMDevReq_SetPowerStatus = 30,
+ VMMDevReq_AcknowledgeEvents = 41,
+ VMMDevReq_CtlGuestFilterMask = 42,
+ VMMDevReq_ReportGuestInfo = 50,
+ VMMDevReq_ReportGuestInfo2 = 58, /* since version 3.2.0 */
+ VMMDevReq_ReportGuestStatus = 59, /* since version 3.2.8 */
+ VMMDevReq_ReportGuestUserState = 74, /* since version 4.3 */
+ /**
+ * Retrieve a display resize request sent by the host using
+ * @a IDisplay:setVideoModeHint. Deprecated.
+ *
+ * Similar to @a VMMDevReq_GetDisplayChangeRequest2, except that it only
+ * considers host requests sent for the first virtual display. This
+ * guest-req should not be used in new guest code, and the results are
+ * undefined if a guest mixes calls to this and
+ * @a VMMDevReq_GetDisplayChangeRequest2.
+ */
+ VMMDevReq_GetDisplayChangeRequest = 51,
+ VMMDevReq_VideoModeSupported = 52,
+ VMMDevReq_GetHeightReduction = 53,
+ /**
+ * Retrieve a display resize request sent by the host using
+ * @a IDisplay:setVideoModeHint.
+ *
+ * Queries a display resize request sent from the host. If the
+ * @a eventAck member is set to true and there is an unqueried request
+ * available for one of the virtual displays then that request will
+ * be returned. If several displays have unqueried requests the lowest
+ * numbered display will be chosen first. Only the most recent unseen
+ * request for each display is remembered.
+ * If @a eventAck is set to false, the last host request queried with
+ * @a eventAck set is resent, or failing that the most recent received
+ * from the host. If no host request was ever received then all zeros
+ * are returned.
+ */
+ VMMDevReq_GetDisplayChangeRequest2 = 54,
+ VMMDevReq_ReportGuestCapabilities = 55,
+ VMMDevReq_SetGuestCapabilities = 56,
+ VMMDevReq_VideoModeSupported2 = 57, /* since version 3.2.0 */
+ VMMDevReq_GetDisplayChangeRequestEx = 80, /* since version 4.2.4 */
+ VMMDevReq_HGCMConnect = 60,
+ VMMDevReq_HGCMDisconnect = 61,
+ VMMDevReq_HGCMCall32 = 62,
+ VMMDevReq_HGCMCall64 = 63,
+ VMMDevReq_HGCMCancel = 64,
+ VMMDevReq_HGCMCancel2 = 65,
+ VMMDevReq_VideoAccelEnable = 70,
+ VMMDevReq_VideoAccelFlush = 71,
+ VMMDevReq_VideoSetVisibleRegion = 72,
+ VMMDevReq_GetSeamlessChangeRequest = 73,
+ VMMDevReq_QueryCredentials = 100,
+ VMMDevReq_ReportCredentialsJudgement = 101,
+ VMMDevReq_ReportGuestStats = 110,
+ VMMDevReq_GetMemBalloonChangeRequest = 111,
+ VMMDevReq_GetStatisticsChangeRequest = 112,
+ VMMDevReq_ChangeMemBalloon = 113,
+ VMMDevReq_GetVRDPChangeRequest = 150,
+ VMMDevReq_LogString = 200,
+ VMMDevReq_GetCpuHotPlugRequest = 210,
+ VMMDevReq_SetCpuHotPlugStatus = 211,
+ VMMDevReq_RegisterSharedModule = 212,
+ VMMDevReq_UnregisterSharedModule = 213,
+ VMMDevReq_CheckSharedModules = 214,
+ VMMDevReq_GetPageSharingStatus = 215,
+ VMMDevReq_DebugIsPageShared = 216,
+ VMMDevReq_GetSessionId = 217, /* since version 3.2.8 */
+ VMMDevReq_WriteCoreDump = 218,
+ VMMDevReq_GuestHeartbeat = 219,
+ VMMDevReq_HeartbeatConfigure = 220,
+ VMMDevReq_SizeHack = 0x7fffffff
+} VMMDevRequestType;
+
+#if __BITS_PER_LONG == 64
+#define VMMDevReq_HGCMCall VMMDevReq_HGCMCall64
+#else
+#define VMMDevReq_HGCMCall VMMDevReq_HGCMCall32
+#endif
+
+/** Version of VMMDevRequestHeader structure. */
+#define VMMDEV_REQUEST_HEADER_VERSION (0x10001)
+
+/**
+ * Generic VMMDev request header.
+ */
+typedef struct {
+ /** IN: Size of the structure in bytes (including body). */
+ u32 size;
+ /** IN: Version of the structure. */
+ u32 version;
+ /** IN: Type of the request. */
+ VMMDevRequestType requestType;
+ /** OUT: Return code. */
+ s32 rc;
+ /** Reserved field no.1. MBZ. */
+ u32 reserved1;
+ /** Reserved field no.2. MBZ. */
+ u32 reserved2;
+} VMMDevRequestHeader;
+VMMDEV_ASSERT_SIZE(VMMDevRequestHeader, 24);
+
+/**
+ * Mouse status request structure.
+ *
+ * Used by VMMDevReq_GetMouseStatus and VMMDevReq_SetMouseStatus.
+ */
+typedef struct {
+ /** header */
+ VMMDevRequestHeader header;
+ /** Mouse feature mask. See VMMDEV_MOUSE_*. */
+ u32 mouseFeatures;
+ /** Mouse x position. */
+ s32 pointerXPos;
+ /** Mouse y position. */
+ s32 pointerYPos;
+} VMMDevReqMouseStatus;
+VMMDEV_ASSERT_SIZE(VMMDevReqMouseStatus, 24+12);
+
+/**
+ * @name Mouse capability bits (VMMDevReqMouseStatus::mouseFeatures).
+ * @{
+ */
+/** The guest can (== wants to) handle absolute coordinates. */
+#define VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE BIT(0)
+/**
+ * The host can (== wants to) send absolute coordinates.
+ * (Input not captured.)
+ */
+#define VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE BIT(1)
+/**
+ * The guest can *NOT* switch to software cursor and therefore depends on the
+ * host cursor.
+ *
+ * When guest additions are installed and the host has promised to display the
+ * cursor itself, the guest installs a hardware mouse driver. Don't ask the
+ * guest to switch to a software cursor then.
+ */
+#define VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR BIT(2)
+/** The host does NOT provide support for drawing the cursor itself. */
+#define VMMDEV_MOUSE_HOST_CANNOT_HWPOINTER BIT(3)
+/** The guest can read VMMDev events to find out about pointer movement */
+#define VMMDEV_MOUSE_NEW_PROTOCOL BIT(4)
+/**
+ * If the guest changes the status of the VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR
+ * bit, the host will honour this.
+ */
+#define VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR BIT(5)
+/**
+ * The host supplies an absolute pointing device. The Guest Additions may
+ * wish to use this to decide whether to install their own driver.
+ */
+#define VMMDEV_MOUSE_HOST_HAS_ABS_DEV BIT(6)
+/** The mask of all VMMDEV_MOUSE_* flags */
+#define VMMDEV_MOUSE_MASK 0x0000007fU
+/**
+ * The mask of guest capability changes for which notification events should
+ * be sent.
+ */
+#define VMMDEV_MOUSE_NOTIFY_HOST_MASK \
+ (VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR)
+/** The mask of all capabilities which the guest can legitimately change */
+#define VMMDEV_MOUSE_GUEST_MASK \
+ (VMMDEV_MOUSE_NOTIFY_HOST_MASK | VMMDEV_MOUSE_NEW_PROTOCOL)
+/**
+ * The mask of host capability changes for which notification events should
+ * be sent.
+ */
+#define VMMDEV_MOUSE_NOTIFY_GUEST_MASK \
+ VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE
+/** The mask of all capabilities which the host can legitimately change */
+#define VMMDEV_MOUSE_HOST_MASK \
+ (VMMDEV_MOUSE_NOTIFY_GUEST_MASK |\
+ VMMDEV_MOUSE_HOST_CANNOT_HWPOINTER |\
+ VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR| \
+ VMMDEV_MOUSE_HOST_HAS_ABS_DEV)
+/** @} */
+
+/**
+ * @name Absolute mouse reporting range
+ * @{
+ */
+/** @todo Should these be here? They are needed by both host and guest. */
+/** The minimum value our pointing device can return. */
+#define VMMDEV_MOUSE_RANGE_MIN 0
+/** The maximum value our pointing device can return. */
+#define VMMDEV_MOUSE_RANGE_MAX 0xFFFF
+/** The full range our pointing device can return. */
+#define VMMDEV_MOUSE_RANGE (VMMDEV_MOUSE_RANGE_MAX - VMMDEV_MOUSE_RANGE_MIN)
+/** @} */
+
+/**
+ * Mouse pointer shape/visibility change request.
+ *
+ * Used by VMMDevReq_SetPointerShape. The size is variable.
+ */
+typedef struct VMMDevReqMousePointer {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** VBOX_MOUSE_POINTER_* bit flags from VBox/Graphics/VBoxVideo.h. */
+ u32 fFlags;
+ /** x coordinate of hot spot. */
+ u32 xHot;
+ /** y coordinate of hot spot. */
+ u32 yHot;
+ /** Width of the pointer in pixels. */
+ u32 width;
+ /** Height of the pointer in scanlines. */
+ u32 height;
+ /**
+ * Pointer data.
+ *
+ ****
+ * The data consists of 1 bpp AND mask followed by 32 bpp XOR (color)
+ * mask.
+ *
+ * For pointers without alpha channel the XOR mask pixels are 32 bit
+ * values: (lsb)BGR0(msb).
+ * For pointers with alpha channel the XOR mask consists of
+ * (lsb)BGRA(msb) 32 bit values.
+ *
+ * Guest driver must create the AND mask for pointers with alpha channel,
+ * so if host does not support alpha, the pointer could be displayed as
+ * a normal color pointer. The AND mask can be constructed from alpha
+ * values. For example alpha value >= 0xf0 means bit 0 in the AND mask.
+ *
+ * The AND mask is 1 bpp bitmap with byte aligned scanlines. Size of AND
+ * mask, therefore, is cbAnd = (width + 7) / 8 * height. The padding
+ * bits at the end of any scanline are undefined.
+ *
+ * The XOR mask follows the AND mask on the next 4 bytes aligned offset:
+ * u8 *pXor = pAnd + (cbAnd + 3) & ~3
+ * Bytes in the gap between the AND and the XOR mask are undefined.
+ * XOR mask scanlines have no gap between them and size of XOR mask is:
+ * cXor = width * 4 * height.
+ ****
+ *
+ * Preallocate 4 bytes for accessing actual data as p->pointerData.
+ */
+ char pointerData[4];
+} VMMDevReqMousePointer;
+VMMDEV_ASSERT_SIZE(VMMDevReqMousePointer, 24+24);
+
+/**
+ * String log request structure.
+ *
+ * Used by VMMDevReq_LogString.
+ * @deprecated Use the IPRT logger or VbglR3WriteLog instead.
+ */
+typedef struct {
+ /** header */
+ VMMDevRequestHeader header;
+ /** variable length string data */
+ char szString[1];
+} VMMDevReqLogString;
+VMMDEV_ASSERT_SIZE(VMMDevReqLogString, 24+4);
+
+/**
+ * VirtualBox host version request structure.
+ *
+ * Used by VMMDevReq_GetHostVersion.
+ *
+ * @remarks VBGL uses this to detect the presence of new features in the
+ * interface.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Major version. */
+ u16 major;
+ /** Minor version. */
+ u16 minor;
+ /** Build number. */
+ u32 build;
+ /** SVN revision. */
+ u32 revision;
+ /** Feature mask. */
+ u32 features;
+} VMMDevReqHostVersion;
+VMMDEV_ASSERT_SIZE(VMMDevReqHostVersion, 24+16);
+
+/**
+ * @name VMMDevReqHostVersion::features
+ * @{
+ */
+/** Physical page lists are supported by HGCM. */
+#define VMMDEV_HVF_HGCM_PHYS_PAGE_LIST BIT(0)
+/** @} */
+
+/**
+ * Guest capabilities structure.
+ *
+ * Used by VMMDevReq_ReportGuestCapabilities.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Capabilities (VMMDEV_GUEST_*). */
+ u32 caps;
+} VMMDevReqGuestCapabilities;
+VMMDEV_ASSERT_SIZE(VMMDevReqGuestCapabilities, 24+4);
+
+/**
+ * Guest capabilities structure, version 2.
+ *
+ * Used by VMMDevReq_SetGuestCapabilities.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Mask of capabilities to be added. */
+ u32 u32OrMask;
+ /** Mask of capabilities to be removed. */
+ u32 u32NotMask;
+} VMMDevReqGuestCapabilities2;
+VMMDEV_ASSERT_SIZE(VMMDevReqGuestCapabilities2, 24+8);
+
+/**
+ * @name Guest capability bits.
+ * Used by VMMDevReq_ReportGuestCapabilities and VMMDevReq_SetGuestCapabilities.
+ * @{
+ */
+/** The guest supports seamless display rendering. */
+#define VMMDEV_GUEST_SUPPORTS_SEAMLESS BIT(0)
+/** The guest supports mapping guest to host windows. */
+#define VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING BIT(1)
+/**
+ * The guest graphical additions are active.
+ * Used for fast activation and deactivation of certain graphical operations
+ * (e.g. resizing & seamless). The legacy VMMDevReq_ReportGuestCapabilities
+ * request sets this automatically, but VMMDevReq_SetGuestCapabilities does
+ * not.
+ */
+#define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2)
+/** The mask of valid events, for sanity checking. */
+#define VMMDEV_GUEST_CAPABILITIES_MASK 0x00000007U
+/** @} */
+
+/**
+ * Idle request structure.
+ *
+ * Used by VMMDevReq_Idle.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+} VMMDevReqIdle;
+VMMDEV_ASSERT_SIZE(VMMDevReqIdle, 24);
+
+/**
+ * Host time request structure.
+ *
+ * Used by VMMDevReq_GetHostTime.
+ */
+typedef struct {
+ /** Header */
+ VMMDevRequestHeader header;
+ /** OUT: Time in milliseconds since unix epoch. */
+ u64 time;
+} VMMDevReqHostTime;
+VMMDEV_ASSERT_SIZE(VMMDevReqHostTime, 24+8);
+
+/**
+ * Hypervisor info structure.
+ *
+ * Used by VMMDevReq_GetHypervisorInfo and VMMDevReq_SetHypervisorInfo.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /**
+ * Guest virtual address of proposed hypervisor start.
+ * Not used by VMMDevReq_GetHypervisorInfo.
+ * @todo Make this 64-bit compatible?
+ */
+ u32 hypervisorStart;
+ /** Hypervisor size in bytes. */
+ u32 hypervisorSize;
+} VMMDevReqHypervisorInfo;
+VMMDEV_ASSERT_SIZE(VMMDevReqHypervisorInfo, 24+8);
+
+/**
+ * @name Default patch memory size .
+ * Used by VMMDevReq_RegisterPatchMemory and VMMDevReq_DeregisterPatchMemory.
+ * @{
+ */
+#define VMMDEV_GUEST_DEFAULT_PATCHMEM_SIZE 8192
+/** @} */
+
+/**
+ * Patching memory structure. (locked executable & read-only page from the
+ * guest's perspective)
+ *
+ * Used by VMMDevReq_RegisterPatchMemory and VMMDevReq_DeregisterPatchMemory
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Guest virtual address of the patching page(s). */
+ u64 pPatchMem;
+ /** Patch page size in bytes. */
+ u32 cbPatchMem;
+} VMMDevReqPatchMemory;
+VMMDEV_ASSERT_SIZE(VMMDevReqPatchMemory, 24+12);
+
+/**
+ * Guest power requests.
+ *
+ * See VMMDevReq_SetPowerStatus and VMMDevPowerStateRequest.
+ */
+typedef enum {
+ VMMDevPowerState_Invalid = 0,
+ VMMDevPowerState_Pause = 1,
+ VMMDevPowerState_PowerOff = 2,
+ VMMDevPowerState_SaveState = 3,
+ VMMDevPowerState_SizeHack = 0x7fffffff
+} VMMDevPowerState;
+VMMDEV_ASSERT_SIZE(VMMDevPowerState, 4);
+
+/**
+ * VM power status structure.
+ *
+ * Used by VMMDevReq_SetPowerStatus.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Power state request. */
+ VMMDevPowerState powerState;
+} VMMDevPowerStateRequest;
+VMMDEV_ASSERT_SIZE(VMMDevPowerStateRequest, 24+4);
+
+/**
+ * Pending events structure.
+ *
+ * Used by VMMDevReq_AcknowledgeEvents.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** OUT: Pending event mask. */
+ u32 events;
+} VMMDevEvents;
+VMMDEV_ASSERT_SIZE(VMMDevEvents, 24+4);
+
+/**
+ * Guest event filter mask control.
+ *
+ * Used by VMMDevReq_CtlGuestFilterMask.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Mask of events to be added to the filter. */
+ u32 u32OrMask;
+ /** Mask of events to be removed from the filter. */
+ u32 u32NotMask;
+} VMMDevCtlGuestFilterMask;
+VMMDEV_ASSERT_SIZE(VMMDevCtlGuestFilterMask, 24+8);
+
+/**
+ * Guest information structure.
+ *
+ * Used by VMMDevReportGuestInfo and PDMIVMMDEVCONNECTOR::pfnUpdateGuestVersion.
+ */
+typedef struct VBoxGuestInfo {
+ /**
+ * The VMMDev interface version expected by additions.
+ * *Deprecated*, do not use anymore! Will be removed.
+ */
+ u32 interfaceVersion;
+ /** Guest OS type. */
+ VBOXOSTYPE osType;
+} VBoxGuestInfo;
+VMMDEV_ASSERT_SIZE(VBoxGuestInfo, 8);
+
+/**
+ * Guest information report.
+ *
+ * Used by VMMDevReq_ReportGuestInfo.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Guest information. */
+ VBoxGuestInfo guestInfo;
+} VMMDevReportGuestInfo;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestInfo, 24+8);
+
+/**
+ * Guest information structure, version 2.
+ *
+ * Used by VMMDevReportGuestInfo2.
+ */
+typedef struct VBoxGuestInfo2 {
+ /** Major version. */
+ u16 additionsMajor;
+ /** Minor version. */
+ u16 additionsMinor;
+ /** Build number. */
+ u32 additionsBuild;
+ /** SVN revision. */
+ u32 additionsRevision;
+ /** Feature mask, currently unused. */
+ u32 additionsFeatures;
+ /**
+ * The intentional meaning of this field was:
+ * Some additional information, for example 'Beta 1' or something like
+ * that.
+ *
+ * The way it was actually implemented: VBOX_VERSION_STRING.
+ *
+ * This means the first three members are duplicated in this field (if
+ * the guest build config is sane). So, the user must check this and
+ * chop it off before usage. There is, because of the Main code's blind
+ * trust in the field's content, no way back.
+ */
+ char szName[128];
+} VBoxGuestInfo2;
+VMMDEV_ASSERT_SIZE(VBoxGuestInfo2, 144);
+
+/**
+ * Guest information report, version 2.
+ *
+ * Used by VMMDevReq_ReportGuestInfo2.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Guest information. */
+ VBoxGuestInfo2 guestInfo;
+} VMMDevReportGuestInfo2;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestInfo2, 24+144);
+
+/**
+ * The guest facility.
+ * This needs to be kept in sync with AdditionsFacilityType of the Main API!
+ */
+typedef enum {
+ VBoxGuestFacilityType_Unknown = 0,
+ VBoxGuestFacilityType_VBoxGuestDriver = 20,
+ /* VBoxGINA / VBoxCredProv / pam_vbox. */
+ VBoxGuestFacilityType_AutoLogon = 90,
+ VBoxGuestFacilityType_VBoxService = 100,
+ /* VBoxTray (Windows), VBoxClient (Linux, Unix). */
+ VBoxGuestFacilityType_VBoxTrayClient = 101,
+ VBoxGuestFacilityType_Seamless = 1000,
+ VBoxGuestFacilityType_Graphics = 1100,
+ VBoxGuestFacilityType_All = 0x7ffffffe,
+ VBoxGuestFacilityType_SizeHack = 0x7fffffff
+} VBoxGuestFacilityType;
+VMMDEV_ASSERT_SIZE(VBoxGuestFacilityType, 4);
+
+/**
+ * The current guest status of a facility.
+ * This needs to be kept in sync with AdditionsFacilityStatus of the Main API!
+ *
+ * @remarks r=bird: Pretty please, for future types like this, simply do a
+ * linear allocation without any gaps. This stuff is impossible to work
+ * efficiently with, let alone validate. Applies to the other facility
+ * enums too.
+ */
+typedef enum {
+ VBoxGuestFacilityStatus_Inactive = 0,
+ VBoxGuestFacilityStatus_Paused = 1,
+ VBoxGuestFacilityStatus_PreInit = 20,
+ VBoxGuestFacilityStatus_Init = 30,
+ VBoxGuestFacilityStatus_Active = 50,
+ VBoxGuestFacilityStatus_Terminating = 100,
+ VBoxGuestFacilityStatus_Terminated = 101,
+ VBoxGuestFacilityStatus_Failed = 800,
+ VBoxGuestFacilityStatus_Unknown = 999,
+ VBoxGuestFacilityStatus_SizeHack = 0x7fffffff
+} VBoxGuestFacilityStatus;
+VMMDEV_ASSERT_SIZE(VBoxGuestFacilityStatus, 4);
+
+/**
+ * The facility class.
+ * This needs to be kept in sync with AdditionsFacilityClass of the Main API!
+ */
+typedef enum {
+ VBoxGuestFacilityClass_None = 0,
+ VBoxGuestFacilityClass_Driver = 10,
+ VBoxGuestFacilityClass_Service = 30,
+ VBoxGuestFacilityClass_Program = 50,
+ VBoxGuestFacilityClass_Feature = 100,
+ VBoxGuestFacilityClass_ThirdParty = 999,
+ VBoxGuestFacilityClass_All = 0x7ffffffe,
+ VBoxGuestFacilityClass_SizeHack = 0x7fffffff
+} VBoxGuestFacilityClass;
+VMMDEV_ASSERT_SIZE(VBoxGuestFacilityClass, 4);
+
+/**
+ * Guest status structure.
+ *
+ * Used by VMMDevReqGuestStatus.
+ */
+typedef struct VBoxGuestStatus {
+ /** Facility the status is indicated for. */
+ VBoxGuestFacilityType facility;
+ /** Current guest status. */
+ VBoxGuestFacilityStatus status;
+ /** Flags, not used at the moment. */
+ u32 flags;
+} VBoxGuestStatus;
+VMMDEV_ASSERT_SIZE(VBoxGuestStatus, 12);
+
+/**
+ * Guest Additions status structure.
+ *
+ * Used by VMMDevReq_ReportGuestStatus.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Guest information. */
+ VBoxGuestStatus guestStatus;
+} VMMDevReportGuestStatus;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestStatus, 24+12);
+
+/**
+ * The current status of specific guest user.
+ * This needs to be kept in sync with GuestUserState of the Main API!
+ */
+typedef enum VBoxGuestUserState {
+ VBoxGuestUserState_Unknown = 0,
+ VBoxGuestUserState_LoggedIn = 1,
+ VBoxGuestUserState_LoggedOut = 2,
+ VBoxGuestUserState_Locked = 3,
+ VBoxGuestUserState_Unlocked = 4,
+ VBoxGuestUserState_Disabled = 5,
+ VBoxGuestUserState_Idle = 6,
+ VBoxGuestUserState_InUse = 7,
+ VBoxGuestUserState_Created = 8,
+ VBoxGuestUserState_Deleted = 9,
+ VBoxGuestUserState_SessionChanged = 10,
+ VBoxGuestUserState_CredentialsChanged = 11,
+ VBoxGuestUserState_RoleChanged = 12,
+ VBoxGuestUserState_GroupAdded = 13,
+ VBoxGuestUserState_GroupRemoved = 14,
+ VBoxGuestUserState_Elevated = 15,
+ VBoxGuestUserState_SizeHack = 0x7fffffff
+} VBoxGuestUserState;
+VMMDEV_ASSERT_SIZE(VBoxGuestUserState, 4);
+
+/**
+ * Guest user status updates.
+ */
+typedef struct VBoxGuestUserStatus {
+ /** The guest user state to send. */
+ VBoxGuestUserState state;
+ /** Size (in bytes) of szUser. */
+ u32 cbUser;
+ /** Size (in bytes) of szDomain. */
+ u32 cbDomain;
+ /** Size (in bytes) of cbDetails. */
+ u32 cbDetails;
+ /** Note: Here begins the dynamically allocated region. */
+ /*
+ * The [1] arrays below are placeholders for variable-length data
+ * following the fixed part; a C99 flexible array member would change
+ * sizeof() and break the size assert below, so they stay as-is.
+ */
+ /** Guest user to report state for. */
+ char szUser[1];
+ /** Domain the guest user is bound to. */
+ char szDomain[1];
+ /** Optional details of the state. */
+ u8 aDetails[1];
+} VBoxGuestUserStatus;
+VMMDEV_ASSERT_SIZE(VBoxGuestUserStatus, 20);
+
+/**
+ * Guest user status structure.
+ *
+ * Used by VMMDevReq_ReportGuestUserStatus.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Guest user status. */
+ VBoxGuestUserStatus status;
+} VMMDevReportGuestUserState;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestUserState, 24+20);
+
+/**
+ * Guest statistics structure.
+ *
+ * Used by VMMDevReportGuestStats and PDMIVMMDEVCONNECTOR::pfnReportStatistics.
+ */
+typedef struct VBoxGuestStatistics {
+ /** Virtual CPU ID. */
+ u32 u32CpuId;
+ /** Reported statistics. */
+ u32 u32StatCaps;
+ /** Idle CPU load (0-100) for last interval. */
+ u32 u32CpuLoad_Idle;
+ /** Kernel CPU load (0-100) for last interval. */
+ u32 u32CpuLoad_Kernel;
+ /** User CPU load (0-100) for last interval. */
+ u32 u32CpuLoad_User;
+ /** Nr of threads. */
+ u32 u32Threads;
+ /** Nr of processes. */
+ u32 u32Processes;
+ /** Nr of handles. */
+ u32 u32Handles;
+ /** Memory load (0-100). */
+ u32 u32MemoryLoad;
+ /** Page size of guest system. */
+ u32 u32PageSize;
+ /** Total physical memory (in 4KB pages). */
+ u32 u32PhysMemTotal;
+ /** Available physical memory (in 4KB pages). */
+ u32 u32PhysMemAvail;
+ /** Ballooned physical memory (in 4KB pages). */
+ u32 u32PhysMemBalloon;
+ /** Total committed memory (not necessarily in-use) (in 4KB pages). */
+ u32 u32MemCommitTotal;
+ /** Total amount of memory used by the kernel (in 4KB pages). */
+ u32 u32MemKernelTotal;
+ /** Total amount of paged memory used by the kernel (in 4KB pages). */
+ u32 u32MemKernelPaged;
+ /** Total amount of nonpaged memory used by the kernel (4KB pages). */
+ u32 u32MemKernelNonPaged;
+ /** Total amount of memory used for the system cache (in 4KB pages). */
+ u32 u32MemSystemCache;
+ /** Pagefile size (in 4KB pages). */
+ u32 u32PageFileSize;
+} VBoxGuestStatistics;
+VMMDEV_ASSERT_SIZE(VBoxGuestStatistics, 19*4);
+
+/**
+ * @name Guest statistics values (VBoxGuestStatistics::u32StatCaps).
+ * Each bit marks the corresponding field above as valid.
+ * @{
+ */
+#define VBOX_GUEST_STAT_CPU_LOAD_IDLE BIT(0)
+#define VBOX_GUEST_STAT_CPU_LOAD_KERNEL BIT(1)
+#define VBOX_GUEST_STAT_CPU_LOAD_USER BIT(2)
+#define VBOX_GUEST_STAT_THREADS BIT(3)
+#define VBOX_GUEST_STAT_PROCESSES BIT(4)
+#define VBOX_GUEST_STAT_HANDLES BIT(5)
+#define VBOX_GUEST_STAT_MEMORY_LOAD BIT(6)
+#define VBOX_GUEST_STAT_PHYS_MEM_TOTAL BIT(7)
+#define VBOX_GUEST_STAT_PHYS_MEM_AVAIL BIT(8)
+#define VBOX_GUEST_STAT_PHYS_MEM_BALLOON BIT(9)
+#define VBOX_GUEST_STAT_MEM_COMMIT_TOTAL BIT(10)
+#define VBOX_GUEST_STAT_MEM_KERNEL_TOTAL BIT(11)
+#define VBOX_GUEST_STAT_MEM_KERNEL_PAGED BIT(12)
+#define VBOX_GUEST_STAT_MEM_KERNEL_NONPAGED BIT(13)
+#define VBOX_GUEST_STAT_MEM_SYSTEM_CACHE BIT(14)
+#define VBOX_GUEST_STAT_PAGE_FILE_SIZE BIT(15)
+/** @} */
+
+/**
+ * Guest statistics command structure.
+ *
+ * Used by VMMDevReq_ReportGuestStats.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Guest information. */
+ VBoxGuestStatistics guestStats;
+} VMMDevReportGuestStats;
+VMMDEV_ASSERT_SIZE(VMMDevReportGuestStats, 24+19*4);
+
+/**
+ * @name The ballooning chunk size which VMMDev works at.
+ * @{
+ */
+#define VMMDEV_MEMORY_BALLOON_CHUNK_SIZE (1048576)
+#define VMMDEV_MEMORY_BALLOON_CHUNK_PAGES (1048576 / 4096)
+/** @} */
+
+/**
+ * Poll for ballooning change request.
+ *
+ * Used by VMMDevReq_GetMemBalloonChangeRequest.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Balloon size in 1MB chunks (see VMMDEV_MEMORY_BALLOON_CHUNK_SIZE). */
+ u32 cBalloonChunks;
+ /** Guest ram size in 1MB chunks. */
+ u32 cPhysMemChunks;
+ /**
+ * Setting this to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST indicates that
+ * the request is a response to that event.
+ * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+ */
+ u32 eventAck;
+} VMMDevGetMemBalloonChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevGetMemBalloonChangeRequest, 24+12);
+
+/**
+ * Change the size of the balloon.
+ *
+ * Used by VMMDevReq_ChangeMemBalloon.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** The number of pages in the array. */
+ u32 pages;
+ /** true = inflate, false = deflate. */
+ u32 inflate;
+ /** Physical address (u64) of each page; 256 entries for a 1MB chunk of 4K pages. */
+ u64 phys_page[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES];
+} VMMDevChangeMemBalloon;
+
+/**
+ * Guest statistics interval change request structure.
+ *
+ * Used by VMMDevReq_GetStatisticsChangeRequest.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** The interval in seconds. */
+ u32 u32StatInterval;
+ /**
+ * Setting this to VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST
+ * indicates that the request is a response to that event.
+ * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+ */
+ u32 eventAck;
+} VMMDevGetStatisticsChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevGetStatisticsChangeRequest, 24+8);
+
+/**
+ * The size of a string field in the credentials request (including '\\0').
+ * @see VMMDevCredentials
+ */
+#define VMMDEV_CREDENTIALS_SZ_SIZE 128
+
+/**
+ * Credentials request structure.
+ *
+ * Used by VMMDevReq_QueryCredentials.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** IN/OUT: Request flags. */
+ u32 u32Flags;
+ /** OUT: User name (UTF-8). */
+ char szUserName[VMMDEV_CREDENTIALS_SZ_SIZE];
+ /** OUT: Password (UTF-8). */
+ char szPassword[VMMDEV_CREDENTIALS_SZ_SIZE];
+ /** OUT: Domain name (UTF-8). */
+ char szDomain[VMMDEV_CREDENTIALS_SZ_SIZE];
+} VMMDevCredentials;
+VMMDEV_ASSERT_SIZE(VMMDevCredentials, 24+4+3*128);
+
+/**
+ * @name Credentials request flag (VMMDevCredentials::u32Flags)
+ * Note: bit 0 is not used; the flags start at BIT(1).
+ * @{
+ */
+/** query from host whether credentials are present */
+#define VMMDEV_CREDENTIALS_QUERYPRESENCE BIT(1)
+/** read credentials from host (can be combined with clear) */
+#define VMMDEV_CREDENTIALS_READ BIT(2)
+/** clear credentials on host (can be combined with read) */
+#define VMMDEV_CREDENTIALS_CLEAR BIT(3)
+/** read credentials for judgement in the guest */
+#define VMMDEV_CREDENTIALS_READJUDGE BIT(8)
+/** clear credentials for judgement on the host */
+#define VMMDEV_CREDENTIALS_CLEARJUDGE BIT(9)
+/** report credentials acceptance by guest */
+#define VMMDEV_CREDENTIALS_JUDGE_OK BIT(10)
+/** report credentials denial by guest */
+#define VMMDEV_CREDENTIALS_JUDGE_DENY BIT(11)
+/** report that no judgement could be made by guest */
+#define VMMDEV_CREDENTIALS_JUDGE_NOJUDGEMENT BIT(12)
+
+/** flag telling the guest that credentials are present */
+#define VMMDEV_CREDENTIALS_PRESENT BIT(16)
+/** flag telling guest that local logons should be prohibited */
+#define VMMDEV_CREDENTIALS_NOLOCALLOGON BIT(17)
+/** @} */
+
+/**
+ * Seamless mode.
+ *
+ * Used by VbglR3SeamlessWaitEvent
+ *
+ * @ingroup grp_vmmdev_req
+ *
+ * @todo DARN! DARN! DARN! Who forgot to do the 32-bit hack here???
+ * FIXME! XXX!
+ *
+ * We will now have to carefully check how our compilers have treated this
+ * flag. If any are compressing it into a byte type, we'll have to check
+ * how the request memory is initialized. If we are 104% sure it's ok to
+ * expand it, we'll expand it. If not, we must redefine the field to a
+ * u8 and a 3 byte padding.
+ */
+typedef enum {
+ /** normal mode; entire guest desktop displayed. */
+ VMMDev_Seamless_Disabled = 0,
+ /** visible region mode; only top-level guest windows displayed. */
+ VMMDev_Seamless_Visible_Region = 1,
+ /**
+ * windowed mode; each top-level guest window is represented in a
+ * host window.
+ */
+ VMMDev_Seamless_Host_Window = 2
+ /*
+ * NOTE(review): unlike the other enums in this header there is no
+ * 0x7fffffff SizeHack member to force a 32-bit representation; this
+ * is what the @todo above is about.
+ */
+} VMMDevSeamlessMode;
+
+/**
+ * Seamless mode change request structure.
+ *
+ * Used by VMMDevReq_GetSeamlessChangeRequest.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+
+ /** New seamless mode. */
+ VMMDevSeamlessMode mode;
+ /**
+ * Setting this to VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST indicates
+ * that the request is a response to that event.
+ * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+ */
+ u32 eventAck;
+} VMMDevSeamlessChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevSeamlessChangeRequest, 24+8);
+VMMDEV_ASSERT_MEMBER_OFFSET(VMMDevSeamlessChangeRequest, eventAck, 24+4);
+
+/**
+ * Display change request structure.
+ *
+ * Used by VMMDevReq_GetDisplayChangeRequest.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Horizontal pixel resolution (0 = do not change). */
+ u32 xres;
+ /** Vertical pixel resolution (0 = do not change). */
+ u32 yres;
+ /** Bits per pixel (0 = do not change). */
+ u32 bpp;
+ /**
+ * Setting this to VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST indicates
+ * that the request is a response to that event.
+ * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+ */
+ u32 eventAck;
+} VMMDevDisplayChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevDisplayChangeRequest, 24+16);
+
+/**
+ * Display change request structure, version 2.
+ *
+ * Used by VMMDevReq_GetDisplayChangeRequest2.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Horizontal pixel resolution (0 = do not change). */
+ u32 xres;
+ /** Vertical pixel resolution (0 = do not change). */
+ u32 yres;
+ /** Bits per pixel (0 = do not change). */
+ u32 bpp;
+ /**
+ * Setting this to VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST indicates
+ * that the request is a response to that event.
+ * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+ */
+ u32 eventAck;
+ /** 0 for primary display, 1 for the first secondary, etc. */
+ u32 display;
+} VMMDevDisplayChangeRequest2;
+VMMDEV_ASSERT_SIZE(VMMDevDisplayChangeRequest2, 24+20);
+
+/**
+ * Display change request structure, version Extended.
+ *
+ * Used by VMMDevReq_GetDisplayChangeRequestEx.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Horizontal pixel resolution (0 = do not change). */
+ u32 xres;
+ /** Vertical pixel resolution (0 = do not change). */
+ u32 yres;
+ /** Bits per pixel (0 = do not change). */
+ u32 bpp;
+ /**
+ * Setting this to VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST indicates
+ * that the request is a response to that event.
+ * (Don't confuse this with VMMDevReq_AcknowledgeEvents.)
+ */
+ u32 eventAck;
+ /** 0 for primary display, 1 for the first secondary, etc. */
+ u32 display;
+ /** New OriginX of secondary virtual screen */
+ u32 cxOrigin;
+ /** New OriginY of secondary virtual screen */
+ u32 cyOrigin;
+ /*
+ * NOTE(review): 'bool' in a guest/host ABI structure has a
+ * compiler-dependent size; the 24+32 assert below relies on each bool
+ * being 1 byte plus 2 bytes of tail padding -- confirm for all
+ * targets (consider u8 instead).
+ */
+ /** Change in origin of the secondary virtual screen is required */
+ bool fChangeOrigin;
+ /** Secondary virtual screen enabled or disabled */
+ bool fEnabled;
+} VMMDevDisplayChangeRequestEx;
+VMMDEV_ASSERT_SIZE(VMMDevDisplayChangeRequestEx, 24+32);
+
+/**
+ * Video mode supported request structure.
+ *
+ * Used by VMMDevReq_VideoModeSupported.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** IN: Horizontal pixel resolution. */
+ u32 width;
+ /** IN: Vertical pixel resolution. */
+ u32 height;
+ /** IN: Bits per pixel. */
+ u32 bpp;
+ /** OUT: Support indicator. (bool + 3 bytes implicit padding reach 24+16.) */
+ bool fSupported;
+} VMMDevVideoModeSupportedRequest;
+VMMDEV_ASSERT_SIZE(VMMDevVideoModeSupportedRequest, 24+16);
+
+/**
+ * Video mode supported request structure for a specific display.
+ *
+ * Used by VMMDevReq_VideoModeSupported2.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** IN: The guest display number. */
+ u32 display;
+ /** IN: Horizontal pixel resolution. */
+ u32 width;
+ /** IN: Vertical pixel resolution. */
+ u32 height;
+ /** IN: Bits per pixel. */
+ u32 bpp;
+ /** OUT: Support indicator. (bool + 3 bytes implicit padding reach 24+20.) */
+ bool fSupported;
+} VMMDevVideoModeSupportedRequest2;
+VMMDEV_ASSERT_SIZE(VMMDevVideoModeSupportedRequest2, 24+20);
+
+/**
+ * Video modes height reduction request structure.
+ *
+ * Used by VMMDevReq_GetHeightReduction.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** OUT: Height reduction in pixels. */
+ u32 heightReduction;
+} VMMDevGetHeightReductionRequest;
+VMMDEV_ASSERT_SIZE(VMMDevGetHeightReductionRequest, 24+4);
+
+/**
+ * VRDP change request structure.
+ *
+ * Used by VMMDevReq_GetVRDPChangeRequest.
+ */
+typedef struct {
+ /** Header */
+ VMMDevRequestHeader header;
+ /** Whether VRDP is active or not. */
+ u8 u8VRDPActive;
+ /**
+ * The configured experience level for active VRDP.
+ * (3 bytes of implicit padding precede this field; the offset assert
+ * below pins it at 24+4.)
+ */
+ u32 u32VRDPExperienceLevel;
+} VMMDevVRDPChangeRequest;
+VMMDEV_ASSERT_SIZE(VMMDevVRDPChangeRequest, 24+8);
+VMMDEV_ASSERT_MEMBER_OFFSET(VMMDevVRDPChangeRequest, u8VRDPActive, 24);
+VMMDEV_ASSERT_MEMBER_OFFSET(VMMDevVRDPChangeRequest, u32VRDPExperienceLevel,
+ 24+4);
+
+/**
+ * @name VRDP Experience level (VMMDevVRDPChangeRequest::u32VRDPExperienceLevel)
+ * @{
+ */
+#define VRDP_EXPERIENCE_LEVEL_ZERO 0 /**< Theming disabled. */
+#define VRDP_EXPERIENCE_LEVEL_LOW 1 /**< Full win drag + wallpaper dis. */
+#define VRDP_EXPERIENCE_LEVEL_MEDIUM 2 /**< Font smoothing, gradients. */
+#define VRDP_EXPERIENCE_LEVEL_HIGH 3 /**< Animation effects disabled. */
+#define VRDP_EXPERIENCE_LEVEL_FULL 4 /**< Everything enabled. */
+/** @} */
+
+/**
+ * VBVA enable request structure.
+ *
+ * Used by VMMDevReq_VideoAccelEnable.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** 0 - disable, !0 - enable. */
+ u32 u32Enable;
+ /**
+ * The size of VBVAMEMORY::au8RingBuffer expected by driver.
+ * The host will refuse to enable VBVA if the size is not equal to
+ * VBVA_RING_BUFFER_SIZE.
+ */
+ u32 cbRingBuffer;
+ /**
+ * Guest initializes the status to 0. Host sets appropriate
+ * VBVA_F_STATUS_ flags.
+ */
+ u32 fu32Status;
+} VMMDevVideoAccelEnable;
+VMMDEV_ASSERT_SIZE(VMMDevVideoAccelEnable, 24+12);
+
+/**
+ * @name VMMDevVideoAccelEnable::fu32Status.
+ * @{
+ */
+#define VBVA_F_STATUS_ACCEPTED (0x01)
+#define VBVA_F_STATUS_ENABLED (0x02)
+/** @} */
+
+/**
+ * VBVA flush request structure.
+ *
+ * Used by VMMDevReq_VideoAccelFlush.
+ */
+typedef struct {
+ /** Header. Carries no payload beyond the generic request header. */
+ VMMDevRequestHeader header;
+} VMMDevVideoAccelFlush;
+VMMDEV_ASSERT_SIZE(VMMDevVideoAccelFlush, 24);
+
+/**
+ * Rectangle data type, double point.
+ */
+typedef struct RTRECT {
+ /** left X coordinate. */
+ s32 xLeft;
+ /** top Y coordinate. */
+ s32 yTop;
+ /** right X coordinate. (exclusive) */
+ s32 xRight;
+ /** bottom Y coordinate. (exclusive) */
+ s32 yBottom;
+} RTRECT;
+
+/**
+ * VBVA set visible region request structure.
+ *
+ * Used by VMMDevReq_VideoSetVisibleRegion.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Number of rectangles. */
+ u32 cRect;
+ /**
+ * Rectangle array.
+ * @todo array is spelled aRects[1].
+ * (The remaining cRect-1 rectangles presumably follow this one in
+ * memory -- confirm against the host-side implementation.)
+ */
+ RTRECT Rect;
+} VMMDevVideoSetVisibleRegion;
+VMMDEV_ASSERT_SIZE(RTRECT, 16);
+VMMDEV_ASSERT_SIZE(VMMDevVideoSetVisibleRegion, 24+4+16);
+
+/**
+ * CPU hotplug status values (enable / disable a CPU).
+ */
+typedef enum {
+ VMMDevCpuStatusType_Invalid = 0,
+ VMMDevCpuStatusType_Disable = 1,
+ VMMDevCpuStatusType_Enable = 2,
+ VMMDevCpuStatusType_SizeHack = 0x7fffffff
+} VMMDevCpuStatusType;
+
+/**
+ * CPU hotplug event status request.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Status type */
+ VMMDevCpuStatusType enmStatusType;
+} VMMDevCpuHotPlugStatusRequest;
+VMMDEV_ASSERT_SIZE(VMMDevCpuHotPlugStatusRequest, 24+4);
+
+/**
+ * CPU event types.
+ *
+ * Used by VbglR3CpuHotplugWaitForEvent
+ *
+ * @ingroup grp_vmmdev_req
+ */
+typedef enum {
+ VMMDevCpuEventType_Invalid = 0,
+ VMMDevCpuEventType_None = 1,
+ VMMDevCpuEventType_Plug = 2,
+ VMMDevCpuEventType_Unplug = 3,
+ VMMDevCpuEventType_SizeHack = 0x7fffffff
+} VMMDevCpuEventType;
+
+/**
+ * Get the ID of the changed CPU and event type.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Event type */
+ VMMDevCpuEventType enmEventType;
+ /** core id of the CPU changed */
+ u32 idCpuCore;
+ /** package id of the CPU changed */
+ u32 idCpuPackage;
+} VMMDevGetCpuHotPlugRequest;
+VMMDEV_ASSERT_SIZE(VMMDevGetCpuHotPlugRequest, 24+4+4+4);
+
+/**
+ * Shared region description
+ */
+typedef struct VMMDEVSHAREDREGIONDESC {
+ u64 GCRegionAddr;
+ u32 cbRegion;
+ u32 u32Alignment;
+} VMMDEVSHAREDREGIONDESC;
+VMMDEV_ASSERT_SIZE(VMMDEVSHAREDREGIONDESC, 16);
+
+#define VMMDEVSHAREDREGIONDESC_MAX 32
+
+/**
+ * Shared module registration
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Shared module size. */
+ u32 cbModule;
+ /** Number of included region descriptors */
+ u32 cRegions;
+ /** Base address of the shared module. */
+ u64 GCBaseAddr;
+ /** Guest OS type. */
+ VBOXOSFAMILY enmGuestOS;
+ /** Alignment. */
+ u32 u32Align;
+ /** Module name */
+ char szName[128];
+ /** Module version */
+ char szVersion[16];
+ /**
+ * Shared region descriptor(s). [1] is a placeholder; cRegions
+ * descriptors are expected here (the size assert counts one).
+ */
+ VMMDEVSHAREDREGIONDESC aRegions[1];
+} VMMDevSharedModuleRegistrationRequest;
+VMMDEV_ASSERT_SIZE(VMMDevSharedModuleRegistrationRequest,
+ 24+4+4+8+4+4+128+16+16);
+
+/**
+ * Shared module unregistration
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Shared module size. */
+ u32 cbModule;
+ /** Align at 8 byte boundary. */
+ u32 u32Alignment;
+ /** Base address of the shared module. */
+ u64 GCBaseAddr;
+ /** Module name */
+ char szName[128];
+ /** Module version */
+ char szVersion[16];
+} VMMDevSharedModuleUnregistrationRequest;
+VMMDEV_ASSERT_SIZE(VMMDevSharedModuleUnregistrationRequest, 24+4+4+8+128+16);
+
+/**
+ * Shared module periodic check
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+} VMMDevSharedModuleCheckRequest;
+VMMDEV_ASSERT_SIZE(VMMDevSharedModuleCheckRequest, 24);
+
+/**
+ * Paging sharing enabled query
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Enabled flag (out) */
+ bool fEnabled;
+ /** Alignment */
+ bool fAlignment[3];
+} VMMDevPageSharingStatusRequest;
+VMMDEV_ASSERT_SIZE(VMMDevPageSharingStatusRequest, 24+4);
+
+/**
+ * Page sharing status query (debug build only)
+ *
+ * Note: no VMMDEV_ASSERT_SIZE here -- sizeof(unsigned long) differs
+ * between 32-bit and 64-bit builds (see GCPtrPage below), so the
+ * structure size is build-dependent.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Page address, 32 bits on 32 bit builds, 64 bit on 64 bit builds */
+ unsigned long GCPtrPage;
+ /** Page flags. */
+ u64 uPageFlags;
+ /** Shared flag (out) */
+ bool fShared;
+ /** Alignment */
+ bool fAlignment[3];
+} VMMDevPageIsSharedRequest;
+
+/**
+ * Session id request structure.
+ *
+ * Used by VMMDevReq_GetSessionId.
+ */
+typedef struct {
+ /** Header */
+ VMMDevRequestHeader header;
+ /**
+ * OUT: unique session id; the id will be different after each start,
+ * reset or restore of the VM.
+ */
+ u64 idSession;
+} VMMDevReqSessionId;
+VMMDEV_ASSERT_SIZE(VMMDevReqSessionId, 24+8);
+
+/**
+ * Write Core Dump request.
+ *
+ * Used by VMMDevReq_WriteCoreDump.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** Flags (reserved, MBZ). */
+ u32 fFlags;
+} VMMDevReqWriteCoreDump;
+VMMDEV_ASSERT_SIZE(VMMDevReqWriteCoreDump, 24+4);
+
+/** Heart beat check state structure. Used by VMMDevReq_HeartbeatConfigure. */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** OUT: Guest heartbeat interval in nanosec. */
+ u64 cNsInterval;
+ /** Heartbeat check flag. (bool + 3 bytes implicit padding reach 24+12.) */
+ bool fEnabled;
+} VMMDevReqHeartbeat;
+VMMDEV_ASSERT_SIZE(VMMDevReqHeartbeat, 24+12);
+
+/**
+ * @name HGCM flags.
+ * Note: VBOX_HGCM_REQ_DONE references VBOX_HGCM_REQ_DONE_BIT which is
+ * defined on the following line; this is fine as macros are expanded at
+ * the point of use.
+ * @{
+ */
+#define VBOX_HGCM_REQ_DONE BIT(VBOX_HGCM_REQ_DONE_BIT)
+#define VBOX_HGCM_REQ_DONE_BIT 0
+#define VBOX_HGCM_REQ_CANCELLED (0x2)
+/** @} */
+
+/**
+ * HGCM request header.
+ */
+typedef struct VMMDevHGCMRequestHeader {
+ /** Request header. */
+ VMMDevRequestHeader header;
+
+ /** HGCM flags. */
+ u32 fu32Flags;
+
+ /** Result code. */
+ s32 result;
+} VMMDevHGCMRequestHeader;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMRequestHeader, 24+8);
+
+/**
+ * HGCM service location types.
+ * @ingroup grp_vmmdev_req
+ */
+typedef enum {
+ VMMDevHGCMLoc_Invalid = 0,
+ VMMDevHGCMLoc_LocalHost = 1,
+ VMMDevHGCMLoc_LocalHost_Existing = 2,
+ VMMDevHGCMLoc_SizeHack = 0x7fffffff
+} HGCMServiceLocationType;
+VMMDEV_ASSERT_SIZE(HGCMServiceLocationType, 4);
+
+/**
+ * HGCM host service location.
+ * @ingroup grp_vmmdev_req
+ */
+typedef struct {
+ char achName[128]; /**< This is really szName. */
+} HGCMServiceLocationHost;
+VMMDEV_ASSERT_SIZE(HGCMServiceLocationHost, 128);
+
+/**
+ * HGCM service location.
+ * @ingroup grp_vmmdev_req
+ */
+typedef struct HGCMSERVICELOCATION {
+ /** Type of the location. */
+ HGCMServiceLocationType type;
+
+ union {
+ HGCMServiceLocationHost host;
+ } u;
+} HGCMServiceLocation;
+VMMDEV_ASSERT_SIZE(HGCMServiceLocation, 128+4);
+
+/**
+ * HGCM connect request structure.
+ *
+ * Used by VMMDevReq_HGCMConnect.
+ */
+typedef struct {
+ /** HGCM request header. */
+ VMMDevHGCMRequestHeader header;
+
+ /** IN: Description of service to connect to. */
+ HGCMServiceLocation loc;
+
+ /** OUT: Client identifier assigned by local instance of HGCM. */
+ u32 u32ClientID;
+} VMMDevHGCMConnect;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMConnect, 32+132+4);
+
+/**
+ * HGCM disconnect request structure.
+ *
+ * Used by VMMDevReq_HGCMDisconnect.
+ */
+typedef struct {
+ /** HGCM request header. */
+ VMMDevHGCMRequestHeader header;
+
+ /** IN: Client identifier. */
+ u32 u32ClientID;
+} VMMDevHGCMDisconnect;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMDisconnect, 32+4);
+
+/**
+ * HGCM parameter type.
+ */
+typedef enum {
+ VMMDevHGCMParmType_Invalid = 0,
+ VMMDevHGCMParmType_32bit = 1,
+ VMMDevHGCMParmType_64bit = 2,
+ /** @deprecated Doesn't work, use PageList. */
+ VMMDevHGCMParmType_PhysAddr = 3,
+ /** In and Out */
+ VMMDevHGCMParmType_LinAddr = 4,
+ /** In (read; host<-guest) */
+ VMMDevHGCMParmType_LinAddr_In = 5,
+ /** Out (write; host->guest) */
+ VMMDevHGCMParmType_LinAddr_Out = 6,
+ /* 7 - 9 VMMDevHGCMParmType_LinAddr_Locked*, non Linux R0 usage only */
+ /** Physical addresses of locked pages for a buffer. */
+ VMMDevHGCMParmType_PageList = 10,
+ VMMDevHGCMParmType_SizeHack = 0x7fffffff
+} HGCMFunctionParameterType;
+VMMDEV_ASSERT_SIZE(HGCMFunctionParameterType, 4);
+
+/**
+ * HGCM function parameter, 32-bit client.
+ */
+typedef struct HGCMFunctionParameter32 {
+ HGCMFunctionParameterType type;
+ union {
+ u32 value32;
+ u64 value64;
+ struct {
+ u32 size;
+ union {
+ u32 physAddr;
+ u32 linearAddr;
+ } u;
+ } Pointer;
+ struct {
+ /** Size of the buffer described by the page list. */
+ u32 size;
+ /** Relative to the request header. */
+ u32 offset;
+ } PageList;
+ } u;
+} HGCMFunctionParameter32;
+VMMDEV_ASSERT_SIZE(HGCMFunctionParameter32, 4+8);
+
+/**
+ * HGCM function parameter, 64-bit client.
+ */
+typedef struct HGCMFunctionParameter64 {
+ HGCMFunctionParameterType type;
+ union {
+ u32 value32;
+ u64 value64;
+ struct {
+ u32 size;
+ union {
+ u64 physAddr;
+ u64 linearAddr;
+ } u;
+ } Pointer;
+ struct {
+ /** Size of the buffer described by the page list. */
+ u32 size;
+ /** Relative to the request header. */
+ u32 offset;
+ } PageList;
+ } u;
+} HGCMFunctionParameter64;
+VMMDEV_ASSERT_SIZE(HGCMFunctionParameter64, 4+12);
+
+/* Select the parameter layout matching this build's pointer size. */
+#if __BITS_PER_LONG == 64
+#define HGCMFunctionParameter HGCMFunctionParameter64
+#else
+#define HGCMFunctionParameter HGCMFunctionParameter32
+#endif
+
+/**
+ * HGCM call request structure.
+ *
+ * Used by VMMDevReq_HGCMCall32 and VMMDevReq_HGCMCall64.
+ */
+typedef struct {
+ /* request header */
+ VMMDevHGCMRequestHeader header;
+
+ /** IN: Client identifier. */
+ u32 u32ClientID;
+ /** IN: Service function number. */
+ u32 u32Function;
+ /** IN: Number of parameters. */
+ u32 cParms;
+ /** Parameters follow in form: HGCMFunctionParameter32|64 aParms[X]; */
+} VMMDevHGCMCall;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMCall, 32+12);
+
+/**
+ * @name Direction of data transfer (HGCMPageListInfo::flags). Bit flags.
+ * @{
+ */
+#define VBOX_HGCM_F_PARM_DIRECTION_NONE 0x00000000U
+#define VBOX_HGCM_F_PARM_DIRECTION_TO_HOST 0x00000001U
+#define VBOX_HGCM_F_PARM_DIRECTION_FROM_HOST 0x00000002U
+#define VBOX_HGCM_F_PARM_DIRECTION_BOTH 0x00000003U
+/**
+ * Macro for validating that the specified flags are valid.
+ * Note BOTH is not valid.
+ */
+#define VBOX_HGCM_F_PARM_ARE_VALID(fFlags) \
+ ((fFlags) > VBOX_HGCM_F_PARM_DIRECTION_NONE && \
+ (fFlags) < VBOX_HGCM_F_PARM_DIRECTION_BOTH)
+/** @} */
+
+/**
+ * VMMDevHGCMParmType_PageList points to this structure to actually describe the
+ * buffer.
+ */
+typedef struct {
+ u32 flags; /**< VBOX_HGCM_F_PARM_*. */
+ u16 offFirstPage; /**< Offset in the first page where data begins. */
+ u16 cPages; /**< Number of pages. */
+ u64 aPages[1]; /**< Page addresses. ([1] is a placeholder for cPages entries.) */
+} HGCMPageListInfo;
+VMMDEV_ASSERT_SIZE(HGCMPageListInfo, 4+2+2+8);
+
+/** Get the pointer to the first parameter of a HGCM call request. */
+#define VMMDEV_HGCM_CALL_PARMS(a) \
+ ((HGCMFunctionParameter *)((u8 *)(a) + sizeof(VMMDevHGCMCall)))
+
+#define VBOX_HGCM_MAX_PARMS 32
+
+/**
+ * HGCM cancel request structure.
+ *
+ * The Cancel request is issued using the same physical memory address as was
+ * used for the corresponding initial HGCMCall.
+ *
+ * Used by VMMDevReq_HGCMCancel.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevHGCMRequestHeader header;
+} VMMDevHGCMCancel;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMCancel, 32);
+
+/**
+ * HGCM cancel request structure, version 2.
+ *
+ * Used by VMMDevReq_HGCMCancel2.
+ *
+ * VINF_SUCCESS when cancelled.
+ * VERR_NOT_FOUND if the specified request cannot be found.
+ * VERR_INVALID_PARAMETER if the address is invalid.
+ */
+typedef struct {
+ /** Header. */
+ VMMDevRequestHeader header;
+ /** The physical address of the request to cancel. */
+ u32 physReqToCancel;
+} VMMDevHGCMCancel2;
+VMMDEV_ASSERT_SIZE(VMMDevHGCMCancel2, 24+4);
+
+/** @} */
+
+#pragma pack()
+
+#endif
diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
new file mode 100644
index 000000000000..3f2342838489
--- /dev/null
+++ b/include/uapi/linux/vboxguest.h
@@ -0,0 +1,374 @@
+/*
+ * VBoxGuest - VirtualBox Guest Additions Driver Interface. (ADD,DEV)
+ *
+ * Copyright (C) 2006-2016 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ *
+ * The contents of this file may alternatively be used under the terms
+ * of the Common Development and Distribution License Version 1.0
+ * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
+ * VirtualBox OSE distribution, in which case the provisions of the
+ * CDDL are applicable instead of those of the GPL.
+ *
+ * You may elect to license modified versions of this file under the
+ * terms and conditions of either the GPL or the CDDL or both.
+ */
+
+#ifndef __UAPI_VBOXGUEST_H__
+#define __UAPI_VBOXGUEST_H__
+
+#include <asm/bitsperlong.h>
+#include <linux/ioctl.h>
+#include <linux/vbox_vmmdev.h> /* For HGCMServiceLocation */
+
+/*
+ * We cannot use linux' compiletime_assert here because it expects to be used
+ * inside a function only. Use a typedef to a char array with a negative size.
+ */
+#define VBOXGUEST_ASSERT_SIZE(type, size) \
+ typedef char type ## _assert_size[1 - 2*!!(sizeof(type) != (size))]
+
+/**
+ * @defgroup grp_vboxguest VirtualBox Guest Additions Device Driver
+ *
+ * Also known as VBoxGuest.
+ *
+ * @{
+ */
+
+/**
+ * @defgroup grp_vboxguest_ioc VirtualBox Guest Additions Driver Interface
+ * @{
+ */
+
+/**
+ * @name VBoxGuest IOCTL codes and structures.
+ *
+ * The range 0..15 is for basic driver communication.
+ * The range 16..31 is for HGCM communication.
+ * The range 32..47 is reserved for future use.
+ * The range 48..63 is for OS specific communication.
+ * The 7th bit is reserved for future hacks.
+ * The 8th bit is reserved for distinguishing between 32-bit and 64-bit
+ * processes in future 64-bit guest additions.
+ * @{
+ */
+#if __BITS_PER_LONG == 64
+#define VBOXGUEST_IOCTL_FLAG 128
+#else
+#define VBOXGUEST_IOCTL_FLAG 0
+#endif
+/** @} */
+
+#define VBOXGUEST_IOCTL_CODE_(function, size) \
+ _IOC(_IOC_READ|_IOC_WRITE, 'V', (function), (size))
+#define VBOXGUEST_IOCTL_STRIP_SIZE(code) \
+ VBOXGUEST_IOCTL_CODE_(_IOC_NR((code)), 0)
+
+#define VBOXGUEST_IOCTL_CODE(function, size) \
+ VBOXGUEST_IOCTL_CODE_((function) | VBOXGUEST_IOCTL_FLAG, size)
+/* Define 32 bit codes to support 32 bit applications in 64 bit guest driver. */
+#define VBOXGUEST_IOCTL_CODE_32(function, size) \
+VBOXGUEST_IOCTL_CODE_(function, size)
+
+
+/** IOCTL to VBoxGuest to wait for a VMMDev host notification */
+#define VBOXGUEST_IOCTL_WAITEVENT \
+ VBOXGUEST_IOCTL_CODE_(2, sizeof(VBoxGuestWaitEventInfo))
+
+/**
+ * @name Result codes for VBoxGuestWaitEventInfo::u32Result
+ * @{
+ */
+/** Successful completion, an event occurred. */
+#define VBOXGUEST_WAITEVENT_OK (0)
+/** Successful completion, timed out. */
+#define VBOXGUEST_WAITEVENT_TIMEOUT (1)
+/** Wait was interrupted. */
+#define VBOXGUEST_WAITEVENT_INTERRUPTED (2)
+/** An error occurred while processing the request. */
+#define VBOXGUEST_WAITEVENT_ERROR (3)
+/** @} */
+
+/*
+ * NOTE(review): this is a uapi header, but the structures below use the
+ * kernel-internal u32 type rather than __u32 from <linux/types.h>;
+ * userspace builds will not have u32 defined -- confirm / convert.
+ */
+/** Input and output buffers layout of the IOCTL_VBOXGUEST_WAITEVENT */
+typedef struct VBoxGuestWaitEventInfo {
+ /** timeout in milliseconds */
+ u32 u32TimeoutIn;
+ /** events to wait for */
+ u32 u32EventMaskIn;
+ /** result code */
+ u32 u32Result;
+ /** events occurred */
+ u32 u32EventFlagsOut;
+} VBoxGuestWaitEventInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestWaitEventInfo, 16);
+
+
+/**
+ * IOCTL to VBoxGuest to perform a VMM request
+ * @remark The data buffer for this IOCtl has a variable size, keep this in
+ * mind on systems where this matters.
+ */
+#define VBOXGUEST_IOCTL_VMMREQUEST(size) \
+ VBOXGUEST_IOCTL_CODE_(3, (size))
+
+
+/** IOCTL to VBoxGuest to control event filter mask. */
+#define VBOXGUEST_IOCTL_CTL_FILTER_MASK \
+ VBOXGUEST_IOCTL_CODE_(4, sizeof(VBoxGuestFilterMaskInfo))
+
+/** Input and output buffer layout of the IOCTL_VBOXGUEST_CTL_FILTER_MASK. */
+typedef struct VBoxGuestFilterMaskInfo {
+ u32 u32OrMask;
+ u32 u32NotMask;
+} VBoxGuestFilterMaskInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestFilterMaskInfo, 8);
+
+/**
+ * IOCTL to VBoxGuest to interrupt (cancel) any pending WAITEVENTs and return.
+ * Handled inside the guest additions and not seen by the host at all.
+ * After calling this, VBOXGUEST_IOCTL_WAITEVENT should no longer be called in
+ * the same session. Any VBOXGUEST_IOCTL_WAITEVENT calls in the same session
+ * done after calling this will directly exit with VERR_INTERRUPTED.
+ * @see VBOXGUEST_IOCTL_WAITEVENT
+ */
+#define VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS \
+ VBOXGUEST_IOCTL_CODE_(5, 0)
+
+/**
+ * IOCTL to VBoxGuest to perform backdoor logging.
+ * The argument is a string buffer of the specified size.
+ */
+#define VBOXGUEST_IOCTL_LOG(size) \
+ VBOXGUEST_IOCTL_CODE_(6, (size))
+
+/**
+ * IOCTL to VBoxGuest to check memory ballooning. The guest kernel module /
+ * device driver will ask the host for the current size of the balloon and
+ * adjust the size. Or it will set fHandledInR0 = false and R3 is responsible
+ * for allocating memory and calling R0 (VBOXGUEST_IOCTL_CHANGE_BALLOON).
+ */
+#define VBOXGUEST_IOCTL_CHECK_BALLOON \
+ VBOXGUEST_IOCTL_CODE_(7, sizeof(VBoxGuestCheckBalloonInfo))
+
+/** Output buffer layout of the VBOXGUEST_IOCTL_CHECK_BALLOON. */
+typedef struct VBoxGuestCheckBalloonInfo {
+ /** The size of the balloon in chunks of 1MB. */
+ u32 cBalloonChunks;
+ /**
+ * false = handled in R0, no further action required.
+ * true = allocate balloon memory in R3.
+ */
+ u32 fHandleInR3;
+} VBoxGuestCheckBalloonInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestCheckBalloonInfo, 8);
+
+/**
+ * IOCTL to VBoxGuest to supply or revoke one chunk for ballooning.
+ * The guest kernel module / device driver will lock down supplied memory or
+ * unlock reclaimed memory and then forward the physical addresses of the
+ * changed balloon chunk to the host.
+ */
+#define VBOXGUEST_IOCTL_CHANGE_BALLOON \
+ VBOXGUEST_IOCTL_CODE_(8, sizeof(VBoxGuestChangeBalloonInfo))
+
+/**
+ * Input buffer layout of the VBOXGUEST_IOCTL_CHANGE_BALLOON request.
+ * Information about a memory chunk used to inflate or deflate the balloon.
+ */
+typedef struct VBoxGuestChangeBalloonInfo {
+ /** Address of the chunk. */
+ u64 u64ChunkAddr;
+ /** true = inflate, false = deflate. */
+ u32 fInflate;
+ /** Alignment padding. */
+ u32 u32Align;
+} VBoxGuestChangeBalloonInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestChangeBalloonInfo, 16);
+
+/** IOCTL to VBoxGuest to write guest core. */
+#define VBOXGUEST_IOCTL_WRITE_CORE_DUMP \
+ VBOXGUEST_IOCTL_CODE(9, sizeof(VBoxGuestWriteCoreDump))
+
+/** Input and output buffer layout of the VBOXGUEST_IOCTL_WRITE_CORE request. */
+typedef struct VBoxGuestWriteCoreDump {
+ /** Flags (reserved, MBZ). */
+ u32 fFlags;
+} VBoxGuestWriteCoreDump;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestWriteCoreDump, 4);
+
+/** IOCTL to VBoxGuest to update the mouse status features. */
+#define VBOXGUEST_IOCTL_SET_MOUSE_STATUS \
+ VBOXGUEST_IOCTL_CODE_(10, sizeof(u32))
+
+/** IOCTL to VBoxGuest to connect to a HGCM service. */
+#define VBOXGUEST_IOCTL_HGCM_CONNECT \
+ VBOXGUEST_IOCTL_CODE(16, sizeof(VBoxGuestHGCMConnectInfo))
+
+/**
+ * HGCM connect info structure.
+ *
+ * This is used by VBOXGUEST_IOCTL_HGCM_CONNECT.
+ * Packed so the layout is identical for all clients; the size assertion
+ * below implies HGCMServiceLocation occupies 132 (4 + 128) bytes here.
+ */
+struct VBoxGuestHGCMConnectInfo {
+ s32 result; /**< OUT VBox status code of the connect. */
+ HGCMServiceLocation Loc; /**< IN Which service to connect to. */
+ u32 u32ClientID; /**< OUT Client id to use for further calls. */
+} __packed;
+typedef struct VBoxGuestHGCMConnectInfo VBoxGuestHGCMConnectInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestHGCMConnectInfo, 4+4+128+4);
+
+/** IOCTL to VBoxGuest to disconnect from a HGCM service. */
+#define VBOXGUEST_IOCTL_HGCM_DISCONNECT \
+ VBOXGUEST_IOCTL_CODE(17, sizeof(VBoxGuestHGCMDisconnectInfo))
+
+/**
+ * HGCM disconnect info structure.
+ *
+ * This is used by VBOXGUEST_IOCTL_HGCM_DISCONNECT.
+ */
+typedef struct VBoxGuestHGCMDisconnectInfo {
+ s32 result; /**< OUT VBox status code of the disconnect. */
+ u32 u32ClientID; /**< IN Client id returned by the connect ioctl. */
+} VBoxGuestHGCMDisconnectInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestHGCMDisconnectInfo, 8);
+
+/**
+ * IOCTL to VBoxGuest to make a call to a HGCM service.
+ * @see VBoxGuestHGCMCallInfo
+ */
+#define VBOXGUEST_IOCTL_HGCM_CALL(size) \
+ VBOXGUEST_IOCTL_CODE(18, (size))
+
+/**
+ * HGCM call info structure.
+ *
+ * This is used by VBOXGUEST_IOCTL_HGCM_CALL.
+ */
+typedef struct VBoxGuestHGCMCallInfo {
+ s32 result; /**< OUT Host HGCM return code.*/
+ u32 u32ClientID; /**< IN The id of the caller. */
+ u32 u32Function; /**< IN Function number. */
+ u32 cParms; /**< IN How many parms. */
+ /* Parameters follow in form HGCMFunctionParameter aParms[cParms] */
+} VBoxGuestHGCMCallInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestHGCMCallInfo, 16);
+
+/** IOCTL to VBoxGuest to make a timed call to a HGCM service. */
+#define VBOXGUEST_IOCTL_HGCM_CALL_TIMED(size) \
+ VBOXGUEST_IOCTL_CODE(20, (size))
+
+/**
+ * Timed HGCM call info structure.
+ *
+ * This is used by VBOXGUEST_IOCTL_HGCM_CALL_TIMED.
+ */
+struct VBoxGuestHGCMCallInfoTimed {
+ /** IN How long to wait for completion before cancelling the call. */
+ u32 u32Timeout;
+ /** IN Is this request interruptible? */
+ u32 fInterruptible;
+ /**
+ * IN/OUT The rest of the call information. Placed after the timeout
+ * so that the parameters follow as they would for a normal call.
+ */
+ VBoxGuestHGCMCallInfo info;
+ /* Parameters follow in form HGCMFunctionParameter aParms[cParms] */
+} __packed;
+typedef struct VBoxGuestHGCMCallInfoTimed VBoxGuestHGCMCallInfoTimed;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestHGCMCallInfoTimed, 8+16);
+
+/**
+ * @name IOCTL numbers that 32-bit clients, like the Windows OpenGL guest
+ * driver, will use when talking to a 64-bit driver.
+ * @remarks These are only used by the driver implementation!
+ * @{
+ */
+#define VBOXGUEST_IOCTL_HGCM_CONNECT_32 \
+ VBOXGUEST_IOCTL_CODE_32(16, sizeof(VBoxGuestHGCMConnectInfo))
+#define VBOXGUEST_IOCTL_HGCM_DISCONNECT_32 \
+ VBOXGUEST_IOCTL_CODE_32(17, sizeof(VBoxGuestHGCMDisconnectInfo))
+#define VBOXGUEST_IOCTL_HGCM_CALL_32(size) \
+ VBOXGUEST_IOCTL_CODE_32(18, (size))
+#define VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(size) \
+ VBOXGUEST_IOCTL_CODE_32(20, (size))
+/** @} */
+
+/** Get the pointer to the first HGCM parameter. */
+#define VBOXGUEST_HGCM_CALL_PARMS(a) \
+ ((HGCMFunctionParameter *)((u8 *)(a) + sizeof(VBoxGuestHGCMCallInfo)))
+/**
+ * Get the pointer to the first HGCM parameter in a 32-bit request.
+ * The same header offset can be used because VBoxGuestHGCMCallInfo is built
+ * from fixed-width types (16 bytes per the assertion above) on both ABIs.
+ */
+#define VBOXGUEST_HGCM_CALL_PARMS32(a) \
+ ((HGCMFunctionParameter32 *)((u8 *)(a) + sizeof(VBoxGuestHGCMCallInfo)))
+
+/** Flags for the VBoxGuestCapsAquire request. */
+typedef enum VBOXGUESTCAPSACQUIRE_FLAGS {
+ VBOXGUESTCAPSACQUIRE_FLAGS_NONE = 0,
+ /*
+ * Configures VBoxGuest to use the specified caps in Acquire mode, w/o
+ * making any caps acquisition/release. so far it is only possible to
+ * set acquire mode for caps, but not clear it, so u32NotMask is
+ * ignored for this request. (Implicit value: 1.)
+ */
+ VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE,
+ /* To ensure enum is 32bit */
+ VBOXGUESTCAPSACQUIRE_FLAGS_32bit = 0x7fffffff
+} VBOXGUESTCAPSACQUIRE_FLAGS;
+
+/*
+ * Note: the "Aquire" misspelling is part of the established interface and
+ * is kept for compatibility.
+ */
+typedef struct VBoxGuestCapsAquire {
+ /*
+ * result status
+ * VINF_SUCCESS - on success
+ * VERR_RESOURCE_BUSY - some caps in the u32OrMask are acquired by some
+ * other VBoxGuest connection. NOTE: no u32NotMask caps are cleared
+ * in this case, no modifications are done on failure.
+ * VERR_INVALID_PARAMETER - invalid caps are specified with either
+ * u32OrMask or u32NotMask. No modifications are done on failure.
+ */
+ s32 rc;
+ /* Acquire command */
+ VBOXGUESTCAPSACQUIRE_FLAGS enmFlags;
+ /* caps to acquire, OR-ed VMMDEV_GUEST_SUPPORTS_XXX flags */
+ u32 u32OrMask;
+ /* caps to release, OR-ed VMMDEV_GUEST_SUPPORTS_XXX flags */
+ u32 u32NotMask;
+} VBoxGuestCapsAquire;
+
+/**
+ * IOCTL to for Acquiring/Releasing Guest Caps
+ * This is used for multiple purposes:
+ * 1. By doing Acquire r3 client application (e.g. VBoxTray) claims it will use
+ * the given connection for performing operations like Auto-resize, or
+ * Seamless. If the application terminates, the driver will automatically
+ * cleanup the caps reported to host, so that host knows guest does not
+ * support them anymore.
+ * 2. In a multy-user environment this will not allow r3 applications (like
+ * VBoxTray) running in different user sessions simultaneously to interfere
+ * with each other. An r3 client application (like VBoxTray) is responsible
+ * for Acquiring/Releasing caps properly as needed.
+ **/
+#define VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE \
+ VBOXGUEST_IOCTL_CODE(32, sizeof(VBoxGuestCapsAquire))
+
+/** IOCTL to VBoxGuest to set guest capabilities. */
+#define VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES \
+ VBOXGUEST_IOCTL_CODE_(33, sizeof(VBoxGuestSetCapabilitiesInfo))
+
+/** Input/output buffer layout for VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES. */
+typedef struct VBoxGuestSetCapabilitiesInfo {
+ /** Capabilities to set (mask of flags to OR in). */
+ u32 u32OrMask;
+ /** Capabilities to clear. */
+ u32 u32NotMask;
+} VBoxGuestSetCapabilitiesInfo;
+VBOXGUEST_ASSERT_SIZE(VBoxGuestSetCapabilitiesInfo, 8);
+
+/** @} */
+
+/** @} */
+
+#endif
--
2.13.3