[RESEND PATCH v2 1/4] usb: dbc: early driver for xhci debug capability

From: Lu Baolu
Date: Tue Oct 18 2016 - 20:19:27 EST


xHCI debug capability (DbC) is an optional but standalone
functionality provided by an xHCI host controller. Software
learns this capability by walking through the extended
capability list of the host. xHCI specification describes
DbC in section 7.6.

This patch introduces the code to probe and initialize the
debug capability hardware during early boot. With hardware
initialized, the debug target (system on which this code is
running) will present a debug device through the debug port
(normally the first USB3 port). The debug device is fully
compliant with the USB framework and provides the equivalent
of a very high performance (USB3) full-duplex serial link
between the debug host and target. The DbC functionality is
independent of xHCI host. There isn't any precondition from
xHCI host side for DbC to work.

This patch also includes bulk out and bulk in interfaces.
These interfaces could be used to implement early printk
bootconsole or hook to various system debuggers.

This code is designed to be only used for kernel debugging
when machine crashes very early before the console code is
initialized. For normal operation it is not recommended.

Cc: Mathias Nyman <mathias.nyman@xxxxxxxxxxxxxxx>
Signed-off-by: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
---
arch/x86/Kconfig.debug | 14 +
drivers/usb/Kconfig | 3 +
drivers/usb/Makefile | 2 +-
drivers/usb/early/Makefile | 1 +
drivers/usb/early/xhci-dbc.c | 1097 +++++++++++++++++++++++++++++++++++++++++
drivers/usb/early/xhci-dbc.h | 206 ++++++++
include/linux/usb/xhci-dbgp.h | 22 +
7 files changed, 1344 insertions(+), 1 deletion(-)
create mode 100644 drivers/usb/early/xhci-dbc.c
create mode 100644 drivers/usb/early/xhci-dbc.h
create mode 100644 include/linux/usb/xhci-dbgp.h

diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 67eec55..13e85b7 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -29,6 +29,7 @@ config EARLY_PRINTK
config EARLY_PRINTK_DBGP
bool "Early printk via EHCI debug port"
depends on EARLY_PRINTK && PCI
+ select USB_EARLY_PRINTK
---help---
Write kernel log output directly into the EHCI debug port.

@@ -48,6 +49,19 @@ config EARLY_PRINTK_EFI
This is useful for kernel debugging when your machine crashes very
early before the console code is initialized.

+config EARLY_PRINTK_XDBC
+ bool "Early printk via xHCI debug port"
+ depends on EARLY_PRINTK && PCI
+ select USB_EARLY_PRINTK
+ ---help---
+ Write kernel log output directly into the xHCI debug port.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized. For normal operation
+ it is not recommended because it looks ugly and doesn't cooperate
+ with klogd/syslogd or the X server. You should normally say N here,
+ unless you want to debug such a crash.
+
config X86_PTDUMP_CORE
def_bool n

diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 644e978..860d81b1 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -19,6 +19,9 @@ config USB_EHCI_BIG_ENDIAN_MMIO
config USB_EHCI_BIG_ENDIAN_DESC
bool

+config USB_EARLY_PRINTK
+ bool
+
menuconfig USB_SUPPORT
bool "USB support"
depends on HAS_IOMEM
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index dca7856..dd91ca1 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -48,7 +48,7 @@ obj-$(CONFIG_USB_MICROTEK) += image/
obj-$(CONFIG_USB_SERIAL) += serial/

obj-$(CONFIG_USB) += misc/
-obj-$(CONFIG_EARLY_PRINTK_DBGP) += early/
+obj-$(CONFIG_USB_EARLY_PRINTK) += early/

obj-$(CONFIG_USB_ATM) += atm/
obj-$(CONFIG_USB_SPEEDTOUCH) += atm/
diff --git a/drivers/usb/early/Makefile b/drivers/usb/early/Makefile
index 24bbe51..2db5906 100644
--- a/drivers/usb/early/Makefile
+++ b/drivers/usb/early/Makefile
@@ -3,3 +3,4 @@
#

obj-$(CONFIG_EARLY_PRINTK_DBGP) += ehci-dbgp.o
+obj-$(CONFIG_EARLY_PRINTK_XDBC) += xhci-dbc.o
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
new file mode 100644
index 0000000..939fff2
--- /dev/null
+++ b/drivers/usb/early/xhci-dbc.c
@@ -0,0 +1,1097 @@
+/*
+ * xhci-dbc.c - xHCI debug capability early driver
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
+#include <linux/console.h>
+#include <linux/pci_regs.h>
+#include <linux/pci_ids.h>
+#include <linux/bootmem.h>
+#include <linux/io.h>
+#include <asm/pci-direct.h>
+#include <asm/fixmap.h>
+#include <linux/bcd.h>
+#include <linux/export.h>
+#include <linux/version.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+
+#include "../host/xhci.h"
+#include "xhci-dbc.h"
+
+static struct xdbc_state xdbc;
+static int early_console_keep;
+static struct workqueue_struct *xdbc_wq;
+
+#ifdef XDBC_TRACE
+#define xdbc_trace trace_printk
+#else
+static inline void xdbc_trace(const char *fmt, ...) { }
+#endif /* XDBC_TRACE */
+
+static int xdbc_bulk_transfer(void *data, int size, bool read);
+
+/*
+ * Size and map the MMIO BAR of the xHCI controller at @bus/@dev/@func.
+ * The bus address and length are recorded in xdbc.xhci_start/xhci_length
+ * and an early_ioremap()'d base is returned, or NULL on failure.  PCI
+ * memory-space decoding is enabled here if the firmware left it off.
+ */
+static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
+{
+ u32 val, sz;
+ u64 val64, sz64, mask64;
+ u8 byte;
+ void __iomem *base;
+
+ /* Classic BAR sizing: write all-ones, read back the mask, restore */
+ val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
+ write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
+ sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
+ write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);
+ if (val == 0xffffffff || sz == 0xffffffff) {
+ pr_notice("invalid mmio bar\n");
+ return NULL;
+ }
+
+ val64 = val & PCI_BASE_ADDRESS_MEM_MASK;
+ sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
+ mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+
+ /* For a 64-bit BAR the upper 32 bits live in the next config dword */
+ if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
+ PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
+ write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
+ sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
+ write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);
+
+ val64 |= ((u64)val << 32);
+ sz64 |= ((u64)sz << 32);
+ mask64 |= ((u64)~0 << 32);
+ }
+
+ sz64 &= mask64;
+
+ if (sizeof(dma_addr_t) < 8 || !sz64) {
+ pr_notice("invalid mmio address\n");
+ return NULL;
+ }
+
+ /* BAR size is determined by the lowest set bit of the size probe */
+ sz64 = 1ULL << __ffs64(sz64);
+
+ /* check if the mem space is enabled */
+ byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
+ if (!(byte & PCI_COMMAND_MEMORY)) {
+ byte |= PCI_COMMAND_MEMORY;
+ write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
+ }
+
+ xdbc.xhci_start = val64;
+ xdbc.xhci_length = sz64;
+ base = early_ioremap(val64, sz64);
+
+ return base;
+}
+
+/*
+ * Allocate one bootmem page for DMA-visible data and optionally report
+ * its bus address through @dma_addr.  Returns the kernel virtual address
+ * or NULL if the allocation failed.
+ */
+static void * __init xdbc_get_page(dma_addr_t *dma_addr)
+{
+ void *page = alloc_bootmem_pages_nopanic(PAGE_SIZE);
+
+ if (!page)
+ return NULL;
+
+ if (dma_addr)
+ *dma_addr = (dma_addr_t)__pa(page);
+
+ return page;
+}
+
+/*
+ * Brute-force scan of PCI config space for the @xdbc_num'th xHCI-class
+ * device.  On success, stores bus/device/function through @b/@d/@f and
+ * returns 0; returns -1 when no matching controller is found.
+ *
+ * NOTE(review): the return type is u32 yet -1 is returned; the caller
+ * only tests the result for non-zero so this works, but a plain int
+ * return would be cleaner - confirm before relying on the sign.
+ */
+static u32 __init xdbc_find_dbgp(int xdbc_num, u32 *b, u32 *d, u32 *f)
+{
+ u32 bus, dev, func, class;
+
+ for (bus = 0; bus < XDBC_PCI_MAX_BUSES; bus++) {
+ for (dev = 0; dev < XDBC_PCI_MAX_DEVICES; dev++) {
+ for (func = 0; func < XDBC_PCI_MAX_FUNCTION; func++) {
+ class = read_pci_config(bus, dev, func,
+ PCI_CLASS_REVISION);
+ /* class/subclass/prog-if live in the top 24 bits */
+ if ((class >> 8) != PCI_CLASS_SERIAL_USB_XHCI)
+ continue;
+
+ /* skip until the requested instance is reached */
+ if (xdbc_num-- != 0)
+ continue;
+
+ *b = bus;
+ *d = dev;
+ *f = func;
+
+ return 0;
+ }
+ }
+ }
+
+ return -1;
+}
+
+/*
+ * Early-boot delay: burn time with dummy writes to POST port 0x80,
+ * usable before udelay() has been calibrated.
+ */
+static void xdbc_early_delay(unsigned long count)
+{
+ u8 scratch = inb(0x80);
+
+ for (; count; count--)
+ outb(scratch, 0x80);
+}
+
+/* Normal-runtime delay, once udelay() is available */
+static void xdbc_runtime_delay(unsigned long count)
+{
+ udelay(count);
+}
+
+/* Starts as the port-0x80 early delay; switched to udelay() in xdbc_init() */
+static void (*xdbc_delay)(unsigned long) = xdbc_early_delay;
+
+/*
+ * Poll the 32-bit MMIO register at @ptr until (value & @mask) == @done,
+ * pausing @delay time units per iteration (via xdbc_delay()) for at most
+ * @wait units in total.  Returns 0 on success, -ETIMEDOUT on timeout.
+ */
+static int handshake(void __iomem *ptr, u32 mask, u32 done,
+ int wait, int delay)
+{
+ u32 result;
+
+ do {
+ result = readl(ptr);
+ result &= mask;
+ if (result == done)
+ return 0;
+ xdbc_delay(delay);
+ wait -= delay;
+ } while (wait > 0);
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Take ownership of the host controller from the BIOS via the xHCI
+ * "USB Legacy Support" extended capability, then mask BIOS SMIs and
+ * clear pending SMI events.  If the BIOS does not release the HC
+ * within 5 seconds we assume it is buggy and take ownership anyway.
+ */
+static void __init xdbc_bios_handoff(void)
+{
+ int ext_cap_offset;
+ int timeout;
+ u32 val;
+
+ ext_cap_offset = xhci_find_next_ext_cap(xdbc.xhci_base,
+ 0, XHCI_EXT_CAPS_LEGACY);
+ /*
+ * No legacy support capability: nothing to hand off.  Without this
+ * check a zero offset would make us write into the capability
+ * registers at the start of the MMIO window.
+ */
+ if (!ext_cap_offset)
+ return;
+
+ val = readl(xdbc.xhci_base + ext_cap_offset);
+
+ /* If the BIOS owns the HC, signal that the OS wants it, and wait */
+ if (val & XHCI_HC_BIOS_OWNED) {
+ writel(val | XHCI_HC_OS_OWNED,
+ xdbc.xhci_base + ext_cap_offset);
+ timeout = handshake(xdbc.xhci_base + ext_cap_offset,
+ XHCI_HC_BIOS_OWNED, 0, 5000, 10);
+
+ /* Assume a buggy BIOS and take HC ownership anyway */
+ if (timeout) {
+ pr_notice("xHCI BIOS handoff failed\n");
+ writel(val & ~XHCI_HC_BIOS_OWNED,
+ xdbc.xhci_base + ext_cap_offset);
+ }
+ }
+
+ /* Disable any BIOS SMIs and clear all SMI events */
+ val = readl(xdbc.xhci_base + ext_cap_offset +
+ XHCI_LEGACY_CONTROL_OFFSET);
+ val &= XHCI_LEGACY_DISABLE_SMI;
+ val |= XHCI_LEGACY_SMI_EVENTS;
+ writel(val, xdbc.xhci_base + ext_cap_offset +
+ XHCI_LEGACY_CONTROL_OFFSET);
+}
+
+/*
+ * Back @ring with a single bootmem-allocated segment.
+ * Returns 0 on success, -ENOMEM if the page allocation failed.
+ */
+static int __init xdbc_alloc_ring(struct xdbc_segment *seg,
+ struct xdbc_ring *ring)
+{
+ seg->trbs = xdbc_get_page(&seg->dma);
+ if (seg->trbs == NULL)
+ return -ENOMEM;
+
+ ring->segment = seg;
+ return 0;
+}
+
+/* Release the bootmem page backing @ring, if any, and detach the segment */
+static void __init xdbc_free_ring(struct xdbc_ring *ring)
+{
+ struct xdbc_segment *segment = ring->segment;
+
+ if (segment) {
+ free_bootmem(segment->dma, PAGE_SIZE);
+ ring->segment = NULL;
+ }
+}
+
+/*
+ * Return a ring to its pristine state: zero the segment, point both
+ * enqueue and dequeue at the first TRB and set the cycle state to 1.
+ * Transfer rings (but not the event ring, whose wrapping is handled by
+ * software) get a link TRB with the toggle-cycle flag in the last slot
+ * so the hardware wraps within the single segment.
+ */
+static void xdbc_reset_ring(struct xdbc_ring *ring)
+{
+ struct xdbc_trb *link_trb;
+ struct xdbc_segment *seg = ring->segment;
+
+ memset(seg->trbs, 0, PAGE_SIZE);
+
+ ring->enqueue = seg->trbs;
+ ring->dequeue = seg->trbs;
+ ring->cycle_state = 1;
+
+ if (ring != &xdbc.evt_ring) {
+ link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1];
+ link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma));
+ link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma));
+ link_trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_LINK)) |
+ cpu_to_le32(LINK_TOGGLE);
+ }
+}
+
+/* Widen an ASCII string into little-endian UTF-16 code units at @s */
+static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ s[i] = cpu_to_le16(c[i]);
+}
+
+/*
+ * (Re)build everything the debug capability needs in memory - the three
+ * rings, the event ring segment table, the DbC contexts, the string
+ * descriptors and the transfer buffers - and program the DbC registers
+ * that point at them.  Called at initial setup and again after an
+ * external reset, so it must cope with already-used memory.
+ */
+static void xdbc_mem_init(void)
+{
+ struct xdbc_erst_entry *entry;
+ struct xdbc_strings *strings;
+ struct xdbc_context *context;
+ struct xdbc_ep_context *ep_in, *ep_out;
+ struct usb_string_descriptor *s_desc;
+ unsigned int max_burst;
+ u32 string_length;
+ int index = 0;
+ u32 dev_info;
+
+ xdbc_reset_ring(&xdbc.evt_ring);
+ xdbc_reset_ring(&xdbc.in_ring);
+ xdbc_reset_ring(&xdbc.out_ring);
+ memset(xdbc.table_base, 0, PAGE_SIZE);
+ memset(xdbc.out_buf, 0, PAGE_SIZE);
+
+ /* Initialize event ring segment table */
+ xdbc.erst_size = 16;
+ xdbc.erst_base = xdbc.table_base +
+ index * XDBC_TABLE_ENTRY_SIZE;
+ xdbc.erst_dma = xdbc.table_dma +
+ index * XDBC_TABLE_ENTRY_SIZE;
+ index += XDBC_ERST_ENTRY_NUM;
+
+ /* Initialize Event Ring Segment Table */
+ entry = (struct xdbc_erst_entry *)xdbc.erst_base;
+ entry->seg_addr = cpu_to_le64(xdbc.evt_seg.dma);
+ entry->seg_size = cpu_to_le32(XDBC_TRBS_PER_SEGMENT);
+ entry->rsvd = 0;
+
+ /* Initialize ERST registers */
+ writel(1, &xdbc.xdbc_reg->ersts);
+ xdbc_write64(xdbc.erst_dma, &xdbc.xdbc_reg->erstba);
+ xdbc_write64(xdbc.evt_seg.dma, &xdbc.xdbc_reg->erdp);
+
+ /* debug capability contexts */
+ BUILD_BUG_ON(sizeof(struct xdbc_info_context) != 64);
+ BUILD_BUG_ON(sizeof(struct xdbc_ep_context) != 64);
+ BUILD_BUG_ON(sizeof(struct xdbc_context) != 64 * 3);
+
+ xdbc.dbcc_size = 64 * 3;
+ xdbc.dbcc_base = xdbc.table_base +
+ index * XDBC_TABLE_ENTRY_SIZE;
+ xdbc.dbcc_dma = xdbc.table_dma +
+ index * XDBC_TABLE_ENTRY_SIZE;
+ index += XDBC_DBCC_ENTRY_NUM;
+
+ /* strings */
+ xdbc.string_size = sizeof(struct xdbc_strings);
+ xdbc.string_base = xdbc.table_base +
+ index * XDBC_TABLE_ENTRY_SIZE;
+ xdbc.string_dma = xdbc.table_dma +
+ index * XDBC_TABLE_ENTRY_SIZE;
+ index += XDBC_STRING_ENTRY_NUM;
+
+ strings = (struct xdbc_strings *)xdbc.string_base;
+
+ /* serial string */
+ s_desc = (struct usb_string_descriptor *)strings->serial;
+ s_desc->bLength = (strlen(XDBC_STRING_SERIAL) + 1) * 2;
+ s_desc->bDescriptorType = USB_DT_STRING;
+ xdbc_put_utf16(s_desc->wData, XDBC_STRING_SERIAL,
+ strlen(XDBC_STRING_SERIAL));
+
+ /* string lengths are packed one per byte into the info context */
+ string_length = s_desc->bLength;
+ string_length <<= 8;
+
+ /* product string */
+ s_desc = (struct usb_string_descriptor *)strings->product;
+ s_desc->bLength = (strlen(XDBC_STRING_PRODUCT) + 1) * 2;
+ s_desc->bDescriptorType = USB_DT_STRING;
+ xdbc_put_utf16(s_desc->wData, XDBC_STRING_PRODUCT,
+ strlen(XDBC_STRING_PRODUCT));
+
+ string_length += s_desc->bLength;
+ string_length <<= 8;
+
+ /* manufacture string */
+ s_desc = (struct usb_string_descriptor *)strings->manufacture;
+ s_desc->bLength = (strlen(XDBC_STRING_MANUFACTURE) + 1) * 2;
+ s_desc->bDescriptorType = USB_DT_STRING;
+ xdbc_put_utf16(s_desc->wData, XDBC_STRING_MANUFACTURE,
+ strlen(XDBC_STRING_MANUFACTURE));
+
+ string_length += s_desc->bLength;
+ string_length <<= 8;
+
+ /* string 0 */
+ strings->string0[0] = 4;
+ strings->string0[1] = USB_DT_STRING;
+ strings->string0[2] = 0x09;
+ strings->string0[3] = 0x04;
+
+ string_length += 4;
+
+ /* populate the contexts */
+ context = (struct xdbc_context *)xdbc.dbcc_base;
+ context->info.string0 = cpu_to_le64(xdbc.string_dma);
+ context->info.manufacture = cpu_to_le64(xdbc.string_dma +
+ XDBC_MAX_STRING_LENGTH);
+ context->info.product = cpu_to_le64(xdbc.string_dma +
+ XDBC_MAX_STRING_LENGTH * 2);
+ context->info.serial = cpu_to_le64(xdbc.string_dma +
+ XDBC_MAX_STRING_LENGTH * 3);
+ context->info.length = cpu_to_le32(string_length);
+
+ max_burst = DEBUG_MAX_BURST(readl(&xdbc.xdbc_reg->control));
+ ep_out = (struct xdbc_ep_context *)&context->out;
+ ep_out->ep_info1 = 0;
+ ep_out->ep_info2 = cpu_to_le32(EP_TYPE(BULK_OUT_EP) |
+ MAX_PACKET(1024) | MAX_BURST(max_burst));
+ ep_out->deq = cpu_to_le64(xdbc.out_seg.dma |
+ xdbc.out_ring.cycle_state);
+
+ /* the IN endpoint context must use the bulk-IN endpoint type */
+ ep_in = (struct xdbc_ep_context *)&context->in;
+ ep_in->ep_info1 = 0;
+ ep_in->ep_info2 = cpu_to_le32(EP_TYPE(BULK_IN_EP) |
+ MAX_PACKET(1024) | MAX_BURST(max_burst));
+ ep_in->deq = cpu_to_le64(xdbc.in_seg.dma |
+ xdbc.in_ring.cycle_state);
+
+ /* write DbC context pointer register */
+ xdbc_write64(xdbc.dbcc_dma, &xdbc.xdbc_reg->dccp);
+
+ /* device descriptor info registers */
+ dev_info = cpu_to_le32((XDBC_VENDOR_ID << 16) | XDBC_PROTOCOL);
+ writel(dev_info, &xdbc.xdbc_reg->devinfo1);
+ dev_info = cpu_to_le32((XDBC_DEVICE_REV << 16) | XDBC_PRODUCT_ID);
+ writel(dev_info, &xdbc.xdbc_reg->devinfo2);
+
+ /* IN buffer shares the OUT page, offset by one max-packet */
+ xdbc.in_buf = xdbc.out_buf + XDBC_MAX_PACKET;
+ xdbc.in_dma = xdbc.out_dma + XDBC_MAX_PACKET;
+}
+
+/*
+ * Reset every root hub port in the 1-based range [@id, @id + @count)
+ * that has no device connected (CCS clear).  PORTSC registers start at
+ * operational base + 0x400 and are 0x10 bytes apart, hence the id--.
+ */
+static void xdbc_do_reset_debug_port(u32 id, u32 count)
+{
+ u32 val, cap_length;
+ void __iomem *ops_reg;
+ void __iomem *portsc;
+ int i;
+
+ /* CAPLENGTH (low byte of the first register) = operational offset */
+ cap_length = readl(xdbc.xhci_base) & 0xff;
+ ops_reg = xdbc.xhci_base + cap_length;
+
+ id--;
+ for (i = id; i < (id + count); i++) {
+ portsc = ops_reg + 0x400 + i * 0x10;
+ val = readl(portsc);
+ /* reset the port if CCS bit is cleared */
+ if (!(val & PORT_CONNECT))
+ writel(val | PORT_RESET, portsc);
+ }
+}
+
+/*
+ * Walk all "Supported Protocol" extended capabilities and reset the
+ * unconnected ports of every USB3 (major revision 3) port range.  Only
+ * called for Intel controllers (see xdbc_start()) as a workaround to
+ * avoid bus hangs.
+ */
+static void __init xdbc_reset_debug_port(void)
+{
+ int offset = 0;
+ u32 val, port_offset, port_count;
+
+ do {
+ offset = xhci_find_next_ext_cap(xdbc.xhci_base,
+ offset,
+ XHCI_EXT_CAPS_PROTOCOL);
+ if (!offset)
+ break;
+
+ /* only USB3 protocol ports are of interest */
+ val = readl(xdbc.xhci_base + offset);
+ if (XHCI_EXT_PORT_MAJOR(val) != 0x3)
+ continue;
+
+ /* third dword holds the port offset/count for this range */
+ val = readl(xdbc.xhci_base + offset + 8);
+ port_offset = XHCI_EXT_PORT_OFF(val);
+ port_count = XHCI_EXT_PORT_COUNT(val);
+
+ xdbc_do_reset_debug_port(port_offset, port_count);
+ } while (1);
+}
+
+/*
+ * Write one TRB at the ring's enqueue pointer and advance it.  The last
+ * slot of each transfer-ring segment holds the link TRB set up by
+ * xdbc_reset_ring(); when we reach it, hand its cycle bit to the
+ * hardware, wrap to the start of the segment and toggle our cycle state.
+ */
+static void xdbc_queue_trb(struct xdbc_ring *ring,
+ u32 field1, u32 field2, u32 field3, u32 field4)
+{
+ struct xdbc_trb *trb, *link_trb;
+
+ trb = ring->enqueue;
+ trb->field[0] = cpu_to_le32(field1);
+ trb->field[1] = cpu_to_le32(field2);
+ trb->field[2] = cpu_to_le32(field3);
+ trb->field[3] = cpu_to_le32(field4);
+
+ ++(ring->enqueue);
+ /*
+ * Compare against XDBC_TRBS_PER_SEGMENT: that is where
+ * xdbc_reset_ring() placed the link TRB.  xhci.h's
+ * TRBS_PER_SEGMENT is a different constant that is not
+ * guaranteed to match the DbC segment size.
+ */
+ if (ring->enqueue >= &ring->segment->trbs[XDBC_TRBS_PER_SEGMENT - 1]) {
+ link_trb = ring->enqueue;
+ if (ring->cycle_state)
+ link_trb->field[3] |= cpu_to_le32(TRB_CYCLE);
+ else
+ link_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
+
+ ring->enqueue = ring->segment->trbs;
+ ring->cycle_state ^= 1;
+ }
+}
+
+/* Notify the DbC that new TRBs are available on the @target endpoint */
+static void xdbc_ring_doorbell(int target)
+{
+ writel(DOOR_BELL_TARGET(target), &xdbc.xdbc_reg->doorbell);
+}
+
+/*
+ * Enable the debug capability and bring it to the Configured state:
+ * set DCE/PED, wait for the hardware to come up, wait for a debug host
+ * to connect and configure the debug device, then record the root hub
+ * port number.  Returns 0 on success or a negative error code.
+ */
+static int xdbc_start(void)
+{
+ u32 ctrl, status;
+ int ret;
+
+ ctrl = readl(&xdbc.xdbc_reg->control);
+ writel(ctrl | CTRL_DCE | CTRL_PED, &xdbc.xdbc_reg->control);
+ ret = handshake(&xdbc.xdbc_reg->control,
+ CTRL_DCE, CTRL_DCE, 100000, 100);
+ if (ret) {
+ xdbc_trace("failed to initialize hardware\n");
+ return ret;
+ }
+
+ /* reset port to avoid bus hang */
+ if (xdbc.vendor == PCI_VENDOR_ID_INTEL)
+ xdbc_reset_debug_port();
+
+ /* wait for port connection */
+ ret = handshake(&xdbc.xdbc_reg->portsc,
+ PORTSC_CCS, PORTSC_CCS, 5000000, 100);
+ if (ret) {
+ xdbc_trace("waiting for connection timed out\n");
+ return ret;
+ }
+
+ /* wait for debug device to be configured */
+ ret = handshake(&xdbc.xdbc_reg->control,
+ CTRL_DCR, CTRL_DCR, 5000000, 100);
+ if (ret) {
+ xdbc_trace("waiting for device configuration timed out\n");
+ return ret;
+ }
+
+ /* port should have a valid port# */
+ status = readl(&xdbc.xdbc_reg->status);
+ if (!DCST_DPN(status)) {
+ xdbc_trace("invalid root hub port number\n");
+ return -ENODEV;
+ }
+
+ xdbc.port_number = DCST_DPN(status);
+
+ xdbc_trace("DbC is running now, control 0x%08x port ID %d\n",
+ readl(&xdbc.xdbc_reg->control), xdbc.port_number);
+
+ return 0;
+}
+
+/*
+ * Recover after the DbC was reset behind our back (DCE found clear):
+ * disable the hardware, rebuild all memory structures and restart it.
+ * Gives up permanently after 5 failed attempts so a dead link cannot
+ * stall the caller forever.  Returns 0 on successful recovery.
+ */
+static int xdbc_handle_external_reset(void)
+{
+ int ret = 0;
+ static int failure_count;
+
+ xdbc_trace("external reset detected\n");
+
+ if (failure_count >= 5)
+ return -ENODEV;
+
+ xdbc.flags = 0;
+ writel(0, &xdbc.xdbc_reg->control);
+ ret = handshake(&xdbc.xdbc_reg->control, CTRL_DCE, 0, 100000, 10);
+ if (ret)
+ goto count_and_out;
+
+ xdbc_mem_init();
+
+ mmiowb();
+
+ ret = xdbc_start();
+ if (ret < 0)
+ goto count_and_out;
+
+ xdbc_trace("dbc recovered\n");
+
+ xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;
+
+ /* re-arm the bulk-in endpoint so host-to-target data can flow */
+ xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
+
+ return 0;
+
+count_and_out:
+ failure_count++;
+ xdbc_trace("failed to recover from external reset, retried %d\n",
+ failure_count);
+ return ret;
+}
+
+/*
+ * Queue one bulk transfer of up to XDBC_MAX_PACKET bytes.  For writes
+ * (@read == false), @data is copied into the dedicated OUT buffer; for
+ * reads, the IN buffer is cleared and armed.  Recovers from an external
+ * reset if DCE is found clear.  Returns @size on success or a negative
+ * error code.
+ */
+static int xdbc_bulk_transfer(void *data, int size, bool read)
+{
+ u64 addr;
+ u32 length, control;
+ struct xdbc_trb *trb;
+ struct xdbc_ring *ring;
+ u32 cycle;
+ int ret;
+
+ if (size > XDBC_MAX_PACKET) {
+ xdbc_trace("oops: bad parameter, size %d\n", size);
+ return -EINVAL;
+ }
+
+ if (!(readl(&xdbc.xdbc_reg->control) & CTRL_DCE)) {
+ ret = xdbc_handle_external_reset();
+ if (ret) {
+ xdbc_trace("oops: hardware failed to recover\n");
+ return -EIO;
+ }
+ }
+
+ if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED) ||
+ !(xdbc.flags & XDBC_FLAGS_CONFIGURED) ||
+ (!read && (xdbc.flags & XDBC_FLAGS_BULKOUT_STALL)) ||
+ (read && (xdbc.flags & XDBC_FLAGS_BULKIN_STALL))) {
+ xdbc_trace("oops: hardware not ready %08x\n", xdbc.flags);
+ return -EIO;
+ }
+
+ ring = (read ? &xdbc.in_ring : &xdbc.out_ring);
+ trb = ring->enqueue;
+ cycle = ring->cycle_state;
+
+ length = TRB_LEN(size);
+ control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
+
+ /*
+ * The TRB is first queued with the WRONG cycle bit so the hardware
+ * ignores it; the correct bit is set after the wmb() below so the
+ * controller only sees a fully-written TRB.
+ * NOTE(review): cpu_to_le32 applied to a CPU-order u32 here is a
+ * no-op on x86 (LE) but would be wrong on big-endian - confirm
+ * intent if this is ever made portable.
+ */
+ if (cycle)
+ control &= cpu_to_le32(~TRB_CYCLE);
+ else
+ control |= cpu_to_le32(TRB_CYCLE);
+
+ if (read) {
+ memset(xdbc.in_buf, 0, XDBC_MAX_PACKET);
+ addr = xdbc.in_dma;
+ xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
+ } else {
+ memset(xdbc.out_buf, 0, XDBC_MAX_PACKET);
+ memcpy(xdbc.out_buf, data, size);
+ addr = xdbc.out_dma;
+ xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
+ }
+
+ xdbc_queue_trb(ring, lower_32_bits(addr),
+ upper_32_bits(addr),
+ length, control);
+
+ /*
+ * Memory barrier to ensure hardware sees the trbs
+ * enqueued above.
+ */
+ wmb();
+ /* now flip the cycle bit to hand the TRB to the hardware */
+ if (cycle)
+ trb->field[3] |= cpu_to_le32(cycle);
+ else
+ trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
+
+ xdbc_ring_doorbell(read ? IN_EP_DOORBELL : OUT_EP_DOORBELL);
+
+ return size;
+}
+
+/*
+ * One-time early initialization: disable the DbC, allocate the table
+ * page, transfer buffers and the three rings, build all structures via
+ * xdbc_mem_init() and start the hardware.  On failure the allocated
+ * pages are NOT freed here - the caller (early_xdbc_setup_hardware())
+ * cleans up.  Returns 0 on success or a negative error code.
+ */
+static int __init xdbc_early_setup(void)
+{
+ int ret;
+
+ writel(0, &xdbc.xdbc_reg->control);
+ ret = handshake(&xdbc.xdbc_reg->control, CTRL_DCE, 0, 100000, 100);
+ if (ret)
+ return ret;
+
+ /* allocate table page */
+ xdbc.table_base = xdbc_get_page(&xdbc.table_dma);
+ if (!xdbc.table_base)
+ return -ENOMEM;
+
+ /* get and store the transfer buffer */
+ xdbc.out_buf = xdbc_get_page(&xdbc.out_dma);
+ if (!xdbc.out_buf)
+ return -ENOMEM;
+
+ /* allocate event ring */
+ ret = xdbc_alloc_ring(&xdbc.evt_seg, &xdbc.evt_ring);
+ if (ret < 0)
+ return ret;
+
+ /* IN/OUT endpoint transfer ring */
+ ret = xdbc_alloc_ring(&xdbc.in_seg, &xdbc.in_ring);
+ if (ret < 0)
+ return ret;
+
+ ret = xdbc_alloc_ring(&xdbc.out_seg, &xdbc.out_ring);
+ if (ret < 0)
+ return ret;
+
+ xdbc_mem_init();
+
+ /*
+ * Memory barrier to ensure hardware sees the bits
+ * setting above.
+ */
+ mmiowb();
+
+ ret = xdbc_start();
+ if (ret < 0) {
+ /* give the shared port back to host */
+ writel(0, &xdbc.xdbc_reg->control);
+ return ret;
+ }
+
+ xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;
+
+ /* arm the first bulk-in transfer so the host can send data */
+ xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
+
+ return 0;
+}
+
+/*
+ * Parse the early-printk parameter string @s ("[<num>][,keep]"): pick
+ * the <num>'th xHCI controller in the system, map its MMIO space and
+ * locate the Debug Capability register set.  "keep" requests that the
+ * boot console not be unregistered when the real console comes up.
+ * Returns 0 on success or a negative error code.
+ */
+int __init early_xdbc_parse_parameter(char *s)
+{
+ unsigned long dbgp_num = 0;
+ u32 bus, dev, func, offset;
+ int ret;
+
+ if (!early_pci_allowed())
+ return -EPERM;
+
+ if (strstr(s, "keep"))
+ early_console_keep = 1;
+
+ /* already set up by an earlier call */
+ if (xdbc.xdbc_reg)
+ return 0;
+
+ /* a malformed number silently falls back to controller 0 */
+ if (*s && kstrtoul(s, 0, &dbgp_num))
+ dbgp_num = 0;
+
+ pr_notice("dbgp_num: %lu\n", dbgp_num);
+
+ /* Locate the host controller */
+ ret = xdbc_find_dbgp(dbgp_num, &bus, &dev, &func);
+ if (ret) {
+ pr_notice("no host controller found in your system\n");
+ return -ENODEV;
+ }
+ xdbc.vendor = read_pci_config_16(bus, dev, func, PCI_VENDOR_ID);
+ xdbc.device = read_pci_config_16(bus, dev, func, PCI_DEVICE_ID);
+ xdbc.bus = bus;
+ xdbc.dev = dev;
+ xdbc.func = func;
+
+ /* Map the IO memory */
+ xdbc.xhci_base = xdbc_map_pci_mmio(bus, dev, func);
+ if (!xdbc.xhci_base)
+ return -EINVAL;
+
+ /* Locate DbC registers */
+ offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
+ if (!offset) {
+ pr_notice("DbC wasn't found in your host controller\n");
+ early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
+ xdbc.xhci_base = NULL;
+ xdbc.xhci_length = 0;
+
+ return -ENODEV;
+ }
+ xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
+
+ return 0;
+}
+
+/*
+ * Second setup stage, run after early_xdbc_parse_parameter() located
+ * the DbC registers: take the controller from the BIOS and initialize
+ * the hardware, releasing all bootmem allocations on failure.
+ * Returns 0 on success or a negative error code.
+ */
+int __init early_xdbc_setup_hardware(void)
+{
+ int ret;
+
+ if (!xdbc.xdbc_reg)
+ return -ENODEV;
+
+ /* hand over the owner of host from BIOS */
+ xdbc_bios_handoff();
+
+ spin_lock_init(&xdbc.lock);
+
+ ret = xdbc_early_setup();
+ if (ret) {
+ pr_notice("failed to setup DbC hardware\n");
+
+ /* unwind everything xdbc_early_setup() may have allocated */
+ xdbc_free_ring(&xdbc.evt_ring);
+ xdbc_free_ring(&xdbc.out_ring);
+ xdbc_free_ring(&xdbc.in_ring);
+
+ if (xdbc.table_dma)
+ free_bootmem(xdbc.table_dma, PAGE_SIZE);
+
+ if (xdbc.out_dma)
+ free_bootmem(xdbc.out_dma, PAGE_SIZE);
+
+ xdbc.table_base = NULL;
+ xdbc.out_buf = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * Acknowledge port status change events.  The CSC/PRC/PLC/CEC bits are
+ * write-1-to-clear, so writing the register value back with the bit set
+ * clears that change flag.  @evt_trb itself is not inspected; PORTSC
+ * carries all the state.
+ */
+static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
+{
+ u32 port_reg;
+
+ port_reg = readl(&xdbc.xdbc_reg->portsc);
+
+ if (port_reg & PORTSC_CSC) {
+ xdbc_trace("%s: connect status change event\n", __func__);
+ writel(port_reg | PORTSC_CSC, &xdbc.xdbc_reg->portsc);
+ port_reg = readl(&xdbc.xdbc_reg->portsc);
+ }
+
+ if (port_reg & PORTSC_PRC) {
+ xdbc_trace("%s: port reset change event\n", __func__);
+ writel(port_reg | PORTSC_PRC, &xdbc.xdbc_reg->portsc);
+ port_reg = readl(&xdbc.xdbc_reg->portsc);
+ }
+
+ if (port_reg & PORTSC_PLC) {
+ xdbc_trace("%s: port link status change event\n", __func__);
+ writel(port_reg | PORTSC_PLC, &xdbc.xdbc_reg->portsc);
+ port_reg = readl(&xdbc.xdbc_reg->portsc);
+ }
+
+ if (port_reg & PORTSC_CEC) {
+ xdbc_trace("%s: config error change\n", __func__);
+ writel(port_reg | PORTSC_CEC, &xdbc.xdbc_reg->portsc);
+ port_reg = readl(&xdbc.xdbc_reg->portsc);
+ }
+}
+
+/*
+ * Handle one DbC Transfer Event TRB: record stall state on error
+ * completion codes, clear the in-process flags for the affected
+ * endpoint, and re-arm the bulk-in endpoint after an IN completion.
+ */
+static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
+{
+ u32 comp_code;
+ size_t remain_length;
+ int ep_id;
+
+ comp_code = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
+ remain_length = EVENT_TRB_LEN(le32_to_cpu(evt_trb->field[2]));
+ ep_id = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));
+
+ /*
+ * Possible Completion Codes for DbC Transfer Event are Success,
+ * Stall Error, USB Transaction Error, Babble Detected Error,
+ * TRB Error, Short Packet, Undefined Error, Event Ring Full Error,
+ * and Vendor Defined Error. TRB error, undefined error and vendor
+ * defined error will result in HOT/HIT set and be handled the same
+ * way as Stall error.
+ */
+ switch (comp_code) {
+ case COMP_SUCCESS:
+ remain_length = 0;
+ /* fall through */
+ case COMP_SHORT_TX:
+ xdbc_trace("%s: endpoint %d remains %zu bytes\n", __func__,
+ ep_id, remain_length);
+ break;
+ case COMP_TRB_ERR:
+ case COMP_BABBLE:
+ case COMP_TX_ERR:
+ case COMP_STALL:
+ default:
+ if (ep_id == XDBC_EPID_OUT)
+ xdbc.flags |= XDBC_FLAGS_BULKOUT_STALL;
+ if (ep_id == XDBC_EPID_IN)
+ xdbc.flags |= XDBC_FLAGS_BULKIN_STALL;
+
+ xdbc_trace("%s: endpoint %d stalled\n", __func__, ep_id);
+ break;
+ }
+
+ if (ep_id == XDBC_EPID_IN) {
+ xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
+ /* keep a read always pending on the IN endpoint */
+ xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
+ } else if (ep_id == XDBC_EPID_OUT) {
+ xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
+ } else {
+ xdbc_trace("%s: invalid endpoint id %d\n", __func__, ep_id);
+ }
+}
+
+/*
+ * Poll the DbC: re-enable bus mastering if something cleared it, track
+ * configure/stall state from the control register, then drain the event
+ * ring and finally publish the new dequeue pointer to the hardware.
+ * Caller must hold xdbc.lock.
+ */
+static void xdbc_handle_events(void)
+{
+ struct xdbc_trb *evt_trb;
+ bool update_erdp = false;
+ u8 command;
+ u32 reg;
+
+ /* DbC DMA needs bus mastering; restore it if it was cleared */
+ command = read_pci_config_byte(xdbc.bus, xdbc.dev,
+ xdbc.func, PCI_COMMAND);
+ if (!(command & PCI_COMMAND_MASTER)) {
+ command |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+ write_pci_config_byte(xdbc.bus, xdbc.dev,
+ xdbc.func, PCI_COMMAND, command);
+ }
+
+ /* check and handle configure-exit event (DRC is write-1-to-clear) */
+ reg = readl(&xdbc.xdbc_reg->control);
+ if (reg & CTRL_DRC) {
+ xdbc.flags &= ~XDBC_FLAGS_CONFIGURED;
+ writel(reg | CTRL_DRC, &xdbc.xdbc_reg->control);
+ } else {
+ xdbc.flags |= XDBC_FLAGS_CONFIGURED;
+ }
+
+ /* check endpoint stall event */
+ reg = readl(&xdbc.xdbc_reg->control);
+ if (reg & CTRL_HIT) {
+ xdbc.flags |= XDBC_FLAGS_BULKIN_STALL;
+ } else {
+ xdbc.flags &= ~XDBC_FLAGS_BULKIN_STALL;
+ if (!(xdbc.flags & XDBC_FLAGS_IN_PROCESS))
+ xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
+ }
+
+ if (reg & CTRL_HOT)
+ xdbc.flags |= XDBC_FLAGS_BULKOUT_STALL;
+ else
+ xdbc.flags &= ~XDBC_FLAGS_BULKOUT_STALL;
+
+ /* consume events while their cycle bit matches our consumer state */
+ evt_trb = xdbc.evt_ring.dequeue;
+ while ((le32_to_cpu(evt_trb->field[3]) & TRB_CYCLE) ==
+ xdbc.evt_ring.cycle_state) {
+ /*
+ * Memory barrier to ensure software sees the trbs
+ * enqueued by hardware.
+ */
+ rmb();
+
+ switch ((le32_to_cpu(evt_trb->field[3]) & TRB_TYPE_BITMASK)) {
+ case TRB_TYPE(TRB_PORT_STATUS):
+ xdbc_handle_port_status(evt_trb);
+ break;
+ case TRB_TYPE(TRB_TRANSFER):
+ xdbc_handle_tx_event(evt_trb);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Advance to the next trb.  Wrap at XDBC_TRBS_PER_SEGMENT -
+ * the segment size programmed into the ERST entry - not at
+ * xhci.h's unrelated TRBS_PER_SEGMENT constant.
+ */
+ ++(xdbc.evt_ring.dequeue);
+ if (xdbc.evt_ring.dequeue ==
+ &xdbc.evt_seg.trbs[XDBC_TRBS_PER_SEGMENT]) {
+ xdbc.evt_ring.dequeue = xdbc.evt_seg.trbs;
+ xdbc.evt_ring.cycle_state ^= 1;
+ }
+
+ evt_trb = xdbc.evt_ring.dequeue;
+ update_erdp = true;
+ }
+
+ /* update event ring dequeue pointer */
+ if (update_erdp)
+ xdbc_write64(__pa(xdbc.evt_ring.dequeue),
+ &xdbc.xdbc_reg->erdp);
+}
+
+/*
+ * Synchronous bulk-out write used by the console path: wait (up to ~1s,
+ * dropping the lock while delaying) for any previous OUT transfer to
+ * complete, then queue @size bytes from @bytes.  Returns the number of
+ * bytes queued or a negative error code.
+ */
+static int xdbc_bulk_write(const char *bytes, int size)
+{
+ unsigned long flags;
+ int ret, timeout = 0;
+
+ spin_lock_irqsave(&xdbc.lock, flags);
+
+ xdbc_handle_events();
+
+ /* Check completion of the previous request. */
+ while (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) {
+ if (timeout > 1000000)
+ break;
+
+ /* drop the lock while busy-waiting so others can progress */
+ spin_unlock_irqrestore(&xdbc.lock, flags);
+ xdbc_delay(100);
+ spin_lock_irqsave(&xdbc.lock, flags);
+ timeout += 100;
+
+ xdbc_handle_events();
+ }
+
+ if (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) {
+ spin_unlock_irqrestore(&xdbc.lock, flags);
+
+ /*
+ * Oops, hardware wasn't able to complete the
+ * previous transfer.
+ */
+ xdbc_trace("oops: previous transfer not completed yet\n");
+
+ return -ETIMEDOUT;
+ }
+
+ ret = xdbc_bulk_transfer((void *)bytes, size, false);
+
+ spin_unlock_irqrestore(&xdbc.lock, flags);
+
+ return ret;
+}
+
+/*
+ * Console ->write callback: chop @str into XDBC_MAX_PACKET chunks and
+ * push them over the bulk-out endpoint, expanding "\n" into "\r\n".
+ * NOTE(review): the static buffer relies on the console layer
+ * serializing ->write calls - confirm no reentrant path exists.
+ */
+static void early_xdbc_write(struct console *con, const char *str, u32 n)
+{
+ int chunk, ret;
+ static char buf[XDBC_MAX_PACKET];
+ int use_cr = 0;
+
+ if (!xdbc.xdbc_reg)
+ return;
+ memset(buf, 0, XDBC_MAX_PACKET);
+ while (n > 0) {
+ for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0;
+ str++, chunk++, n--) {
+ /* emit '\r' first, then revisit the same '\n' */
+ if (!use_cr && *str == '\n') {
+ use_cr = 1;
+ buf[chunk] = '\r';
+ str--;
+ n++;
+ continue;
+ }
+ if (use_cr)
+ use_cr = 0;
+ buf[chunk] = *str;
+ }
+ if (chunk > 0) {
+ ret = xdbc_bulk_write(buf, chunk);
+ if (ret < 0)
+ break;
+ }
+ }
+}
+
+/* Boot console backed by the DbC bulk-out endpoint */
+static struct console early_xdbc_console = {
+ .name = "earlyxdbc",
+ .write = early_xdbc_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+/*
+ * Register the DbC boot console unless another early console already
+ * exists.  Without "keep" on the command line, CON_BOOT makes the
+ * console auto-unregister when a real console comes up.
+ */
+void __init early_xdbc_register_console(void)
+{
+ if (early_console)
+ return;
+
+ early_console = &early_xdbc_console;
+ if (early_console_keep)
+ early_console->flags &= ~CON_BOOT;
+ else
+ early_console->flags |= CON_BOOT;
+ register_console(early_console);
+}
+
+/*
+ * Periodic (every 100us) work item that polls the event ring and
+ * restarts the hardware after an external reset, once the runtime
+ * workqueue is available (see xdbc_init()).
+ */
+static void xdbc_scrub_function(struct work_struct *work)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&xdbc.lock, flags);
+
+ /*
+ * DbC is running, check the event ring and
+ * handle the events.
+ * NOTE(review): this tests CTRL_DRC ("run change") although the
+ * comment says "running" (CTRL_DCR) - confirm which bit is meant.
+ */
+ if (readl(&xdbc.xdbc_reg->control) & CTRL_DRC)
+ xdbc_handle_events();
+
+ /*
+ * External reset happened. Need to restart the
+ * debugging hardware.
+ */
+ if (unlikely(!(readl(&xdbc.xdbc_reg->control) & CTRL_DCE)))
+ xdbc_handle_external_reset();
+
+ spin_unlock_irqrestore(&xdbc.lock, flags);
+
+ /* self-rearm for the next polling interval */
+ queue_delayed_work(xdbc_wq, &xdbc.scrub, usecs_to_jiffies(100));
+}
+
+/*
+ * Late (subsys_initcall) transition from early to runtime operation:
+ * switch to udelay(), replace the early_ioremap mapping with a normal
+ * ioremap, and start the polling workqueue.  If the console was never
+ * used or was only a boot console, shut the DbC down and free all
+ * resources instead so the port returns to the host controller.
+ */
+static int __init xdbc_init(void)
+{
+ void __iomem *base;
+ u32 offset;
+ int ret = 0;
+
+ if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
+ return 0;
+
+ xdbc_delay = xdbc_runtime_delay;
+
+ /*
+ * It's time to shutdown DbC, so that the debug
+ * port could be reused by the host controller.
+ */
+ if (early_xdbc_console.index == -1 ||
+ (early_xdbc_console.flags & CON_BOOT)) {
+ xdbc_trace("hardware not used any more\n");
+ goto free_and_quit;
+ }
+
+ base = ioremap_nocache(xdbc.xhci_start, xdbc.xhci_length);
+ if (!base) {
+ xdbc_trace("failed to remap the io address\n");
+ ret = -ENOMEM;
+ goto free_and_quit;
+ }
+
+ early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
+ xdbc_trace("early mapped IO address released\n");
+
+ /* recompute the register pointers against the new mapping */
+ xdbc.xhci_base = base;
+ offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
+ xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
+
+ /*
+ * NOTE(review): XDBC_FLAGS_INITIALIZED was already checked at
+ * function entry, so this condition is always true here.
+ */
+ if (xdbc.flags & XDBC_FLAGS_INITIALIZED) {
+ xdbc_wq = create_singlethread_workqueue("xdbc_wq");
+
+ INIT_DELAYED_WORK(&xdbc.scrub, xdbc_scrub_function);
+ queue_delayed_work(xdbc_wq, &xdbc.scrub, usecs_to_jiffies(100));
+ }
+
+ return 0;
+
+free_and_quit:
+ xdbc_free_ring(&xdbc.evt_ring);
+ xdbc_free_ring(&xdbc.out_ring);
+ xdbc_free_ring(&xdbc.in_ring);
+ free_bootmem(xdbc.table_dma, PAGE_SIZE);
+ free_bootmem(xdbc.out_dma, PAGE_SIZE);
+ writel(0, &xdbc.xdbc_reg->control);
+ early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
+
+ return ret;
+}
+subsys_initcall(xdbc_init);
diff --git a/drivers/usb/early/xhci-dbc.h b/drivers/usb/early/xhci-dbc.h
new file mode 100644
index 0000000..3aaf0bb
--- /dev/null
+++ b/drivers/usb/early/xhci-dbc.h
@@ -0,0 +1,206 @@
+/*
+ * xhci-dbc.h - xHCI debug capability early driver
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_XHCI_DBC_H
+#define __LINUX_XHCI_DBC_H
+
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+
+/**
+ * struct xdbc_regs - xHCI Debug Capability Register interface.
+ */
+struct xdbc_regs {
+	__le32 capability;
+	__le32 doorbell;
+	__le32 ersts; /* Event Ring Segment Table Size */
+	__le32 rvd0; /* 0c~0f reserved bits */
+	__le64 erstba; /* Event Ring Segment Table Base Address */
+	__le64 erdp; /* Event Ring Dequeue Pointer */
+	__le32 control;
+#define DEBUG_MAX_BURST(p) (((p) >> 16) & 0xff)
+#define CTRL_DCR BIT(0) /* DbC Run */
+#define CTRL_PED BIT(1) /* Port Enable/Disable */
+#define CTRL_HOT BIT(2) /* Halt Out TR */
+#define CTRL_HIT BIT(3) /* Halt In TR */
+#define CTRL_DRC BIT(4) /* DbC run change */
+#define CTRL_DCE BIT(31) /* DbC enable */
+	__le32 status;
+#define DCST_DPN(p) (((p) >> 24) & 0xff) /* Debug Port Number */
+	__le32 portsc; /* Port status and control */
+#define PORTSC_CCS BIT(0) /* Current Connect Status */
+#define PORTSC_CSC BIT(17) /* Connect Status Change */
+#define PORTSC_PRC BIT(21) /* Port Reset Change */
+#define PORTSC_PLC BIT(22) /* Port Link status Change */
+#define PORTSC_CEC BIT(23) /* Port Config Error Change */
+	__le32 rvd1; /* 2c~2f reserved bits (follows portsc at 0x28) */
+	__le64 dccp; /* Debug Capability Context Pointer */
+	__le32 devinfo1; /* Device Descriptor Info Register 1 */
+	__le32 devinfo2; /* Device Descriptor Info Register 2 */
+};
+
+/*
+ * xHCI Debug Capability data structures
+ */
+
+/* Generic TRB (Transfer Request Block): four little-endian 32-bit words. */
+struct xdbc_trb {
+	__le32 field[4];
+};
+
+/* One entry of the Event Ring Segment Table. */
+struct xdbc_erst_entry {
+	__le64 seg_addr; /* DMA base address of the event ring segment */
+	__le32 seg_size; /* number of TRBs in the segment */
+	__le32 rsvd;
+};
+
+/*
+ * DbC Info Context. The first four fields presumably hold the DMA
+ * addresses of the string0/manufacturer/product/serial descriptors
+ * and "length" their sizes -- TODO confirm against the context setup
+ * code in xhci-dbc.c.
+ */
+struct xdbc_info_context {
+	__le64 string0;
+	__le64 manufacture;
+	__le64 product;
+	__le64 serial;
+	__le32 length;
+	__le32 rsvdz[7];
+};
+
+/* Endpoint context; field names mirror the standard xHCI EP context. */
+struct xdbc_ep_context {
+	__le32 ep_info1;
+	__le32 ep_info2;
+	__le64 deq; /* TR dequeue pointer */
+	__le32 tx_info;
+	__le32 rsvd0[11];
+};
+
+/*
+ * Debug Capability Context: the info context followed by the bulk
+ * OUT and bulk IN endpoint contexts, pointed to by the dccp register.
+ */
+struct xdbc_context {
+	struct xdbc_info_context info;
+	struct xdbc_ep_context out;
+	struct xdbc_ep_context in;
+};
+
+#define XDBC_INFO_CONTEXT_SIZE 48
+
+#define XDBC_MAX_STRING_LENGTH 64
+#define XDBC_STRING_MANUFACTURE "Linux"
+#define XDBC_STRING_PRODUCT "Remote GDB"
+#define XDBC_STRING_SERIAL "0001"
+/* Backing storage for the string descriptors advertised by DbC. */
+struct xdbc_strings {
+	char string0[XDBC_MAX_STRING_LENGTH];
+	char manufacture[XDBC_MAX_STRING_LENGTH];
+	char product[XDBC_MAX_STRING_LENGTH];
+	char serial[XDBC_MAX_STRING_LENGTH];
+};
+
+/* Values presented to the debug host in the device descriptor info regs. */
+#define XDBC_PROTOCOL 1 /* GNU Remote Debug Command Set */
+#define XDBC_VENDOR_ID 0x1d6b /* Linux Foundation 0x1d6b */
+#define XDBC_PRODUCT_ID 0x0004 /* __le16 idProduct; device 0004 */
+#define XDBC_DEVICE_REV 0x0010 /* 0.10 */
+
+/*
+ * software state structure
+ */
+
+/* One contiguous ring segment: CPU virtual address plus DMA address. */
+struct xdbc_segment {
+	struct xdbc_trb *trbs;
+	dma_addr_t dma;
+};
+
+#define XDBC_TRBS_PER_SEGMENT 256
+
+/* Producer/consumer view of a single-segment TRB ring. */
+struct xdbc_ring {
+	struct xdbc_segment *segment;
+	struct xdbc_trb *enqueue; /* next TRB slot to fill */
+	struct xdbc_trb *dequeue; /* next TRB slot to consume */
+	u32 cycle_state; /* current ring cycle bit */
+};
+
+/* Endpoint IDs for the bulk OUT/IN endpoints -- presumably the values
+ * reported in transfer event TRBs; verify against the event handler. */
+#define XDBC_EPID_OUT 2
+#define XDBC_EPID_IN 3
+
+/*
+ * Global software state of the early DbC driver. A single static
+ * instance ("xdbc") covers the one debug capability in use.
+ */
+struct xdbc_state {
+	/* PCI device info of the xHC that owns the debug capability */
+	u16 vendor;
+	u16 device;
+	u32 bus;
+	u32 dev;
+	u32 func;
+	void __iomem *xhci_base; /* mapped xHC MMIO (early fixmap, later ioremap) */
+	u64 xhci_start; /* physical MMIO base */
+	size_t xhci_length;
+	int port_number; /* debug port number, from DCST_DPN() */
+#define XDBC_PCI_MAX_BUSES 256
+#define XDBC_PCI_MAX_DEVICES 32
+#define XDBC_PCI_MAX_FUNCTION 8
+
+	/* DbC register base (inside the xHC extended capability list) */
+	struct xdbc_regs __iomem *xdbc_reg;
+
+	/*
+	 * DbC table page: one page that apparently backs the ERST,
+	 * contexts and strings below (it is freed as a single PAGE_SIZE
+	 * bootmem chunk) -- confirm against the allocation code.
+	 */
+	dma_addr_t table_dma;
+	void *table_base;
+
+#define XDBC_TABLE_ENTRY_SIZE 64
+#define XDBC_ERST_ENTRY_NUM 1
+#define XDBC_DBCC_ENTRY_NUM 3
+#define XDBC_STRING_ENTRY_NUM 4
+
+	/* event ring segment table */
+	dma_addr_t erst_dma;
+	size_t erst_size;
+	void *erst_base;
+
+	/* event ring segments */
+	struct xdbc_ring evt_ring;
+	struct xdbc_segment evt_seg;
+
+	/* debug capability contexts */
+	dma_addr_t dbcc_dma;
+	size_t dbcc_size;
+	void *dbcc_base;
+
+	/* descriptor strings */
+	dma_addr_t string_dma;
+	size_t string_size;
+	void *string_base;
+
+	/* bulk OUT endpoint */
+	struct xdbc_ring out_ring;
+	struct xdbc_segment out_seg;
+	void *out_buf;
+	dma_addr_t out_dma;
+
+	/* bulk IN endpoint */
+	struct xdbc_ring in_ring;
+	struct xdbc_segment in_seg;
+	void *in_buf;
+	dma_addr_t in_dma;
+
+	/* locks to serialize access to hardware */
+	spinlock_t lock;
+	u32 flags;
+#define XDBC_FLAGS_INITIALIZED BIT(0) /* early probe/setup succeeded */
+#define XDBC_FLAGS_BULKIN_STALL BIT(1)
+#define XDBC_FLAGS_BULKOUT_STALL BIT(2)
+#define XDBC_FLAGS_IN_PROCESS BIT(3)
+#define XDBC_FLAGS_OUT_PROCESS BIT(4)
+#define XDBC_FLAGS_CONFIGURED BIT(5)
+
+	/* periodic worker that services events and external resets */
+	struct delayed_work scrub;
+};
+
+/* Max packet size of the bulk endpoints (SuperSpeed bulk value). */
+#define XDBC_MAX_PACKET 1024
+
+/* door bell target */
+#define OUT_EP_DOORBELL 0
+#define IN_EP_DOORBELL 1
+#define DOOR_BELL_TARGET(p) (((p) & 0xff) << 8) /* target field, bits 15:8 */
+
+/* 64-bit register accessors. NOTE(review): a NULL xhci_hcd is passed --
+ * confirm xhci_read_64()/xhci_write_64() never dereference it. */
+#define xdbc_read64(regs) xhci_read_64(NULL, (regs))
+#define xdbc_write64(val, regs) xhci_write_64(NULL, (val), (regs))
+
+#endif /* __LINUX_XHCI_DBC_H */
diff --git a/include/linux/usb/xhci-dbgp.h b/include/linux/usb/xhci-dbgp.h
new file mode 100644
index 0000000..aa9441f
--- /dev/null
+++ b/include/linux/usb/xhci-dbgp.h
@@ -0,0 +1,22 @@
+/*
+ * Standalone xHCI debug capability driver
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_XHCI_DBGP_H
+#define __LINUX_XHCI_DBGP_H
+
+#ifdef CONFIG_EARLY_PRINTK_XDBC
+int __init early_xdbc_parse_parameter(char *s);
+int __init early_xdbc_setup_hardware(void);
+void __init early_xdbc_register_console(void);
+#else
+/*
+ * Inline stubs so that generic boot code can call these without
+ * wrapping every call site in #ifdef CONFIG_EARLY_PRINTK_XDBC.
+ * (early_xdbc_parse_parameter() is only referenced from code that is
+ * itself conditional on the config option, so it needs no stub.)
+ */
+static inline int __init early_xdbc_setup_hardware(void)
+{
+	return -ENODEV;
+}
+static inline void __init early_xdbc_register_console(void)
+{
+}
+#endif /* CONFIG_EARLY_PRINTK_XDBC */
+
+#endif /* __LINUX_XHCI_DBGP_H */
--
2.1.4