[PATCH 2/2] Synopsys USB 2.0 Device Controller (UDC) Driver

From: Raviteja Garimella
Date: Wed Nov 30 2016 - 01:06:07 EST


This is the driver for the Synopsys DesignWare Cores USB Device
Controller (UDC) Subsystem with the AMBA Advanced High-Performance
Bus (AHB). This driver works with Synopsys UDC20 products.

Signed-off-by: Raviteja Garimella <raviteja.garimella@xxxxxxxxxxxx>
---
drivers/usb/gadget/udc/Kconfig | 12 +
drivers/usb/gadget/udc/Makefile | 1 +
drivers/usb/gadget/udc/snps_udc.c | 1751 +++++++++++++++++++++++++++++++++++++
drivers/usb/gadget/udc/snps_udc.h | 1071 +++++++++++++++++++++++
4 files changed, 2835 insertions(+)
create mode 100644 drivers/usb/gadget/udc/snps_udc.c
create mode 100644 drivers/usb/gadget/udc/snps_udc.h

diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 658b8da..28cd679 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -239,6 +239,18 @@ config USB_MV_U3D
MARVELL PXA2128 Processor series include a super speed USB3.0 device
controller, which support super speed USB peripheral.

+config USB_SNP_UDC
+ tristate "Synopsys USB 2.0 Device controller"
+ select USB_GADGET_DUALSPEED
+ depends on (ARM || ARM64) && USB_GADGET
+ default ARCH_BCM_IPROC
+ help
+ This adds Device support for Synopsys Designware core
+ AHB subsystem USB 2.0 Device Controller (UDC).
+
+ This driver works with Synopsys UDC20 products.
+ If unsure, say N.
+
#
# Controllers available in both integrated and discrete versions
#
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index 98e74ed..2b63a2b 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -36,4 +36,5 @@ obj-$(CONFIG_USB_FOTG210_UDC) += fotg210-udc.o
obj-$(CONFIG_USB_MV_U3D) += mv_u3d_core.o
obj-$(CONFIG_USB_GR_UDC) += gr_udc.o
obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o
+obj-$(CONFIG_USB_SNP_UDC) += snps_udc.o
obj-$(CONFIG_USB_BDC_UDC) += bdc/
diff --git a/drivers/usb/gadget/udc/snps_udc.c b/drivers/usb/gadget/udc/snps_udc.c
new file mode 100644
index 0000000..d8c46ce
--- /dev/null
+++ b/drivers/usb/gadget/udc/snps_udc.c
@@ -0,0 +1,1751 @@
+/*
+ * snps_udc.c - Synopsys USB 2.0 Device Controller driver
+ *
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/extcon.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/proc_fs.h>
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/version.h>
+#include "snps_udc.h"
+
+#define DRIVER_DESC "Driver for Synopsys Designware core UDC"
+
+/*
+ * ep0_setup_init() - re-arm endpoint 0 for the next SETUP transaction.
+ * @ep: the control endpoint
+ * @status: result of the previous control request; non-zero stalls EP0
+ *
+ * Marks the SETUP DMA buffer host-ready and forces EP0 back to the OUT
+ * direction.  On success the NAK bits of both directions are cleared;
+ * on failure both directions are stalled so the host sees a protocol
+ * STALL for the failed request.  OUT interrupts and DMA are re-enabled
+ * in either case so the next SETUP packet can be received.
+ */
+static void ep0_setup_init(struct snps_udc_ep *ep, int status)
+{
+ struct snps_udc *udc = ep->udc;
+
+ ep->dma.virt->setup.status = DMA_STS_BUF_HOST_READY;
+ ep->dirn = USB_DIR_OUT;
+ ep->stopped = 0;
+
+ if (!status) {
+ clear_ep_nak(udc->regs, ep->num, USB_DIR_OUT);
+ clear_ep_nak(udc->regs, ep->num, USB_DIR_IN);
+ } else {
+ /* Gadget rejected the request: stall both directions of EP0 */
+ enable_ep_stall(udc->regs, ep->num, USB_DIR_IN);
+ enable_ep_stall(udc->regs, ep->num, USB_DIR_OUT);
+ }
+
+ enable_udc_ep_irq(udc->regs, ep->num, USB_DIR_OUT);
+ enable_ep_dma(udc->regs, ep->num, USB_DIR_OUT);
+
+ dev_dbg(udc->dev, "%s setup buffer initialized\n", ep->name);
+}
+
+/*
+ * ep_dma_init() - set up the DMA descriptor structures for one endpoint.
+ *
+ * Points the per-endpoint virt/phys descriptor views at this endpoint's
+ * slot in the controller-wide DMA area, programs the SETUP buffer
+ * pointer, and links the data descriptors into a circular chain
+ * (desc[last] wraps back to desc[0]).  Both the IN and OUT data
+ * descriptor pointers are programmed to the start of the chain.
+ */
+static void ep_dma_init(struct snps_udc_ep *ep)
+{
+ struct snps_udc *udc = ep->udc;
+ u32 desc_cnt = (DESC_CNT - 1);
+ u32 i;
+
+ ep->dma.virt = &ep->udc->dma.virt->ep[ep->num];
+ ep->dma.phys = &ep->udc->dma.phys->ep[ep->num];
+
+ ep->dma.virt->setup.status = DMA_STS_BUF_HOST_BUSY;
+ set_setup_buf_ptr(udc->regs, ep->num, USB_DIR_OUT,
+ &ep->dma.phys->setup);
+
+ /* Chain descriptors; the out-of-range link written for the last
+ * entry is immediately overwritten below to close the ring.
+ */
+ for (i = 0; i < DESC_CNT; i++) {
+ ep->dma.virt->desc[i].status = DMA_STS_BUF_HOST_BUSY;
+ ep->dma.virt->desc[i].next_desc_addr =
+ (dma_addr_t)&ep->dma.phys->desc[i + 1];
+ }
+ ep->dma.virt->desc[desc_cnt].next_desc_addr =
+ (dma_addr_t)&ep->dma.phys->desc[0];
+
+ set_data_desc_ptr(udc->regs, ep->num, USB_DIR_OUT,
+ &ep->dma.phys->desc[0]);
+ set_data_desc_ptr(udc->regs, ep->num, USB_DIR_IN,
+ &ep->dma.phys->desc[0]);
+
+ dev_dbg(udc->dev, " %s dma initialized\n", ep->name);
+}
+
+/*
+ * ep_data_dma_init() - prepare DMA bookkeeping for a new data transfer.
+ *
+ * Caller must have set ep->dma.usb_req to the request at the head of
+ * ep->queue before calling.  If the request uses the driver's aligned
+ * bounce buffer, the original DMA address is saved and (for IN
+ * transfers) the payload is copied into the bounce buffer.  len_max is
+ * the per-descriptor chunk size: the whole request length in buffer-
+ * fill mode (in_bf_mode/out_bf_mode), otherwise one max packet.
+ */
+static void ep_data_dma_init(struct snps_udc_ep *ep)
+{
+ struct ep_xfer_req *ep_req;
+
+ dev_dbg(ep->udc->dev, "enter: %s\n", __func__);
+
+ ep_req = list_first_entry(&ep->queue, struct ep_xfer_req, queue);
+
+ if (ep_req->dma_aligned) {
+ /* Redirect DMA to the aligned bounce buffer */
+ ep_req->dma_addr_orig = ep_req->usb_req.dma;
+ ep_req->usb_req.dma = ep->dma.aligned_addr;
+ if (ep->dirn == USB_DIR_IN)
+ memcpy(ep->dma.aligned_buf, ep_req->usb_req.buf,
+ ep_req->usb_req.length);
+ }
+
+ ep->dma.done = 0;
+ ep->dma.len_done = 0;
+ ep->dma.len_rem = ep->dma.usb_req->length;
+ ep->dma.buf_addr = ep->dma.usb_req->dma;
+ ep->dma.status = DMA_STS_RX_SUCCESS;
+
+ if ((ep->dirn == USB_DIR_IN) &&
+ (ep->type != USB_ENDPOINT_XFER_ISOC)) {
+ if (in_bf_mode)
+ ep->dma.len_max = ep->dma.usb_req->length;
+ else
+ ep->dma.len_max = ep->usb_ep.maxpacket;
+ } else {
+ if (out_bf_mode)
+ ep->dma.len_max = ep->dma.usb_req->length;
+ else
+ ep->dma.len_max = ep->usb_ep.maxpacket;
+ }
+
+ dma_desc_chain_reset(ep);
+}
+
+/*
+ * ep_data_dma_finish() - tear down DMA for the completed head request.
+ *
+ * Disables the endpoint's interrupt and DMA, then, if the transfer went
+ * through the aligned bounce buffer, copies received OUT data back to
+ * the caller's buffer and restores the request's original DMA address.
+ */
+static void ep_data_dma_finish(struct snps_udc_ep *ep)
+{
+ struct snps_udc *udc = ep->udc;
+ struct ep_xfer_req *ep_req;
+
+ disable_udc_ep_irq(udc->regs, ep->num, ep->dirn);
+ disable_ep_dma(udc->regs, ep->num, ep->dirn);
+
+ ep_req = list_first_entry(&ep->queue, struct ep_xfer_req, queue);
+
+ if (ep_req->dma_aligned) {
+ if (ep->dirn == USB_DIR_OUT)
+ memcpy(ep_req->usb_req.buf,
+ ep->dma.aligned_buf, ep_req->usb_req.length);
+ ep_req->usb_req.dma = ep_req->dma_addr_orig;
+ }
+ dev_dbg(udc->dev, "%s dma finished\n", ep->name);
+}
+
+/*
+ * ep_data_dma_add() - fill the descriptor chain with the next chunk(s)
+ * of the current request.
+ *
+ * Builds descriptors of at most len_max bytes each until the chain is
+ * full or the request is exhausted.  The 'zero' flag drives an extra
+ * zero-length packet for bulk IN transfers that end on a max-packet
+ * boundary.  ISOC IN queues only one descriptor per call and stamps it
+ * with the next frame number.  The last descriptor written is tagged
+ * DMA_STS_LAST_DESC.
+ */
+static void ep_data_dma_add(struct snps_udc_ep *ep)
+{
+ struct data_desc *desc = NULL;
+ u32 status;
+ u32 len;
+
+ if (!ep->dma.len_rem)
+ ep->dma.usb_req->zero = 1;
+
+ ep->dma.last = ep->dma.usb_req->zero;
+
+ while (!dma_desc_chain_is_full(ep) &&
+ (ep->dma.len_rem || ep->dma.usb_req->zero)) {
+ desc = dma_desc_chain_alloc(ep);
+ len = (ep->dma.len_rem < ep->dma.len_max) ?
+ ep->dma.len_rem : ep->dma.len_max;
+ ep->dma.len_rem -= len;
+ status = 0;
+
+ /* NOTE(review): len is clamped to len_max just above, so both
+ * arms of this test are always true; the intent may have been
+ * a strict '<' (final short chunk) -- confirm against the
+ * original design before changing.
+ */
+ if (len <= ep->dma.len_max ||
+ (out_bf_mode && (len <= ep->dma.len_max))) {
+ if (in_bf_mode ||
+ !((ep->dirn == USB_DIR_IN) &&
+ (ep->type == USB_ENDPOINT_XFER_BULK) &&
+ (len != 0) &&
+ (len % ep->usb_ep.maxpacket == 0)))
+ ep->dma.usb_req->zero = 0;
+ }
+
+ if ((ep->dirn == USB_DIR_IN) &&
+ (ep->type == USB_ENDPOINT_XFER_ISOC)) {
+ ep->dma.frame_num += ep->dma.frame_incr;
+ dev_dbg(ep->udc->dev, "%s: DMA started: frame_num=%d.%d\n",
+ ep->name, (ep->dma.frame_num >> 3),
+ (ep->dma.frame_num & 0x7));
+ status |= ((ep->dma.frame_num <<
+ DMA_STS_FRAME_NUM_SHIFT)
+ & DMA_STS_FRAME_NUM_MASK);
+ }
+
+ desc->buf_addr = ep->dma.buf_addr;
+ status |= (len << DMA_STS_BYTE_CNT_SHIFT);
+ desc->status = status | DMA_STS_BUF_HOST_READY;
+ /* Ensure all writes are done before going for next descriptor*/
+ wmb();
+ ep->dma.buf_addr += len;
+
+ /* ISOC IN: hand the controller one descriptor at a time */
+ if ((ep->dirn == USB_DIR_IN) &&
+ (ep->type == USB_ENDPOINT_XFER_ISOC))
+ break;
+ }
+
+ if (desc)
+ desc->status |= DMA_STS_LAST_DESC;
+
+ dev_dbg(ep->udc->dev, "%s dma data added\n", ep->name);
+}
+
+/*
+ * ep_data_dma_remove() - reclaim completed descriptors and account the
+ * transferred bytes into the current usb_request.
+ *
+ * Walks the descriptor chain head-first, returning each descriptor to
+ * the HOST_BUSY state.  Byte counts are credited for IN descriptors and
+ * for the last OUT descriptor; any RX error marks the request -EIO.
+ * ISOC IN resets the whole chain instead of freeing entry by entry.
+ * The request is marked complete (status 0) when nothing remains or a
+ * short packet ended the transfer.
+ */
+static void ep_data_dma_remove(struct snps_udc_ep *ep)
+{
+ struct data_desc *desc;
+ u32 status;
+ u32 len = 0;
+
+ while (!dma_desc_chain_is_empty(ep)) {
+ desc = dma_desc_chain_head(ep);
+ status = desc->status;
+ desc->status = DMA_STS_BUF_HOST_BUSY;
+ /* Ensure all writes are done before going for next descriptor*/
+ wmb();
+ len = (status & DMA_STS_NISO_BYTE_CNT_MASK) >>
+ DMA_STS_NISO_BYTE_CNT_SHIFT;
+
+ if ((ep->dirn == USB_DIR_IN) || (status &
+ DMA_STS_LAST_DESC)) {
+ ep->dma.len_done += len;
+ ep->dma.usb_req->actual += len;
+ }
+
+ if ((status & DMA_STS_RX_MASK) != DMA_STS_RX_SUCCESS) {
+ ep->dma.status = status & DMA_STS_RX_MASK;
+ ep->dma.usb_req->status = -EIO;
+ dev_warn(ep->udc->dev, "%s: DMA error\n", ep->name);
+ }
+
+ if ((ep->dirn == USB_DIR_IN) &&
+ (ep->type == USB_ENDPOINT_XFER_ISOC)) {
+ if (ep->dma.usb_req->actual ==
+ ep->dma.usb_req->length)
+ ep->dma.usb_req->status = 0;
+ dma_desc_chain_reset(ep);
+ } else {
+ dma_desc_chain_free(ep);
+ }
+ }
+
+ /* Done when all bytes moved, or a short packet terminated the xfer */
+ if ((!ep->dma.len_rem || (len < ep->usb_ep.maxpacket)) &&
+ (ep->dma.usb_req->status == -EINPROGRESS))
+ ep->dma.usb_req->status = 0;
+
+ dev_dbg(ep->udc->dev, "%s dma data removed\n", ep->name);
+}
+
+/*
+ * fifo_ram_alloc() - reserve controller FIFO RAM for an endpoint.
+ * @ep: endpoint being configured (ep->dirn and ep->type must be set)
+ * @max_pkt_size: negotiated wMaxPacketSize in bytes
+ *
+ * Computes the RX/TX FIFO requirement by direction and transfer type
+ * (ISOC IN gets double buffering; control endpoints get both), checks
+ * it against the remaining pool, and debits the pool on success.
+ * RX space is only debited when the mrx_fifo mode flag is set.
+ *
+ * Return: 0 on success, -ENODEV for invalid attributes, -ENOSPC when
+ * the FIFO pool cannot accommodate the endpoint.
+ */
+static int fifo_ram_alloc(struct snps_udc_ep *ep, u32 max_pkt_size)
+{
+ u32 rx_cnt;
+ u32 tx_cnt;
+
+ switch (EP_DIRN_TYPE(ep->dirn, ep->type)) {
+ case EP_DIRN_TYPE(USB_DIR_OUT, USB_ENDPOINT_XFER_BULK):
+ case EP_DIRN_TYPE(USB_DIR_OUT, USB_ENDPOINT_XFER_INT):
+ case EP_DIRN_TYPE(USB_DIR_OUT, USB_ENDPOINT_XFER_ISOC):
+ rx_cnt = FIFO_SZ_U8(max_pkt_size);
+ tx_cnt = 0;
+ break;
+
+ case EP_DIRN_TYPE(USB_DIR_IN, USB_ENDPOINT_XFER_BULK):
+ case EP_DIRN_TYPE(USB_DIR_IN, USB_ENDPOINT_XFER_INT):
+ rx_cnt = 0;
+ tx_cnt = FIFO_SZ_U8(max_pkt_size);
+ break;
+
+ case EP_DIRN_TYPE(USB_DIR_IN, USB_ENDPOINT_XFER_ISOC):
+ /* ISOC IN is double-buffered */
+ rx_cnt = 0;
+ tx_cnt = 2 * FIFO_SZ_U8(max_pkt_size);
+ break;
+
+ case EP_DIRN_TYPE(USB_DIR_IN, USB_ENDPOINT_XFER_CONTROL):
+ case EP_DIRN_TYPE(USB_DIR_OUT, USB_ENDPOINT_XFER_CONTROL):
+ rx_cnt = FIFO_SZ_U8(max_pkt_size);
+ tx_cnt = rx_cnt;
+ break;
+
+ default:
+ dev_err(ep->udc->dev, "%s: invalid EP attributes\n", ep->name);
+ return -ENODEV;
+ }
+
+ dev_dbg(ep->udc->dev, "rx req=%u free=%u: tx req=%u free=%u\n",
+ rx_cnt, ep->udc->rx_fifo_space, tx_cnt, ep->udc->tx_fifo_space);
+
+ if ((ep->udc->rx_fifo_space < rx_cnt) ||
+ (ep->udc->tx_fifo_space < tx_cnt)) {
+ dev_err(ep->udc->dev, "%s: fifo alloc failed\n", ep->name);
+ return -ENOSPC;
+ }
+
+ ep->rx_fifo_size = rx_cnt;
+ ep->tx_fifo_size = tx_cnt;
+
+ if (mrx_fifo)
+ ep->udc->rx_fifo_space -= rx_cnt;
+
+ ep->udc->tx_fifo_space -= tx_cnt;
+
+ return 0;
+}
+
+/*
+ * fifo_ram_free() - return an endpoint's FIFO RAM to the shared pools.
+ *
+ * Reverses fifo_ram_alloc(): the RX reservation is credited back only
+ * when the mrx_fifo mode flag is set (mirroring the debit path), the TX
+ * reservation is always credited, and the per-endpoint bookkeeping is
+ * cleared.
+ */
+static void fifo_ram_free(struct snps_udc_ep *ep)
+{
+	struct snps_udc *udc = ep->udc;
+
+	if (mrx_fifo)
+		udc->rx_fifo_space += ep->rx_fifo_size;
+	udc->tx_fifo_space += ep->tx_fifo_size;
+
+	ep->rx_fifo_size = 0;
+	ep->tx_fifo_size = 0;
+}
+
+/*
+ * ep_cfg() - configure an endpoint's type and maximum packet size.
+ * @ep: endpoint to configure
+ * @type: USB_ENDPOINT_XFER_* transfer type
+ * @max_pkt_size: maximum packet size in bytes
+ *
+ * Records the transfer type (fifo_ram_alloc() keys off ep->type, so it
+ * is set before the allocation), reserves FIFO RAM, and programs the
+ * endpoint registers when the controller is connected.
+ *
+ * Return: 0 on success, -ENOSPC when FIFO RAM is exhausted.
+ */
+static int ep_cfg(struct snps_udc_ep *ep, u32 type,
+		  u32 max_pkt_size)
+{
+	struct snps_udc *udc = ep->udc;
+
+	ep->type = type;
+	if (fifo_ram_alloc(ep, max_pkt_size) != 0)
+		return -ENOSPC;
+
+	ep->usb_ep.maxpacket = max_pkt_size;
+
+	if (ep->udc->conn_type)
+		init_ep_reg(udc->regs, ep->num, ep->type, ep->dirn,
+			    max_pkt_size);
+	dev_dbg(udc->dev, "ep_cfg: %s: type=%u dirn=0x%x pkt=%u\n",
+		ep->usb_ep.name, type, ep->dirn, max_pkt_size);
+
+	return 0;
+}
+
+/*
+ * epreq_xfer_done() - complete one request and invoke its callback.
+ * @ep: endpoint owning the request
+ * @ep_req: request to complete (removed from ep->queue here)
+ * @status: completion status, stored only if still -EINPROGRESS
+ *
+ * Undoes the bounce-buffer redirection or the DMA mapping made at
+ * queue time, then calls the gadget's completion callback.  The UDC
+ * lock is dropped around the callback (it may re-queue requests), and
+ * ep->stopped is forced on for the duration so no new transfer starts
+ * underneath the callback.
+ */
+static void epreq_xfer_done(struct snps_udc_ep *ep,
+ struct ep_xfer_req *ep_req, int status)
+{
+ struct snps_udc *udc = ep->udc;
+ u32 stopped;
+
+ list_del_init(&ep_req->queue);
+
+ if (ep_req->usb_req.status == -EINPROGRESS)
+ ep_req->usb_req.status = status;
+
+ if (ep_req->dma_aligned) {
+ ep_req->dma_aligned = 0;
+ } else if (ep_req->dma_mapped) {
+ dma_unmap_single(ep->udc->gadget.dev.parent,
+ ep_req->usb_req.dma,
+ (ep_req->usb_req.length ?
+ ep_req->usb_req.length : 1),
+ (ep->dirn == USB_DIR_IN ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE));
+ ep_req->dma_mapped = 0;
+ ep_req->usb_req.dma = DMA_ADDR_INVALID;
+ }
+
+ dev_dbg(udc->dev, "%s xfer done req=0x%p buf=0x%p len=%d actual=%d\n",
+ ep->name, &ep_req->usb_req, ep_req->usb_req.buf,
+ ep_req->usb_req.length, ep_req->usb_req.actual);
+
+ stopped = ep->stopped;
+ ep->stopped = 1;
+ /* Callback may sleep or re-queue: run it without the UDC lock */
+ spin_unlock(&ep->udc->lock);
+ ep_req->usb_req.complete(&ep->usb_ep, &ep_req->usb_req);
+ spin_lock(&ep->udc->lock);
+ ep->stopped = stopped;
+}
+
+/*
+ * epreq_xfer_process() - advance the current transfer after a DMA event.
+ *
+ * Reclaims completed descriptors, and if the current request finished:
+ *  - a successful control IN turns around to receive the OUT status
+ *    stage by reusing the same request with length 0;
+ *  - a bulk IN buffer-fill transfer ending exactly on a max-packet
+ *    boundary is restarted with length 0 to emit the trailing ZLP;
+ *  - otherwise the head request is completed, EP0 is re-armed for
+ *    SETUP if this is the control endpoint, and the next queued
+ *    request (if any) is started.
+ * Bulk IN defers the restart to the next IN-token interrupt via
+ * in_xfer_done.  Any still-active request gets more descriptors added
+ * and DMA re-enabled.
+ */
+static void epreq_xfer_process(struct snps_udc_ep *ep)
+{
+ struct snps_udc *udc = ep->udc;
+ struct ep_xfer_req *ep_req;
+
+ dev_dbg(udc->dev, "%s: xfer request\n", ep->name);
+
+ if (!ep->dma.usb_req) {
+ dev_dbg(udc->dev, "%s: No dma usb request\n", ep->name);
+ return;
+ }
+
+ disable_ep_dma(udc->regs, ep->num, ep->dirn);
+ ep_data_dma_remove(ep);
+
+ if (ep->dma.usb_req->status != -EINPROGRESS) {
+ ep_data_dma_finish(ep);
+
+ /* Control IN finished OK: turn around for the status OUT stage */
+ if ((ep->type == USB_ENDPOINT_XFER_CONTROL) &&
+ (ep->dirn == USB_DIR_IN) &&
+ (ep->dma.usb_req->status == 0)) {
+ ep->dirn = USB_DIR_OUT;
+ ep->b_ep_addr = ep->num | ep->dirn;
+ ep->dma.usb_req->status = -EINPROGRESS;
+ ep->dma.usb_req->actual = 0;
+ ep->dma.usb_req->length = 0;
+ ep_data_dma_init(ep);
+ } else {
+ /* Bulk IN ending on a packet boundary: send the ZLP */
+ if (in_bf_mode && is_ep_in() && is_ep_bulk() &&
+ (ep->dma.usb_req->length != 0) &&
+ (ep->dma.usb_req->length %
+ ep->usb_ep.maxpacket == 0) &&
+ (ep->dma.last)) {
+ ep->dma.usb_req->status = -EINPROGRESS;
+ ep->dma.usb_req->actual = 0;
+ ep->dma.usb_req->length = 0;
+ } else if (!list_empty(&ep->queue))
+ epreq_xfer_done(ep,
+ list_first_entry(&ep->queue,
+ struct
+ ep_xfer_req,
+ queue), 0);
+
+ if (ep->type == USB_ENDPOINT_XFER_CONTROL)
+ ep0_setup_init(ep, 0);
+
+ /* Bulk IN: wait for the next IN token before restarting */
+ if (is_ep_in() && is_ep_bulk() &&
+ !list_empty(&ep->queue)) {
+ ep->in_xfer_done = true;
+ clear_ep_nak(udc->regs, ep->num, ep->dirn);
+ enable_udc_ep_irq(udc->regs, ep->num, ep->dirn);
+ return;
+ }
+
+ if (list_empty(&ep->queue)) {
+ ep->dma.usb_req = NULL;
+ } else {
+ ep_req = list_first_entry(&ep->queue,
+ struct ep_xfer_req,
+ queue);
+ ep->dma.usb_req = &ep_req->usb_req;
+ ep_data_dma_init(ep);
+ }
+ }
+ }
+
+ if (ep->dma.usb_req) {
+ ep_data_dma_add(ep);
+ enable_udc_ep_irq(udc->regs, ep->num, ep->dirn);
+ clear_ep_nak(udc->regs, ep->num, ep->dirn);
+ enable_ep_dma(udc->regs, ep->num, ep->dirn);
+ }
+}
+
+/*
+ * epreq_xfer_error() - abort the in-flight transfer with @status.
+ *
+ * Stamps the current DMA request with the error and lets
+ * epreq_xfer_process() run the normal completion path for it.
+ */
+static void epreq_xfer_error(struct snps_udc_ep *ep, int status)
+{
+ if (!ep->dma.usb_req) {
+ dev_err(ep->udc->dev, "%s: No DMA usb request\n", ep->name);
+ return;
+ }
+
+ ep->dma.usb_req->status = status;
+ epreq_xfer_process(ep);
+}
+
+/*
+ * epreq_xfer_add() - enqueue a request and start it if the EP is idle.
+ *
+ * Appends @ep_req to ep->queue.  An ISOC IN transfer whose frame
+ * reference was lost (FRAME_NUM_INVALID) while another request was in
+ * flight is failed with -EREMOTEIO.  If the endpoint is idle, the
+ * head request is started: ISOC IN may defer the actual DMA kick to
+ * the next IN-token interrupt (unless in_isoc_delay_disabled), and
+ * bulk IN likewise waits for an IN token via in_xfer_done.
+ */
+static void epreq_xfer_add(struct snps_udc_ep *ep,
+ struct ep_xfer_req *ep_req)
+{
+ struct snps_udc *udc = ep->udc;
+
+ list_add_tail(&ep_req->queue, &ep->queue);
+ if (ep->stopped)
+ return;
+
+ if ((ep->dirn == USB_DIR_IN) &&
+ (ep->type == USB_ENDPOINT_XFER_ISOC) &&
+ (ep->dma.usb_req) &&
+ (ep->dma.frame_num == FRAME_NUM_INVALID)) {
+ ep_data_dma_finish(ep);
+ ep->dma.usb_req = NULL;
+ epreq_xfer_done(ep,
+ list_first_entry(&ep->queue,
+ struct ep_xfer_req,
+ queue),
+ -EREMOTEIO);
+ }
+
+ if (ep->dma.usb_req) {
+ dev_dbg(udc->dev, "%s: busy\n", ep->name);
+ } else if (!in_isoc_delay_disabled && (ep->dirn == USB_DIR_IN) &&
+ (ep->type == USB_ENDPOINT_XFER_ISOC) &&
+ (ep->dma.frame_num == FRAME_NUM_INVALID)) {
+ /* Wait for an IN token to learn the frame number first */
+ dev_dbg(udc->dev, "%s: ISOC delay xfer start\n", ep->name);
+ ep->dma.usb_req = &(list_first_entry(&ep->queue,
+ struct ep_xfer_req, queue))->usb_req;
+ ep_data_dma_init(ep);
+ clear_ep_nak(udc->regs, ep->num, ep->dirn);
+ enable_udc_ep_irq(udc->regs, ep->num, ep->dirn);
+
+ } else {
+ if (in_isoc_delay_disabled && (ep->dirn == USB_DIR_IN) &&
+ (ep->type == USB_ENDPOINT_XFER_ISOC) &&
+ (ep->dma.frame_num == FRAME_NUM_INVALID)) {
+ ep->dma.frame_num = get_last_rx_frnum(udc->regs);
+ }
+
+ /* Bulk IN: defer the DMA kick to the IN-token interrupt */
+ if (is_ep_in() && is_ep_bulk() && !ep->dma.usb_req) {
+ ep->in_xfer_done = true;
+ clear_ep_nak(udc->regs, ep->num, ep->dirn);
+ enable_udc_ep_irq(udc->regs, ep->num, ep->dirn);
+ return;
+ }
+
+ ep_req = list_first_entry(&ep->queue,
+ struct ep_xfer_req, queue);
+ ep->dma.usb_req = &ep_req->usb_req;
+ ep_data_dma_init(ep);
+ ep_data_dma_add(ep);
+ enable_udc_ep_irq(udc->regs, ep->num, ep->dirn);
+ clear_ep_nak(udc->regs, ep->num, ep->dirn);
+ enable_ep_dma(udc->regs, ep->num, ep->dirn);
+ }
+
+ dev_dbg(udc->dev, "%s: xfer add ep request\n", ep->name);
+}
+
+/*
+ * epreq_queue_flush() - complete every queued request with @status.
+ *
+ * Stops the endpoint, drains its queue through epreq_xfer_done(), and
+ * for bulk IN endpoints (and EP0) pulses the FIFO flush bit to discard
+ * any data already staged in hardware.
+ */
+static void epreq_queue_flush(struct snps_udc_ep *ep, int status)
+{
+ struct snps_udc *udc = ep->udc;
+ struct ep_xfer_req *ep_req;
+
+ ep->stopped = 1;
+
+ while (!list_empty(&ep->queue)) {
+ ep_req = list_first_entry(&ep->queue,
+ struct ep_xfer_req, queue);
+ epreq_xfer_done(ep, ep_req, status);
+ }
+
+ ep->dma.usb_req = NULL;
+ if ((is_ep_in() && is_ep_bulk()) || !ep->num) {
+ set_ep_fifo_flush(udc->regs, ep->num, ep->dirn);
+ clear_ep_fifo_flush(udc->regs, ep->num, ep->dirn);
+ }
+
+ dev_dbg(udc->dev, "%s: EP queue flushed\n", ep->usb_ep.name);
+}
+
+/*
+ * ep0_setup_rx() - hand a SETUP packet to the gadget driver.
+ * @ep: endpoint that received the SETUP (only EP0 is accepted)
+ * @setup: the 8-byte control request, fields still little-endian
+ *
+ * Sets the EP direction from bRequestType, invokes the gadget's setup()
+ * callback with the UDC lock dropped, and re-arms EP0: stalled if the
+ * gadget rejected the request, ready for the status stage if there is
+ * no data stage (wLength == 0).  Requests with a data stage are left
+ * for the subsequent usb_ep_queue() from the gadget.
+ */
+static void ep0_setup_rx(struct snps_udc_ep *ep,
+ struct usb_ctrlrequest *setup)
+{
+ struct snps_udc *udc = ep->udc;
+ int status;
+ u32 val;
+ u32 idx;
+ u32 len;
+
+ val = le16_to_cpu(setup->wValue);
+ idx = le16_to_cpu(setup->wIndex);
+ len = le16_to_cpu(setup->wLength);
+
+ ep->dirn = setup->bRequestType & USB_ENDPOINT_DIR_MASK;
+
+ dev_dbg(udc->dev, "%s: SETUP %02x.%02x v%04x i%04x l %04x\n",
+ ep->name, setup->bRequestType, setup->bRequest,
+ val, idx, len);
+
+ if (ep->num != 0) {
+ status = -EOPNOTSUPP;
+ } else {
+ /* setup() may sleep or queue requests: drop the lock */
+ spin_unlock(&udc->lock);
+ status = udc->gadget_driver->setup(&udc->gadget, setup);
+ spin_lock(&udc->lock);
+ }
+
+ if (status < 0)
+ ep0_setup_init(ep, status);
+ else if (len == 0)
+ ep0_setup_init(ep, 0);
+}
+
+/*
+ * irq_ep_out_setup() - process a completed SETUP DMA descriptor.
+ *
+ * If the descriptor did not complete cleanly the endpoint is simply
+ * re-armed for another SETUP; otherwise the buffer is reclaimed and the
+ * packet (in the descriptor's data words) is passed to ep0_setup_rx().
+ */
+static void irq_ep_out_setup(struct snps_udc_ep *ep)
+{
+ struct setup_desc *desc = &ep->dma.virt->setup;
+ u32 status = desc->status;
+
+ dev_dbg(ep->udc->dev, "irq set up %s desc status: 0x%x\n",
+ ep->name, status);
+
+ if ((status & DMA_STS_BUF_MASK) != DMA_STS_BUF_DMA_DONE) {
+ ep0_setup_init(ep, 0);
+ } else if ((status & DMA_STS_RX_MASK) != DMA_STS_RX_SUCCESS) {
+ ep0_setup_init(ep, 0);
+ } else {
+ /* Reclaim the buffer before handing the packet up */
+ desc->status = (status & ~DMA_STS_BUF_MASK)
+ | DMA_STS_BUF_HOST_BUSY;
+ ep0_setup_rx(ep, (struct usb_ctrlrequest *)&desc->data1);
+ }
+}
+
+/*
+ * irq_process_epout() - handle the OUT-direction interrupt status of
+ * one endpoint: data DMA completion, SETUP completion, buffer-not-
+ * available and DMA error conditions.  Status bits are read, acked in
+ * hardware, then consumed one by one; leftovers are logged as unknown.
+ */
+static void irq_process_epout(struct snps_udc_ep *ep)
+{
+ struct snps_udc *udc = ep->udc;
+ u32 status;
+
+ status = get_ep_status(udc->regs, ep->num, USB_DIR_OUT);
+ clear_ep_status(udc->regs, ep->num, USB_DIR_OUT, status);
+
+ status &= EP_STS_ALL;
+
+ if (!status)
+ return;
+
+ if ((ep->dirn != USB_DIR_OUT) &&
+ (ep->type != USB_ENDPOINT_XFER_CONTROL)) {
+ dev_err(udc->dev, "%s: unexpected interrupt\n", ep->name);
+ return;
+ }
+
+ if (status & OUT_DMA_DATA_DONE) {
+ status &= ~OUT_DMA_DATA_DONE;
+ epreq_xfer_process(ep);
+ }
+
+ if (status & OUT_DMA_SETUP_DONE) {
+ status &= ~OUT_DMA_SETUP_DONE;
+ irq_ep_out_setup(ep);
+ }
+
+ if (status & DMA_BUF_NOT_AVAIL) {
+ /* Controller ran out of ready descriptors: top up the chain */
+ status &= ~DMA_BUF_NOT_AVAIL;
+ dev_dbg(udc->dev, "%s: DMA BUF NOT AVAIL\n", ep->name);
+ epreq_xfer_process(ep);
+ }
+
+ if (status & DMA_ERROR) {
+ status &= ~DMA_ERROR;
+ dev_err(udc->dev, "%s: DMA ERROR\n", ep->usb_ep.name);
+ epreq_xfer_error(ep, -EIO);
+ }
+
+ if (status)
+ dev_err(udc->dev, "%s: unknown status=0x%x\n",
+ ep->name, status);
+}
+
+/*
+ * irq_process_epin() - handle the IN-direction interrupt status of one
+ * endpoint.
+ *
+ * IN_TOKEN_RX with nothing queued NAKs the endpoint; for ISOC it also
+ * latches the frame number and fails a stale in-flight request with
+ * -EREMOTEIO.  A bulk transfer parked by epreq_xfer_process() (via
+ * in_xfer_done) is restarted here.  Completion needs both IN_DMA_DONE
+ * and IN_XFER_DONE; whichever arrives first is remembered in
+ * ep->dma.done and the second triggers epreq_xfer_process().
+ */
+static void irq_process_epin(struct snps_udc_ep *ep)
+{
+ struct snps_udc *udc = ep->udc;
+ struct ep_xfer_req *ep_req;
+ u32 status;
+
+ status = get_ep_status(udc->regs, ep->num, USB_DIR_IN);
+ clear_ep_status(udc->regs, ep->num, USB_DIR_IN, status);
+
+ if (!status)
+ return;
+
+ if (ep->dirn != USB_DIR_IN) {
+ dev_err(udc->dev, "%s: unexpected OUT endpoint\n", ep->name);
+ return;
+ }
+
+ if ((ep->type == USB_ENDPOINT_XFER_ISOC) &&
+ (status & (IN_XFER_DONE | DMA_BUF_NOT_AVAIL))) {
+ dev_warn(ep->udc->dev, "%s: ISOC IN unexpected status=0x%x\n",
+ ep->name, status);
+ }
+
+ if (status & IN_TOKEN_RX) {
+ status &= ~IN_TOKEN_RX;
+ if (!ep->dma.usb_req && list_empty(&ep->queue))
+ enable_ep_nak(udc->regs, ep->num, USB_DIR_IN);
+
+ if (ep->type == USB_ENDPOINT_XFER_ISOC) {
+ /* NOTE(review): epreq_xfer_add() calls
+ * get_last_rx_frnum() for the same purpose -- confirm
+ * both helpers exist in snps_udc.h.
+ */
+ ep->dma.frame_num = get_frnum_last_rx(udc->regs);
+ dev_dbg(udc->dev, "%s: ISOC IN\n", ep->name);
+ if (ep->dma.usb_req) {
+ ep->dma.usb_req->status = -EREMOTEIO;
+ epreq_xfer_process(ep);
+ }
+ }
+ }
+
+ /* Restart a bulk IN transfer parked until this IN token */
+ if (is_ep_bulk() && !list_empty(&ep->queue) &&
+ ep->in_xfer_done) {
+ ep->in_xfer_done = false;
+ ep_req = list_first_entry(&ep->queue,
+ struct ep_xfer_req, queue);
+ ep->dma.usb_req = &ep_req->usb_req;
+
+ ep_data_dma_init(ep);
+ ep_data_dma_add(ep);
+ clear_ep_nak(udc->regs, ep->num, ep->dirn);
+ enable_udc_ep_irq(udc->regs, ep->num, ep->dirn);
+ enable_ep_dma(udc->regs, ep->num, ep->dirn);
+ }
+
+ if (status & IN_DMA_DONE) {
+ status &= ~IN_DMA_DONE;
+ clear_ep_nak(udc->regs, ep->num, USB_DIR_IN);
+
+ if (ep->type == USB_ENDPOINT_XFER_ISOC) {
+ dev_dbg(udc->dev, "%s: ISOC IN\n", ep->usb_ep.name);
+ epreq_xfer_process(ep);
+ } else if (ep->dma.done & IN_XFER_DONE) {
+ dev_dbg(udc->dev, "%s: late IN DMA done rec'd\n",
+ ep->name);
+ epreq_xfer_process(ep);
+ } else {
+ ep->dma.done = IN_DMA_DONE;
+ }
+ }
+
+ if (status & IN_XFER_DONE) {
+ status &= ~(IN_XFER_DONE);
+ status &= ~(IN_FIFO_EMPTY);
+
+ if (ep->dma.done & IN_DMA_DONE)
+ epreq_xfer_process(ep);
+ else
+ ep->dma.done = IN_XFER_DONE;
+ }
+
+ /* FIFO-empty is informational only */
+ status &= ~(IN_FIFO_EMPTY);
+
+ if (status & DMA_BUF_NOT_AVAIL) {
+ dev_err(udc->dev, "%s: DMA BUF NOT AVAIL\n", ep->name);
+ status &= ~(DMA_BUF_NOT_AVAIL);
+ epreq_xfer_process(ep);
+ }
+
+ if (status & DMA_ERROR) {
+ status &= ~DMA_ERROR;
+ dev_err(udc->dev, "%s: DMA ERROR\n", ep->name);
+ epreq_xfer_error(ep, -EIO);
+ }
+
+ if (status)
+ dev_err(udc->dev, "%s: unknown status=0x%x\n",
+ ep->name, status);
+}
+
+/*
+ * ep_irq_process() - dispatch per-endpoint interrupt bits.
+ * @udc: the controller
+ * @irq_in: bitmask of IN endpoints with pending interrupts (bit N = EP N)
+ * @irq_out: bitmask of OUT endpoints with pending interrupts
+ */
+static void ep_irq_process(struct snps_udc *udc, u32 irq_in, u32 irq_out)
+{
+	u32 num;
+
+	for (num = 0; num < UDC_MAX_EP; num++) {
+		u32 bit = 1u << num;
+
+		if (irq_in & bit)
+			irq_process_epin(&udc->ep[num]);
+
+		if (irq_out & bit)
+			irq_process_epout(&udc->ep[num]);
+	}
+}
+
+/*
+ * irq_process_set_intf() - handle a hardware-decoded SET_INTERFACE.
+ *
+ * This controller latches SET_INTERFACE itself instead of delivering
+ * the SETUP packet, so the request is reconstructed here from the
+ * interface/alternate registers, mirrored into every endpoint's
+ * register state, and forwarded to the gadget via ep0_setup_rx().
+ */
+static void irq_process_set_intf(struct snps_udc *udc)
+{
+ struct usb_ctrlrequest setup;
+ u32 ep_num;
+ u16 intf;
+ u16 alt;
+
+ intf = (uint16_t)get_intf_num(udc->regs);
+ alt = (uint16_t)get_alt_num(udc->regs);
+
+ setup.bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD
+ | USB_RECIP_INTERFACE;
+ setup.bRequest = USB_REQ_SET_INTERFACE;
+ setup.wValue = cpu_to_le16(alt);
+ setup.wIndex = cpu_to_le16(intf);
+ setup.wLength = 0;
+
+ for (ep_num = 0; ep_num < UDC_MAX_EP; ep_num++) {
+ set_ep_alt_num(udc->regs, ep_num, alt);
+ set_ep_intf_num(udc->regs, ep_num, intf);
+ }
+ dev_info(udc->dev, "SET INTF=%d ALT=%d\n", intf, alt);
+
+ ep0_setup_rx(&udc->ep[0], &setup);
+ set_setup_done(udc->regs);
+}
+
+/*
+ * irq_process_set_cfg() - handle a hardware-decoded SET_CONFIGURATION.
+ *
+ * Like SET_INTERFACE, the controller absorbs this request; rebuild it
+ * from the configuration register, program each endpoint's CFG field,
+ * and forward the synthesized SETUP to the gadget driver.
+ */
+static void irq_process_set_cfg(struct snps_udc *udc)
+{
+ struct usb_ctrlrequest setup;
+ u32 ep_num;
+ u16 cfg;
+
+ cfg = (u16)get_cfg_num(udc->regs);
+
+ setup.bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD
+ | USB_RECIP_DEVICE;
+ setup.bRequest = USB_REQ_SET_CONFIGURATION;
+ setup.wValue = cpu_to_le16(cfg);
+ setup.wIndex = 0;
+ setup.wLength = 0;
+
+ for (ep_num = 0; ep_num < UDC_MAX_EP; ep_num++)
+ set_epcfg_reg(udc->regs, ep_num, cfg);
+
+ dev_info(udc->dev, "SET CFG=%d\n", cfg);
+
+ ep0_setup_rx(&udc->ep[0], &setup);
+ set_setup_done(udc->regs);
+}
+
+/*
+ * irq_process_speed_enum() - record the speed negotiated on the bus.
+ *
+ * Translates the controller's enumerated-speed field into the gadget's
+ * speed value.  On the first transition away from USB_SPEED_UNKNOWN,
+ * EP0 is armed for SETUP packets and the device-wide NAK is lifted.
+ */
+static void irq_process_speed_enum(struct snps_udc *udc)
+{
+ u32 speed = udc->gadget.speed;
+
+ switch (get_enum_speed(udc->regs)) {
+ case SPEED_HIGH:
+ dev_info(udc->dev, "HIGH SPEED\n");
+ udc->gadget.speed = USB_SPEED_HIGH;
+ break;
+ case SPEED_FULL:
+ dev_info(udc->dev, "FULL SPEED\n");
+ udc->gadget.speed = USB_SPEED_FULL;
+ break;
+ case SPEED_LOW:
+ dev_warn(udc->dev, "LOW SPEED not supported\n");
+ udc->gadget.speed = USB_SPEED_LOW;
+ break;
+ default:
+ dev_err(udc->dev, "Unknown SPEED = 0x%x\n",
+ get_enum_speed(udc->regs));
+ break;
+ }
+
+ if ((speed == USB_SPEED_UNKNOWN) &&
+ (udc->gadget.speed != USB_SPEED_UNKNOWN)) {
+ ep0_setup_init(&udc->ep[0], 0);
+ clear_devnak(udc->regs);
+ }
+}
+
+/*
+ * irq_process_bus_idle() - react to a bus-idle interrupt by pulsing the
+ * IN-direction FIFO flush of every endpoint, discarding stale TX data.
+ */
+static void irq_process_bus_idle(struct snps_udc *udc)
+{
+	int ep_num;
+
+	for (ep_num = 0; ep_num < UDC_MAX_EP; ep_num++) {
+		set_ep_fifo_flush(udc->regs, ep_num, EP_DIRN_IN);
+		clear_ep_fifo_flush(udc->regs, ep_num, EP_DIRN_IN);
+	}
+}
+
+/*
+ * dev_irq_process() - handle device-level (non-endpoint) interrupts:
+ * bus reset/suspend/idle, speed enumeration completion, and the
+ * hardware-decoded SET_CONFIGURATION / SET_INTERFACE requests.
+ */
+static void dev_irq_process(struct snps_udc *udc, u32 irq)
+{
+ if (irq & IRQ_BUS_RESET)
+ dev_info(udc->dev, "BUS RESET\n");
+
+ if (irq & IRQ_BUS_SUSPEND)
+ dev_dbg(udc->dev, "BUS SUSPEND\n");
+
+ if (irq & IRQ_BUS_IDLE) {
+ dev_dbg(udc->dev, "BUS IDLE\n");
+ irq_process_bus_idle(udc);
+ }
+
+ if (irq & IRQ_SPEED_ENUM_DONE) {
+ dev_dbg(udc->dev, "BUS speed enum done\n");
+ irq_process_speed_enum(udc);
+ }
+
+ if (irq & IRQ_SET_CFG) {
+ dev_dbg(udc->dev, "SET CFG\n");
+ irq_process_set_cfg(udc);
+ }
+
+ if (irq & IRQ_SET_INTF) {
+ dev_dbg(udc->dev, "SET INTF\n");
+ irq_process_set_intf(udc);
+ }
+}
+
+/*
+ * snps_udc_irq() - top-level interrupt handler.
+ *
+ * Reads and acks all pending device and per-endpoint interrupt bits
+ * under the UDC lock, then processes them in three passes: device
+ * events except SET_CFG/SET_INTF, endpoint events, and finally
+ * SET_CFG/SET_INTF (deferred so endpoint state is settled first).
+ * Returns IRQ_NONE when no gadget driver is bound.
+ */
+static irqreturn_t snps_udc_irq(int irq, void *dev)
+{
+ struct snps_udc *udc = (struct snps_udc *)dev;
+ u32 devintr, epin_intr, epout_intr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ devintr = get_irq_active(udc->regs);
+ epin_intr = get_ep_irq_active(udc->regs, USB_DIR_IN);
+ epout_intr = get_ep_irq_active(udc->regs, USB_DIR_OUT);
+
+ clear_udc_dev_irq(udc->regs, devintr);
+ clear_udc_ep_irq_list(udc->regs, USB_DIR_IN, epin_intr);
+ clear_udc_ep_irq_list(udc->regs, USB_DIR_OUT, epout_intr);
+
+ if (!udc->gadget_driver) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return IRQ_NONE;
+ }
+
+ /* SET_CFG and SET_INTF interrupts are handled last */
+ dev_irq_process(udc, devintr & ~(IRQ_SET_CFG | IRQ_SET_INTF));
+ ep_irq_process(udc, epin_intr, epout_intr);
+ dev_irq_process(udc, devintr & (IRQ_SET_CFG | IRQ_SET_INTF));
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ dev_dbg(udc->dev, "UDC interrupts: Dev=0x%x EpIn=0x%x EpOut=0x%x\n",
+ devintr, epin_intr, epout_intr);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * snps_ep_enable() - usb_ep_ops.enable handler.
+ * @usb_ep: endpoint being enabled
+ * @desc: endpoint descriptor chosen by the gadget driver
+ *
+ * Validates the descriptor against the endpoint's capabilities,
+ * reserves FIFO RAM, and programs the endpoint registers.  For ISOC IN
+ * endpoints the frame increment is derived from bInterval.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int snps_ep_enable(struct usb_ep *usb_ep,
+			  const struct usb_endpoint_descriptor *desc)
+{
+	struct snps_udc_ep *ep;
+	struct snps_udc *udc;
+	unsigned long flags;
+	u32 max_pkt_size;
+	u32 xfertype;
+
+	/* Validate both pointers before dereferencing anything */
+	if (!usb_ep || !desc || (desc->bDescriptorType != USB_DT_ENDPOINT)) {
+		pr_err("snps-udc: invalid ep enable request\n");
+		return -EINVAL;
+	}
+
+	ep = container_of(usb_ep, struct snps_udc_ep, usb_ep);
+	udc = ep->udc;
+
+	if (ep->b_ep_addr != desc->bEndpointAddress) {
+		dev_err(udc->dev, "invalid endpoint (%p)\n", usb_ep);
+		return -EINVAL;
+	}
+
+	if (desc == ep->desc) {
+		dev_err(udc->dev, "ep%d: already enabled\n", ep->num);
+		return -EEXIST;
+	}
+
+	if (ep->desc) {
+		dev_err(udc->dev, "ep%d: already enabled with other descriptor\n",
+			ep->num);
+		return -EBUSY;
+	}
+
+	if (!udc->gadget_driver) {
+		dev_warn(udc->dev, "%s: invalid device state\n", ep->name);
+		return -ESHUTDOWN;
+	}
+
+	xfertype = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+	/* Bits 10:0 of wMaxPacketSize carry the packet size itself */
+	max_pkt_size = le16_to_cpu(desc->wMaxPacketSize) & 0x7FF;
+
+	if (!max_pkt_size || (max_pkt_size > ep->max_pkt_size)) {
+		dev_err(udc->dev, "%s: invalid max pkt size\n", ep->name);
+		return -ERANGE;
+	}
+
+	if ((ep->dirn == USB_DIR_IN) &&
+	    (xfertype == USB_ENDPOINT_XFER_ISOC)) {
+		/* bInterval is an exponent in the range 1..16 */
+		if ((desc->bInterval < 1) || (desc->bInterval > 16)) {
+			dev_err(udc->dev, "%s: invalid binterval\n", ep->name);
+			return -ERANGE;
+		}
+		ep->dma.frame_num = FRAME_NUM_INVALID;
+		ep->dma.frame_incr = 1 << (desc->bInterval - 1);
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	if (ep_cfg(ep, xfertype, max_pkt_size) != 0) {
+		spin_unlock_irqrestore(&udc->lock, flags);
+		dev_err(udc->dev, "%s: not enough FIFO space\n", ep->name);
+		return -ENOSPC;
+	}
+
+	set_epcfg_reg(udc->regs, ep->num, get_cfg_num(udc->regs));
+
+	ep->desc = desc;
+	ep->stopped = 0;
+	ep->usb_ep.maxpacket = max_pkt_size;
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	dev_dbg(udc->dev, "%s: enabled: type: 0x%x, max_pkt_size: %d\n",
+		ep->name, xfertype, max_pkt_size);
+
+	return 0;
+}
+
+/*
+ * snps_ep_disable() - usb_ep_ops.disable handler.
+ *
+ * Flushes all pending requests with -ESHUTDOWN, clears the cached
+ * descriptor, restores the default max packet size, and returns the
+ * endpoint's FIFO RAM to the pool.
+ *
+ * Return: 0 on success, -EINVAL for a NULL or not-enabled endpoint.
+ */
+static int snps_ep_disable(struct usb_ep *usb_ep)
+{
+	struct snps_udc_ep *ep;
+	struct snps_udc *udc;
+	unsigned long flags;
+
+	/* Check the pointer before dereferencing it */
+	if (!usb_ep)
+		return -EINVAL;
+
+	ep = container_of(usb_ep, struct snps_udc_ep, usb_ep);
+	udc = ep->udc;
+
+	if (!ep->desc) {
+		dev_err(udc->dev, "%s: invalid endpoint\n", ep->usb_ep.name);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	epreq_queue_flush(ep, -ESHUTDOWN);
+	ep->desc = NULL;
+	ep->usb_ep.maxpacket = ep->max_pkt_size;
+	fifo_ram_free(ep);
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
+/*
+ * snps_ep_alloc_request() - usb_ep_ops.alloc_request handler.
+ *
+ * Allocates a zeroed ep_xfer_req wrapper, initializes its queue link,
+ * and marks the DMA address invalid so snps_ep_queue() knows to map
+ * the buffer.  Returns the embedded usb_request, or NULL on failure.
+ */
+static struct usb_request *
+snps_ep_alloc_request(struct usb_ep *usb_ep, gfp_t gfp_flags)
+{
+	struct ep_xfer_req *ep_req;
+
+	if (!usb_ep)
+		return NULL;
+
+	ep_req = kzalloc(sizeof(*ep_req), gfp_flags);
+	if (!ep_req)
+		return NULL;
+
+	INIT_LIST_HEAD(&ep_req->queue);
+	ep_req->usb_req.dma = DMA_ADDR_INVALID;
+	pr_debug("%s: ep alloc req\n", usb_ep->name);
+
+	return &ep_req->usb_req;
+}
+
+/*
+ * snps_ep_free_request() - usb_ep_ops.free_request handler.
+ *
+ * Releases the ep_xfer_req wrapper around @usb_req; a NULL request is
+ * silently ignored, matching usb_ep_free_request() semantics.
+ */
+static void snps_ep_free_request(struct usb_ep *usb_ep,
+				 struct usb_request *usb_req)
+{
+	struct ep_xfer_req *ep_req;
+
+	if (!usb_req)
+		return;
+
+	ep_req = container_of(usb_req, struct ep_xfer_req, usb_req);
+	pr_debug("%s: freed\n", usb_ep->name);
+	kfree(ep_req);
+}
+
+/*
+ * snps_ep_queue() - usb_ep_ops.queue handler.
+ *
+ * Validates the request, then prepares it for DMA: a buffer that is
+ * not 4-byte aligned is routed through a per-endpoint coherent bounce
+ * buffer (grown on demand), otherwise an unmapped buffer is DMA-mapped
+ * here.  A zero-length control OUT request completes immediately
+ * (status-stage only); everything else is handed to epreq_xfer_add()
+ * under the UDC lock.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int snps_ep_queue(struct usb_ep *usb_ep,
+ struct usb_request *usb_req, gfp_t gfp_flags)
+{
+ struct ep_xfer_req *ep_req;
+ struct snps_udc_ep *ep;
+ struct snps_udc *udc;
+ unsigned long flags;
+
+ ep = container_of(usb_ep, struct snps_udc_ep, usb_ep);
+ ep_req = container_of(usb_req, struct ep_xfer_req, usb_req);
+
+ dev_dbg(ep->udc->dev, "%s: %s\n", __func__, ep->usb_ep.name);
+ if (!usb_ep || !usb_req || !ep_req->usb_req.complete ||
+ !ep_req->usb_req.buf || !list_empty(&ep_req->queue)) {
+ dev_dbg(ep->udc->dev, "%s:invalid queue request\n", ep->name);
+ return -EINVAL;
+ }
+
+ if (!ep->desc && (ep->num != 0)) {
+ dev_err(ep->udc->dev, "%s: invalid EP state\n", ep->name);
+ return -EFAULT;
+ }
+
+ /* Control transfers are strictly one request at a time */
+ if ((ep->type == USB_ENDPOINT_XFER_CONTROL) &&
+ !list_empty(&ep->queue)) {
+ dev_err(ep->udc->dev, "%s: EP queue not empty\n", ep->name);
+ return -EPERM;
+ }
+
+ /* Descriptor byte-count field is 16 bits wide */
+ if (usb_req->length > 0xffff) {
+ dev_err(ep->udc->dev, "%s: request too big\n", ep->name);
+ return -E2BIG;
+ }
+
+ if ((ep->type == USB_ENDPOINT_XFER_ISOC) &&
+ (ep->dirn == USB_DIR_IN) &&
+ (usb_req->length > ep->usb_ep.maxpacket)) {
+ dev_err(ep->udc->dev, "%s: request > scheduled bandwidth, length=%u\n",
+ ep->name, usb_req->length);
+ return -EFBIG;
+ }
+
+ udc = ep->udc;
+ if (!udc->gadget_driver) {
+ dev_err(udc->dev, "%s: invalid device state\n", ep->name);
+ return -ESHUTDOWN;
+ }
+
+ if (((unsigned long)ep_req->usb_req.buf) & 0x3UL) {
+ dev_dbg(udc->dev, "%s: invalid buffer alignment: addr=0x%p\n",
+ ep->usb_ep.name, ep_req->usb_req.buf);
+
+ /* Grow the bounce buffer if the existing one is too small.
+ * NOTE(review): dma_alloc_coherent/dma_free_coherent are
+ * called with a NULL struct device here; udc->dev (or the
+ * gadget's parent) looks like the right device -- confirm.
+ */
+ if ((ep->dma.aligned_buf) &&
+ (ep->dma.aligned_len < ep_req->usb_req.length)) {
+ dma_free_coherent(NULL, ep->dma.aligned_len,
+ ep->dma.aligned_buf,
+ ep->dma.aligned_addr);
+ ep->dma.aligned_buf = NULL;
+ }
+
+ if (!ep->dma.aligned_buf) {
+ ep->dma.aligned_len = ep_req->usb_req.length;
+ ep->dma.aligned_buf = dma_alloc_coherent(NULL,
+ ep->dma.aligned_len, &ep->dma.aligned_addr,
+ GFP_ATOMIC);
+ }
+
+ if (!ep->dma.aligned_buf) {
+ dev_err(udc->dev, "%s: ep dma alloc failed\n",
+ ep->name);
+ return -ENOMEM;
+ }
+
+ ep_req->dma_aligned = 1;
+ } else if ((ep_req->usb_req.dma == DMA_ADDR_INVALID) ||
+ (ep_req->usb_req.dma == 0)) {
+ ep_req->dma_mapped = 1;
+ ep_req->usb_req.dma = dma_map_single(
+ ep->udc->gadget.dev.parent,
+ ep_req->usb_req.buf,
+ (ep_req->usb_req.length ?
+ ep_req->usb_req.length : 1),
+ (ep->dirn == USB_DIR_IN ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+ if (dma_mapping_error(ep->udc->gadget.dev.parent,
+ ep_req->usb_req.dma)) {
+ dev_err(ep->udc->gadget.dev.parent,
+ "failed to map buffer\n");
+ return -EFAULT;
+ }
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ ep_req->usb_req.status = -EINPROGRESS;
+ ep_req->usb_req.actual = 0;
+
+ /* Zero-length control OUT: status stage only, complete at once */
+ if ((ep->type == USB_ENDPOINT_XFER_CONTROL) &&
+ (ep->dirn == USB_DIR_OUT) &&
+ (ep_req->usb_req.length == 0)) {
+ epreq_xfer_done(ep, ep_req, 0);
+ } else {
+ if (ep_req->usb_req.length == 0)
+ ep_req->usb_req.zero = 1;
+
+ epreq_xfer_add(ep, ep_req);
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+/*
+ * snps_ep_dequeue() - usb_ep_ops.dequeue handler.
+ *
+ * Cancels @usb_req if it is still on @usb_ep's queue, completing it
+ * with -ECONNRESET.
+ *
+ * Return: 0 on success, -EINVAL for NULL arguments, -ENOLINK when the
+ * request is not queued on this endpoint.
+ */
+static int snps_ep_dequeue(struct usb_ep *usb_ep,
+			   struct usb_request *usb_req)
+{
+	struct ep_xfer_req *ep_req;
+	struct snps_udc_ep *ep;
+	unsigned long flags;
+	bool found = false;
+
+	/* Check both pointers before dereferencing them */
+	if (!usb_ep || !usb_req)
+		return -EINVAL;
+
+	ep = container_of(usb_ep, struct snps_udc_ep, usb_ep);
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+
+	/* The request must currently be queued on this endpoint; a
+	 * found flag avoids touching the loop cursor after the walk,
+	 * which is invalid when the list is empty.
+	 */
+	list_for_each_entry(ep_req, &ep->queue, queue) {
+		if (&ep_req->usb_req == usb_req) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		spin_unlock_irqrestore(&ep->udc->lock, flags);
+		dev_err(ep->udc->dev, "%s: request not queued\n", ep->name);
+		return -ENOLINK;
+	}
+
+	epreq_xfer_done(ep, ep_req, -ECONNRESET);
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+	dev_dbg(ep->udc->dev, "%s: req=0x%p\n", ep->name, usb_req);
+	return 0;
+}
+
+/* Gadget API .set_halt callback: set or clear the STALL condition.
+ *
+ * Fix: check usb_ep before container_of()/dereference — the original
+ * dereferenced ep->udc in the error message before validating the
+ * argument.
+ */
+static int snps_ep_set_halt(struct usb_ep *usb_ep, int halt)
+{
+	struct snps_udc_ep *ep;
+	unsigned long flags;
+	struct snps_udc *udc;
+
+	if (!usb_ep)
+		return -EINVAL;
+
+	ep = container_of(usb_ep, struct snps_udc_ep, usb_ep);
+	udc = ep->udc;
+
+	/* Halt is not defined for isochronous endpoints */
+	if (ep->type == USB_ENDPOINT_XFER_ISOC) {
+		dev_err(udc->dev, "%s: unsupported halt req\n", ep->name);
+		return -EOPNOTSUPP;
+	}
+
+	/* Per the gadget API, IN endpoints may only halt when idle */
+	if (halt && (ep->dirn == USB_DIR_IN) &&
+	    !list_empty(&ep->queue)) {
+		dev_err(udc->dev, "%s: EP IN queue not empty\n", ep->name);
+		return -EAGAIN;
+	}
+
+	/* A control endpoint's halt is cleared by the next SETUP, not
+	 * by software.
+	 */
+	if (!halt && (ep->type == USB_ENDPOINT_XFER_CONTROL)) {
+		dev_err(udc->dev, "%s: CTRL HALT clear\n", ep->name);
+		return -EPROTO;
+	}
+
+	spin_lock_irqsave(&ep->udc->lock, flags);
+
+	if (!halt) {
+		disable_ep_stall(udc->regs, ep->num, ep->dirn);
+	} else if (ep->type != USB_ENDPOINT_XFER_CONTROL) {
+		enable_ep_stall(udc->regs, ep->num, ep->dirn);
+	} else {
+		/* Control endpoints stall both directions */
+		enable_ep_stall(udc->regs, ep->num, USB_DIR_IN);
+		enable_ep_stall(udc->regs, ep->num, USB_DIR_OUT);
+	}
+
+	spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+	dev_dbg(udc->dev, "%s: HALT %s done\n", ep->name,
+		halt ? "SET" : "CLR");
+
+	return 0;
+}
+
+/* Fix: the ops table is never modified at runtime and usb_ep.ops is
+ * a const pointer, so declare it const (lets it live in .rodata).
+ */
+static const struct usb_ep_ops snps_ep_ops = {
+	.enable = snps_ep_enable,
+	.disable = snps_ep_disable,
+
+	.alloc_request = snps_ep_alloc_request,
+	.free_request = snps_ep_free_request,
+
+	.queue = snps_ep_queue,
+	.dequeue = snps_ep_dequeue,
+
+	.set_halt = snps_ep_set_halt,
+};
+
+/* Initialize the driver-side endpoint table: ep0 as the bidirectional
+ * control endpoint, then the data endpoints (odd = IN, even = OUT).
+ * Returns 0 on success or the ep_cfg() error for ep0.
+ *
+ * Fix: use snprintf() instead of strncpy() for the ep0 name —
+ * guarantees NUL termination and matches the naming code in the loop
+ * below.
+ */
+static int eps_init(struct snps_udc *udc)
+{
+	struct snps_udc_ep *ep;
+	int i, ret;
+
+	/* Initialize Endpoint 0 */
+	ep = &udc->ep[0];
+	ep->udc = udc;
+	ep->num = 0;
+	ep->in_xfer_done = true;
+	ep->dirn = USB_DIR_OUT;
+	ep->b_ep_addr = ep->num | ep->dirn;
+	snprintf(ep->name, sizeof(ep->name), "ep0");
+	ep->usb_ep.name = ep->name;
+	ep->max_pkt_size = EP_CTRL_MAX_PKT_SIZE;
+	usb_ep_set_maxpacket_limit(&ep->usb_ep, EP_CTRL_MAX_PKT_SIZE);
+	ep->usb_ep.ops = &snps_ep_ops;
+	ep->stopped = 0;
+	ep->usb_ep.caps.type_control = true;
+	ep->usb_ep.caps.dir_in = true;
+	ep->usb_ep.caps.dir_out = true;
+	INIT_LIST_HEAD(&ep->queue);
+	ep->type = USB_ENDPOINT_XFER_CONTROL;
+	ep->usb_ep.maxpacket = EP_CTRL_MAX_PKT_SIZE;
+
+	/* DMA descriptors are only set up once a cable type is known */
+	if (udc->conn_type)
+		ep_dma_init(ep);
+
+	dev_dbg(udc->dev, "%s: type: 0x%x, Dir:0x%x, Max Size: %d\n",
+		ep->name, ep->type, ep->dirn, ep->max_pkt_size);
+
+	/* Initialize remaining endpoints */
+	for (i = 1; i < UDC_MAX_EP; i++) {
+		ep = &udc->ep[i];
+		ep->udc = udc;
+		ep->max_pkt_size = EP_MAX_PKT_SIZE;
+		usb_ep_set_maxpacket_limit(&ep->usb_ep, EP_MAX_PKT_SIZE);
+		ep->usb_ep.ops = &snps_ep_ops;
+		ep->in_xfer_done = true;
+		ep->num = i;
+		if (i % 2) {
+			snprintf(ep->name, sizeof(ep->name), "ep%din", i);
+			ep->dirn = EP_DIRN_IN;
+			ep->usb_ep.caps.dir_in = true;
+		} else {
+			snprintf(ep->name, sizeof(ep->name), "ep%dout", i);
+			ep->dirn = EP_DIRN_OUT;
+			ep->usb_ep.caps.dir_out = true;
+		}
+		ep->usb_ep.name = ep->name;
+		ep->b_ep_addr = ep->num | ep->dirn;
+
+		/* Actual type is fixed later at ep_enable time */
+		ep->usb_ep.caps.type_iso = true;
+		ep->usb_ep.caps.type_bulk = true;
+		ep->usb_ep.caps.type_int = true;
+		ep->stopped = 0;
+		ep->usb_ep.maxpacket = EP_MAX_PKT_SIZE;
+
+		INIT_LIST_HEAD(&ep->queue);
+		if (udc->conn_type)
+			ep_dma_init(ep);
+
+		dev_dbg(udc->dev, "%s: type: 0x%x, Dir: 0x%x, Max Size: %d\n",
+			ep->name, ep->type, ep->dirn, ep->max_pkt_size);
+	}
+
+	/* Reset FIFO RAM accounting before configuring ep0 */
+	udc->rx_fifo_space = OUT_RX_FIFO_MEM_SIZE;
+	udc->tx_fifo_space = IN_TX_FIFO_MEM_SIZE;
+	ret = ep_cfg(&udc->ep[0], USB_ENDPOINT_XFER_CONTROL,
+		     EP_CTRL_MAX_PKT_SIZE);
+	if (ret) {
+		dev_err(udc->dev, "Synopsys-UDC: error configuring endpoints\n");
+		return ret;
+	}
+
+	dev_dbg(udc->dev, "Synopsys UDC Endpoints initialized\n");
+	return 0;
+}
+
+/* Bring the controller to a running, connected state.
+ * Called from snps_gadget_pullup() with udc->lock held.
+ */
+static void start_udc(struct snps_udc *udc)
+{
+	int i;
+
+	init_udc_reg(udc->regs);
+
+	/* Reset bookkeeping of the shared RX/TX FIFO RAM */
+	udc->rx_fifo_space = OUT_RX_FIFO_MEM_SIZE;
+	udc->tx_fifo_space = IN_TX_FIFO_MEM_SIZE;
+
+	/* NOTE(review): eps_init() can fail (ep_cfg error) but the
+	 * return value is ignored here — confirm whether the error
+	 * should be propagated to the caller.
+	 */
+	eps_init(udc);
+	enable_self_pwr(udc->regs);
+
+	enable_udc_dev_irq(udc->regs, IRQ_SPEED_ENUM_DONE | IRQ_BUS_SUSPEND |
+			   IRQ_BUS_IDLE | IRQ_BUS_RESET | IRQ_SET_INTF |
+			   IRQ_SET_CFG);
+
+	/* Unmask per-endpoint interrupts for every named endpoint */
+	for (i = 0; i < UDC_MAX_EP; ++i) {
+		if (udc->ep[i].usb_ep.name) {
+			enable_udc_ep_irq(udc->regs,
+					  udc->ep[i].num, USB_DIR_OUT);
+			enable_udc_ep_irq(udc->regs,
+					  udc->ep[i].num, USB_DIR_IN);
+		}
+	}
+
+	clear_devnak(udc->regs);
+	enable_ctrl_dma(udc->regs);
+	/* Finally connect to the bus so the host sees the device */
+	bus_connect(udc->regs);
+
+	dev_dbg(udc->dev, "Synopsys UDC started\n");
+}
+
+/* Disconnect from the bus and shut down EP0 activity.
+ * Called with udc->lock held; NOTE that the lock is dropped and
+ * re-acquired around the gadget driver's disconnect() callback, so
+ * callers must tolerate the lock being released here.
+ */
+static void stop_udc(struct snps_udc *udc)
+{
+	finish_udc(udc->regs);
+
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	/* Complete all pending EP0 requests with -ESHUTDOWN */
+	epreq_queue_flush(&udc->ep[0], -ESHUTDOWN);
+	udc->ep[0].desc = NULL;
+
+	bus_disconnect(udc->regs);
+
+	if (udc->gadget_driver && udc->gadget_driver->disconnect) {
+		spin_unlock(&udc->lock);
+		udc->gadget_driver->disconnect(&udc->gadget);
+		spin_lock(&udc->lock);
+	}
+
+	dev_dbg(udc->dev, "Synopsys UDC stopped\n");
+}
+
+/* Gadget API .pullup callback: connect to / disconnect from the bus.
+ * Acts only when a gadget driver is bound and the requested state
+ * matches the cached pullup_on flag set by the drd work.
+ */
+static int snps_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct snps_udc *udc = container_of(gadget, struct snps_udc, gadget);
+	unsigned long flags;
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	if (udc->gadget_driver) {
+		if (is_on && udc->pullup_on) {
+			start_udc(udc);
+			udc->ep[0].stopped = 0;
+			dev_info(udc->dev, "Synopsys UDC device connected\n");
+		} else if (!is_on && !udc->pullup_on) {
+			stop_udc(udc);
+			udc->ep[0].stopped = 1;
+			dev_info(udc->dev, "Synopsys UDC device Disconnected\n");
+		}
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	return 0;
+}
+
+/* Gadget API .udc_start callback: bind a gadget driver.
+ * Returns -EBUSY if a driver is already bound.
+ *
+ * Fix: the busy check was performed before taking udc->lock, racing
+ * with concurrent binds; move it under the lock.
+ */
+static int snps_gadget_start(struct usb_gadget *gadget,
+			     struct usb_gadget_driver *driver)
+{
+	struct snps_udc *udc;
+	unsigned long flags;
+
+	udc = container_of(gadget, struct snps_udc, gadget);
+
+	spin_lock_irqsave(&udc->lock, flags);
+
+	if (udc->gadget_driver) {
+		spin_unlock_irqrestore(&udc->lock, flags);
+		return -EBUSY;
+	}
+
+	driver->driver.bus = NULL;
+	udc->gadget_driver = driver;
+	udc->gadget.dev.driver = &driver->driver;
+	udc->ep[0].stopped = 0;
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	/* when cable is connected at boot time */
+	if (udc->conn_type)
+		schedule_delayed_work(&udc->drd_work, USBD_WQ_DELAY_MS);
+	dev_dbg(udc->dev, "%s: Done\n", __func__);
+
+	return 0;
+}
+
+/* Gadget API .udc_stop callback: unbind the gadget driver.
+ * Stops the hardware and completes every request still queued on any
+ * endpoint with -ESHUTDOWN.
+ *
+ * Fix: dropped the redundant 'if (ep->desc)' guard before clearing
+ * the descriptor pointer — an unconditional store is equivalent.
+ */
+static int snps_gadget_stop(struct usb_gadget *gadget)
+{
+	struct snps_udc_ep *ep;
+	struct snps_udc *udc;
+	unsigned long flags;
+
+	udc = container_of(gadget, struct snps_udc, gadget);
+
+	spin_lock_irqsave(&udc->lock, flags);
+	/* stop_udc() may temporarily drop udc->lock for the driver's
+	 * disconnect() callback.
+	 */
+	stop_udc(udc);
+	udc->gadget.dev.driver = NULL;
+	udc->gadget_driver = NULL;
+
+	list_for_each_entry(ep, &udc->gadget.ep_list, usb_ep.ep_list) {
+		epreq_queue_flush(ep, -ESHUTDOWN);
+		ep->desc = NULL;
+	}
+
+	spin_unlock_irqrestore(&udc->lock, flags);
+
+	dev_dbg(udc->dev, "%s: Done\n", __func__);
+
+	return 0;
+}
+
+/* Fix: ops table is never written at runtime — declare it const so
+ * it can be placed in read-only memory.
+ */
+static const struct usb_gadget_ops snps_gadget_ops = {
+	.pullup		= snps_gadget_pullup,
+	.udc_start	= snps_gadget_start,
+	.udc_stop	= snps_gadget_stop,
+};
+
+/* Delayed work that applies the most recent extcon cable event:
+ * pull up when a device cable is present and a gadget driver is
+ * bound, disconnect otherwise.
+ *
+ * Fix: made static — the function is only referenced inside this
+ * file (INIT_DELAYED_WORK in probe) and must not leak into the
+ * global kernel namespace.
+ */
+static void snps_udc_drd_work(struct work_struct *work)
+{
+	struct snps_udc *udc;
+
+	udc = container_of(to_delayed_work(work),
+			   struct snps_udc, drd_work);
+
+	if (udc->conn_type) {
+		dev_dbg(udc->dev, "idle -> device\n");
+		if (udc->gadget_driver) {
+			udc->pullup_on = 1;
+			snps_gadget_pullup(&udc->gadget, 1);
+		}
+	} else {
+		dev_dbg(udc->dev, "device -> idle\n");
+		udc->pullup_on = 0;
+		snps_gadget_pullup(&udc->gadget, 0);
+	}
+}
+
+/* extcon notifier: record the new cable state and defer the actual
+ * connect/disconnect handling to the delayed drd work.
+ */
+static int usbd_connect_notify(struct notifier_block *self,
+			       unsigned long event, void *ptr)
+{
+	struct snps_udc *udc = container_of(self, struct snps_udc, nb);
+
+	udc->conn_type = event;
+	dev_dbg(udc->dev, "%s: event: %lu\n", __func__, event);
+
+	schedule_delayed_work(&udc->drd_work, USBD_WQ_DELAY_MS);
+
+	return NOTIFY_OK;
+}
+
+/* Release all coherent DMA memory owned by the controller: the shared
+ * endpoint-descriptor array and any per-endpoint bounce buffers that
+ * were created for unaligned request buffers.
+ */
+static void free_udc_dma(struct platform_device *pdev, struct snps_udc *udc)
+{
+	u32 num;
+
+	/* NOTE(review): udc->dma.phys is a struct pointer cast to
+	 * dma_addr_t; storing the handle as dma_addr_t in the first
+	 * place would avoid the cast — confirm against the probe
+	 * allocation.
+	 */
+	dma_free_coherent(&pdev->dev, sizeof(struct ep_desc_array),
+			  udc->dma.virt, (dma_addr_t)udc->dma.phys);
+
+	/* NOTE(review): the aligned bounce buffers are allocated with a
+	 * NULL struct device in the queue path, so they are freed with
+	 * NULL here to match; both sides should be converted to the
+	 * real device together.
+	 */
+	for (num = 0; num < UDC_MAX_EP; num++) {
+		if (udc->ep[num].dma.aligned_buf) {
+			dma_free_coherent(NULL, udc->ep[num].dma.aligned_len,
+					  udc->ep[num].dma.aligned_buf,
+					  udc->ep[num].dma.aligned_addr);
+			udc->ep[num].dma.aligned_buf = NULL;
+		}
+	}
+}
+
+/* Platform driver probe: map registers, set up the PHY, register the
+ * extcon notifier, allocate the endpoint-descriptor DMA array and
+ * register the gadget UDC.
+ *
+ * Fix: the -EPROBE_DEFER path for extcon_get_edev_by_phandle()
+ * returned directly, leaking the already-initialized and powered-on
+ * PHY; all extcon failures now go through the PHY cleanup path.
+ */
+static int snps_udc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct snps_udc *udc;
+	int i, ret;
+
+	udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
+	if (!udc)
+		return -ENOMEM;
+
+	spin_lock_init(&udc->lock);
+	udc->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	udc->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(udc->regs))
+		return PTR_ERR(udc->regs);
+
+	udc->irq = irq_of_parse_and_map(dev->of_node, 0);
+	if (udc->irq <= 0) {
+		dev_err(dev, "Can't parse and map interrupt\n");
+		return -EINVAL;
+	}
+
+	udc->udc_phy = devm_phy_get(dev, "usb2drd");
+	if (IS_ERR(udc->udc_phy)) {
+		dev_err(dev, "Failed to obtain phy from device tree\n");
+		return PTR_ERR(udc->udc_phy);
+	}
+
+	ret = phy_init(udc->udc_phy);
+	if (ret) {
+		dev_err(dev, "UDC phy init failed");
+		return ret;
+	}
+
+	ret = phy_power_on(udc->udc_phy);
+	if (ret) {
+		dev_err(dev, "UDC phy power on failed");
+		phy_exit(udc->udc_phy);
+		return ret;
+	}
+
+	udc->edev = extcon_get_edev_by_phandle(dev, 0);
+	if (IS_ERR(udc->edev)) {
+		ret = PTR_ERR(udc->edev);
+		/* Defer silently; complain about any other failure */
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "Invalid or missing extcon\n");
+		goto exit_phy;
+	}
+
+	udc->nb.notifier_call = usbd_connect_notify;
+	ret = extcon_register_notifier(udc->edev, EXTCON_USB, &udc->nb);
+	if (ret < 0) {
+		dev_err(dev, "Can't register extcon device\n");
+		goto exit_phy;
+	}
+
+	/* Seed conn_type with the current cable state */
+	ret = extcon_get_cable_state_(udc->edev, EXTCON_USB);
+	if (ret < 0) {
+		dev_err(dev, "Can't get cable state\n");
+		goto exit_extcon;
+	} else if (ret) {
+		udc->conn_type = ret;
+	}
+
+	udc->dma.virt = dma_alloc_coherent(&pdev->dev,
+					   sizeof(struct ep_desc_array),
+					   (dma_addr_t *)&udc->dma.phys,
+					   GFP_KERNEL);
+	if (!udc->dma.virt) {
+		dev_err(dev, "Failed to allocate memory for ep\n");
+		ret = -ENOMEM;
+		goto exit_extcon;
+	}
+
+	INIT_DELAYED_WORK(&udc->drd_work, snps_udc_drd_work);
+
+	ret = devm_request_irq(dev, udc->irq, snps_udc_irq, IRQF_SHARED,
+			       "snps-udc", udc);
+	if (ret < 0) {
+		dev_err(dev, "Request irq %d failed for UDC\n", udc->irq);
+		goto exit_dma;
+	}
+
+	/* Gadget structure init */
+	udc->gadget.name = "snps-udc";
+	udc->gadget.speed = USB_SPEED_UNKNOWN;
+	udc->gadget.max_speed = USB_SPEED_HIGH;
+	udc->gadget.ops = &snps_gadget_ops;
+	udc->gadget.ep0 = &udc->ep[0].usb_ep;
+	INIT_LIST_HEAD(&udc->gadget.ep_list);
+
+	/* ep0 stays off gadget.ep_list per the gadget API */
+	eps_init(udc);
+	for (i = 1; i < UDC_MAX_EP; i++) {
+		list_add_tail(&udc->ep[i].usb_ep.ep_list,
+			      &udc->gadget.ep_list);
+	}
+
+	ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+	if (ret) {
+		dev_err(dev, "Error adding gadget udc: %d\n", ret);
+		goto exit_dma;
+	}
+
+	platform_set_drvdata(pdev, udc);
+	dev_info(dev, "Synopsys UDC driver probe successful\n");
+
+	return 0;
+exit_dma:
+	free_udc_dma(pdev, udc);
+exit_extcon:
+	extcon_unregister_notifier(udc->edev, EXTCON_USB, &udc->nb);
+exit_phy:
+	phy_power_off(udc->udc_phy);
+	phy_exit(udc->udc_phy);
+
+	return ret;
+}
+
+/* Platform driver remove: tear everything down in reverse order.
+ *
+ * Fix: the delayed drd work was never canceled, so it could run (and
+ * touch the controller) after the DMA memory below was freed. Stop
+ * the extcon notifier first so no new work can be scheduled, then
+ * cancel any pending work synchronously.
+ */
+static int snps_udc_remove(struct platform_device *pdev)
+{
+	struct snps_udc *udc;
+
+	udc = platform_get_drvdata(pdev);
+
+	usb_del_gadget_udc(&udc->gadget);
+
+	platform_set_drvdata(pdev, NULL);
+
+	extcon_unregister_notifier(udc->edev, EXTCON_USB, &udc->nb);
+	cancel_delayed_work_sync(&udc->drd_work);
+
+	if (udc->drd_wq) {
+		flush_workqueue(udc->drd_wq);
+		destroy_workqueue(udc->drd_wq);
+	}
+
+	free_udc_dma(pdev, udc);
+	phy_power_off(udc->udc_phy);
+	phy_exit(udc->udc_phy);
+
+	dev_info(&pdev->dev, "Synopsys UDC driver removed\n");
+
+	return 0;
+}
+
+/* Platform .shutdown hook: behave as if the gadget driver were
+ * unbound so the device drops off the bus cleanly.
+ */
+static void snps_udc_shutdown(struct platform_device *pdev)
+{
+	struct snps_udc *udc;
+
+	udc = platform_get_drvdata(pdev);
+	snps_gadget_stop(&udc->gadget);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System sleep: drop off the bus if a cable is attached, then power
+ * down and release the PHY.
+ */
+static int snps_udc_suspend(struct device *dev)
+{
+	struct snps_udc *udc = dev_get_drvdata(dev);
+
+	if (extcon_get_cable_state_(udc->edev, EXTCON_USB) > 0) {
+		dev_dbg(udc->dev, "device -> idle\n");
+		snps_gadget_pullup(&udc->gadget, 0);
+	}
+
+	phy_power_off(udc->udc_phy);
+	phy_exit(udc->udc_phy);
+
+	return 0;
+}
+
+/* System resume: re-initialize and power the PHY, then reconnect if
+ * a cable is still attached.
+ */
+static int snps_udc_resume(struct device *dev)
+{
+	struct snps_udc *udc = dev_get_drvdata(dev);
+	int ret;
+
+	ret = phy_init(udc->udc_phy);
+	if (ret) {
+		dev_err(udc->dev, "UDC phy init failure");
+		return ret;
+	}
+
+	ret = phy_power_on(udc->udc_phy);
+	if (ret)
+		goto err_phy_exit;
+
+	if (extcon_get_cable_state_(udc->edev, EXTCON_USB) > 0) {
+		dev_dbg(udc->dev, "idle -> device\n");
+		snps_gadget_pullup(&udc->gadget, 1);
+	}
+
+	return 0;
+
+err_phy_exit:
+	dev_err(udc->dev, "UDC phy power on failure");
+	phy_exit(udc->udc_phy);
+	return ret;
+}
+
+/* System-sleep callbacks only; NOTE(review): this could be written
+ * with SIMPLE_DEV_PM_OPS, which would also cover hibernation.
+ */
+static const struct dev_pm_ops snps_udc_pm_ops = {
+	.suspend = snps_udc_suspend,
+	.resume = snps_udc_resume,
+};
+#endif
+
+/* Device-tree match table; sentinel-terminated */
+static const struct of_device_id of_udc_match[] = {
+	{ .compatible = "snps,dw-ahb-udc", },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, of_udc_match);
+
+/* Platform driver glue; of_match_ptr() compiles the table away when
+ * CONFIG_OF is disabled.
+ */
+static struct platform_driver snps_udc_driver = {
+	.probe		= snps_udc_probe,
+	.remove		= snps_udc_remove,
+	.shutdown	= snps_udc_shutdown,
+	.driver		= {
+		.name	= "snps-udc",
+		.of_match_table = of_match_ptr(of_udc_match),
+#ifdef CONFIG_PM_SLEEP
+		.pm	= &snps_udc_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(snps_udc_driver);
+
+MODULE_ALIAS("platform:snps-udc");
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/udc/snps_udc.h b/drivers/usb/gadget/udc/snps_udc.h
new file mode 100644
index 0000000..0355d59
--- /dev/null
+++ b/drivers/usb/gadget/udc/snps_udc.h
@@ -0,0 +1,1071 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SNPS_UDC_H
+#define __SNPS_UDC_H
+
+/* UDC speeds */
+#define SPEED_UNKNOWN (0)
+#define SPEED_LOW (1)
+#define SPEED_FULL (2)
+#define SPEED_HIGH (3)
+
+/* Endpoint directions */
+#define EP_DIRN_IN (0x80)
+#define EP_DIRN_OUT (0x00)
+#define EP_DIRN_MASK (0x80)
+
+/* Endpoint types */
+#define EP_TYPE_CTRL (0)
+#define EP_TYPE_ISOC (1)
+#define EP_TYPE_BULK (2)
+#define EP_TYPE_INTR (3)
+#define EP_TYPE_MASK (0x03)
+
+/* Max supported endpoints */
+#define UDC_MAX_EP (10)
+
+#define EP_MAX_PKT_SIZE 512
+#define EP_CTRL_MAX_PKT_SIZE 64
+#define OUT_RX_FIFO_MEM_SIZE 4096
+#define IN_TX_FIFO_MEM_SIZE 4096
+
+/* NOTE(review): these macros silently rely on a local variable named
+ * 'ep' being in scope at the call site; passing the endpoint as an
+ * explicit macro argument would be safer.
+ */
+#define is_ep_in() ((ep->dirn) == USB_DIR_IN)
+#define is_ep_out() ((ep->dirn) == USB_DIR_OUT)
+#define is_ep_bulk() ((ep->type) == USB_ENDPOINT_XFER_BULK)
+
+#define DESC_CNT (1)
+
+#define EP_DMA_DESC_IDX_MASK (DESC_CNT - 1)
+#define EP_DMA_DESC_IDX(num) ((num) & EP_DMA_DESC_IDX_MASK)
+
+#define USB_MODE_IDLE (1)
+#define USB_MODE_DEVICE (2)
+
+#define FIFO_SZ_U32(pkt_sz) (((pkt_sz) + 3) / sizeof(u32))
+#define FIFO_SZ_U8(sz) (FIFO_SZ_U32(sz) * sizeof(u32))
+#define USBD_WQ_DELAY_MS msecs_to_jiffies(100)
+/* Register Masks and definitions */
+
+/* Endpoint Control Registers*/
+#define EP_CTRL_OUT_FLUSH_ENABLE BIT(12)
+#define EP_CTRL_OUT_CLOSE_DESC BIT(11)
+#define EP_CTRL_IN_SEND_NULL BIT(10)
+#define EP_CTRL_OUT_DMA_ENABLE BIT(9)
+#define EP_CTRL_NAK_CLEAR BIT(8)
+#define EP_CTRL_NAK_SET BIT(7)
+#define EP_CTRL_NAK_IN_PROGRESS BIT(6)
+#define EP_CTRL_TYPE_SHIFT (4)
+#define EP_CTRL_TYPE_MASK (3 << EP_CTRL_TYPE_SHIFT)
+#define EP_CTRL_IN_DMA_ENABLE BIT(3)
+#define EP_CTRL_SNOOP_ENABLE BIT(2)
+#define EP_CTRL_IN_FLUSH_ENABLE BIT(1)
+#define EP_CTRL_STALL_ENABLE BIT(0)
+
+/* Endpoint Status Registers */
+#define EP_STS_CLOSE_DESC_CLEAR BIT(28)
+#define EP_STS_IN_XFER_DONE BIT(27)
+#define EP_STS_STALL_SET_RX BIT(26)
+#define EP_STS_STALL_CLEAR_RX BIT(25)
+#define EP_STS_IN_FIFO_EMPTY BIT(24)
+#define EP_STS_IN_DMA_DONE BIT(10)
+#define EP_STS_AHB_BUS_ERROR BIT(9)
+#define EP_STS_OUT_FIFO_EMPTY BIT(8)
+#define EP_STS_DMA_BUF_NOT_AVAIL BIT(7)
+#define EP_STS_IN_TOKEN_RX BIT(6)
+#define EP_STS_OUT_DMA_SETUP_DONE BIT(5)
+#define EP_STS_OUT_DMA_DATA_DONE BIT(4)
+
+/* Buffer Regs for EP In, Receive Packet Frame Num Regs for EP Out */
+#define EP_REG2_OUT_ISOC_PID_SHIFT (16)
+#define EP_REG2_OUT_ISOC_PID_MASK (3 << EP_REG2_OUT_ISOC_PID_SHIFT)
+#define EP_REG2_IN_DEPTH_SHIFT (0)
+#define EP_REG2_IN_DEPTH_MASK (0xffff << EP_REG2_IN_DEPTH_SHIFT)
+#define EP_REG2_OUT_FRAME_NUM_SHIFT EP_REG2_IN_DEPTH_SHIFT
+#define EP_REG2_OUT_FRAME_NUM_MASK EP_REG2_IN_DEPTH_MASK
+
+/* Max Packet Size Regs for EP In, Buffer Size Regs for EP Out */
+#define EP_REG3_OUT_DEPTH_SHIFT (16)
+#define EP_REG3_OUT_DEPTH_MASK (0xffff << EP_REG3_OUT_DEPTH_SHIFT)
+#define EP_REG3_PKT_MAX_SHIFT (0)
+#define EP_REG3_PKT_MAX_MASK (0xffff << EP_REG3_PKT_MAX_SHIFT)
+
+/* Endpoint Config Registers */
+#define EP_CFG_DIRN_IN BIT(4)
+#define EP_CFG_DIRN_OUT (0)
+#define EP_CFG_PKT_MAX_SHIFT (19)
+#define EP_CFG_PKT_MAX_MASK (0x7ff << EP_CFG_PKT_MAX_SHIFT)
+#define EP_CFG_ALT_NUM_SHIFT (15)
+#define EP_CFG_ALT_NUM_MASK (0xf << EP_CFG_ALT_NUM_SHIFT)
+#define EP_CFG_INTF_NUM_SHIFT (11)
+#define EP_CFG_INTF_NUM_MASK (0xf << EP_CFG_INTF_NUM_SHIFT)
+#define EP_CFG_CFG_NUM_SHIFT (7)
+#define EP_CFG_CFG_NUM_MASK (0xf << EP_CFG_CFG_NUM_SHIFT)
+#define EP_CFG_TYPE_SHIFT (5)
+#define EP_CFG_TYPE_MASK (0x3 << EP_CFG_TYPE_SHIFT)
+#define EP_CFG_FIFO_NUM_SHIFT (0)
+#define EP_CFG_FIFO_NUM_MASK (0xf << EP_CFG_FIFO_NUM_SHIFT)
+
+/* Endpoint Interrupt Registers */
+#define EP_INTR_OUT_SHIFT (16)
+#define EP_INTR_OUT_MASK (0xffff << EP_INTR_OUT_SHIFT)
+#define EP_INTR_IN_SHIFT (0)
+#define EP_INTR_IN_MASK (0xffff << EP_INTR_IN_SHIFT)
+
+/* Device Config Register */
+#define CFG_ULPI_DDR_ENABLE BIT(19)
+#define CFG_SET_DESCRIPTOR_ENABLE BIT(18)
+#define CFG_CSR_PROGRAM_ENABLE BIT(17)
+#define CFG_HALT_STALL_ENABLE BIT(16)
+#define CFG_HS_TIMEOUT_CALIB_SHIFT (13)
+#define CFG_HS_TIMEOUT_CALIB_MASK (7 << CFG_HS_TIMEOUT_CALIB_SHIFT)
+#define CFG_FS_TIMEOUT_CALIB_SHIFT (10)
+#define CFG_FS_TIMEOUT_CALIB_MASK (7 << CFG_FS_TIMEOUT_CALIB_SHIFT)
+#define CFG_STS_1_ENABLE BIT(8)
+#define CFG_STS_ENABLE BIT(7)
+#define CFG_UTMI_BI_DIRN_ENABLE BIT(6)
+#define CFG_UTMI_8BIT_ENABLE BIT(5)
+#define CFG_SYNC_FRAME_ENABLE BIT(4)
+#define CFG_SELF_PWR_ENABLE BIT(3)
+#define CFG_REMOTE_WAKEUP_ENABLE BIT(2)
+#define CFG_SPD_SHIFT (0)
+#define CFG_SPD_MASK (3 << CFG_SPD_SHIFT)
+#define CFG_SPD_HS (0 << CFG_SPD_SHIFT)
+#define CFG_SPD_FS BIT(0)
+#define CFG_SPD_LS (2 << CFG_SPD_SHIFT)
+#define CFG_SPD_FS_48MHZ (3 << CFG_SPD_SHIFT)
+
+/* Device Control Register*/
+#define CTRL_DMA_OUT_THRESH_LEN_SHIFT (24)
+#define CTRL_DMA_OUT_THRESH_LEN_MASK (0xff << CTRL_DMA_OUT_THRESH_LEN_SHIFT)
+#define CTRL_DMA_BURST_LEN_SHIFT (16)
+#define CTRL_DMA_BURST_LEN_MASK (0xff << CTRL_DMA_BURST_LEN_SHIFT)
+#define CTRL_OUT_FIFO_FLUSH_ENABLE BIT(14)
+#define CTRL_CSR_DONE BIT(13)
+#define CTRL_OUT_ALL_NAK BIT(12)
+#define CTRL_DISCONNECT_ENABLE BIT(10)
+#define CTRL_DMA_MODE_ENABLE BIT(9)
+#define CTRL_DMA_BURST_ENABLE BIT(8)
+#define CTRL_DMA_OUT_THRESH_ENABLE BIT(7)
+#define CTRL_DMA_BUFF_FILL_MODE_ENABLE BIT(6)
+#define CTRL_ENDIAN_BIG_ENABLE BIT(5)
+#define CTRL_DMA_DESC_UPDATE_ENABLE BIT(4)
+#define CTRL_DMA_IN_ENABLE BIT(3)
+#define CTRL_DMA_OUT_ENABLE BIT(2)
+#define CTRL_RESUME_SIGNAL_ENABLE BIT(0)
+#define CTRL_LE_ENABLE (0)
+
+/* Device Status Register
+ *
+ * Fixes:
+ * - STS_ALT_NUM_MASK was built from STS_SPD_SHIFT instead of
+ *   STS_ALT_NUM_SHIFT, so get_alt_num() decoded the wrong bits.
+ * - STS_SOF_FRAME_NUM_MASK used 0x3ffff << 18, which does not fit in
+ *   32 bits (undefined behavior); the field spans bits 31:18, i.e. a
+ *   14-bit mask of 0x3fff.
+ */
+#define STS_SOF_FRAME_NUM_SHIFT (18)
+#define STS_SOF_FRAME_NUM_MASK (0x3fff << STS_SOF_FRAME_NUM_SHIFT)
+#define STS_REMOTE_WAKEUP_ALLOWED BIT(17)
+#define STS_PHY_ERROR BIT(16)
+#define STS_OUT_FIFO_EMPTY BIT(15)
+#define STS_SPD_SHIFT (13)
+#define STS_SPD_MASK (3 << STS_SPD_SHIFT)
+#define STS_SPD_HS (0 << STS_SPD_SHIFT)
+#define STS_SPD_FS BIT(13)
+#define STS_SPD_LS (2 << STS_SPD_SHIFT)
+#define STS_SPD_FS_48MHZ (3 << STS_SPD_SHIFT)
+#define STS_BUS_SUSPENDED BIT(12)
+#define STS_ALT_NUM_SHIFT (8)
+#define STS_ALT_NUM_MASK (0xf << STS_ALT_NUM_SHIFT)
+#define STS_INTF_NUM_SHIFT (4)
+#define STS_INTF_NUM_MASK (0xf << STS_INTF_NUM_SHIFT)
+#define STS_CFG_NUM_SHIFT (0)
+#define STS_CFG_NUM_MASK (0xf << STS_CFG_NUM_SHIFT)
+
+/* Device Interrupt Register */
+#define INTR_REMOTE_WAKEUP_DELTA BIT(7)
+#define INTR_SPD_ENUM_DONE BIT(6)
+#define INTR_SOF_RX BIT(5)
+#define INTR_BUS_SUSPEND BIT(4)
+#define INTR_BUS_RESET BIT(3)
+#define INTR_BUS_IDLE BIT(2)
+#define INTR_SET_INTF_RX BIT(1)
+#define INTR_SET_CFG_RX BIT(0)
+
+#define DMA_STS_BUF_SHIFT (30)
+#define DMA_STS_BUF_HOST_READY (0 << DMA_STS_BUF_SHIFT)
+#define DMA_STS_BUF_DMA_BUSY BIT(30)
+#define DMA_STS_BUF_DMA_DONE (2 << DMA_STS_BUF_SHIFT)
+#define DMA_STS_BUF_HOST_BUSY (3 << DMA_STS_BUF_SHIFT)
+#define DMA_STS_BUF_MASK (3 << DMA_STS_BUF_SHIFT)
+#define DMA_STS_RX_SHIFT (28)
+#define DMA_STS_RX_SUCCESS (0 << DMA_STS_RX_SHIFT)
+#define DMA_STS_RX_ERR_DESC BIT(28)
+#define DMA_STS_RX_ERR_BUF (3 << DMA_STS_RX_SHIFT)
+#define DMA_STS_RX_MASK (3 << DMA_STS_RX_SHIFT)
+#define DMA_STS_CFG_NUM_SHIFT (24)
+#define DMA_STS_CFG_NUM_MASK (0xf << DMA_STS_CFG_NUM_SHIFT)
+#define DMA_STS_INTF_NUM_SHIFT (20)
+#define DMA_STS_INTF_NUM_MASK (0xf << DMA_STS_INTF_NUM_SHIFT)
+#define DMA_STS_LAST_DESC BIT(27)
+#define DMA_STS_FRAME_NUM_SHIFT (16)
+#define DMA_STS_FRAME_NUM_MASK (0x7ff << DMA_STS_FRAME_NUM_SHIFT)
+#define DMA_STS_BYTE_CNT_SHIFT (0)
+#define DMA_STS_ISO_PID_SHIFT (14)
+#define DMA_STS_ISO_PID_MASK (0x3 << DMA_STS_ISO_PID_SHIFT)
+#define DMA_STS_ISO_BYTE_CNT_SHIFT (DMA_STS_BYTE_CNT_SHIFT)
+#define DMA_STS_ISO_BYTE_CNT_MASK (0x3fff << DMA_STS_ISO_BYTE_CNT_SHIFT)
+#define DMA_STS_NISO_BYTE_CNT_SHIFT (DMA_STS_BYTE_CNT_SHIFT)
+#define DMA_STS_NISO_BYTE_CNT_MASK (0xffff << DMA_STS_NISO_BYTE_CNT_SHIFT)
+
+/* UDC Interrupts */
+#define UDC_IRQ_ALL (IRQ_REMOTEWAKEUP_DELTA | \
+ IRQ_SPEED_ENUM_DONE | \
+ IRQ_BUS_SUSPEND | \
+ IRQ_BUS_RESET | \
+ IRQ_BUS_IDLE | \
+ IRQ_SET_INTF | \
+ IRQ_SET_CFG)
+#define IRQ_REMOTEWAKEUP_DELTA INTR_REMOTE_WAKEUP_DELTA
+#define IRQ_SPEED_ENUM_DONE INTR_SPD_ENUM_DONE
+#define IRQ_SOF_DETECTED INTR_SOF_RX
+#define IRQ_BUS_SUSPEND INTR_BUS_SUSPEND
+#define IRQ_BUS_RESET INTR_BUS_RESET
+#define IRQ_BUS_IDLE INTR_BUS_IDLE
+#define IRQ_SET_INTF INTR_SET_INTF_RX
+#define IRQ_SET_CFG INTR_SET_CFG_RX
+
+/* Endpoint status */
+#define EP_STS_ALL (DMA_ERROR | \
+ DMA_BUF_NOT_AVAIL | \
+ IN_TOKEN_RX | \
+ IN_DMA_DONE | \
+ IN_XFER_DONE | \
+ OUT_DMA_DATA_DONE | \
+ OUT_DMA_SETUP_DONE)
+
+#define DMA_ERROR EP_STS_AHB_BUS_ERROR
+#define DMA_BUF_NOT_AVAIL EP_STS_DMA_BUF_NOT_AVAIL
+#define IN_TOKEN_RX EP_STS_IN_TOKEN_RX
+#define IN_DMA_DONE EP_STS_IN_DMA_DONE
+#define IN_FIFO_EMPTY EP_STS_IN_FIFO_EMPTY
+#define IN_XFER_DONE EP_STS_IN_XFER_DONE
+#define OUT_DMA_DATA_DONE EP_STS_OUT_DMA_DATA_DONE
+#define OUT_DMA_SETUP_DONE EP_STS_OUT_DMA_SETUP_DONE
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+#define DIRN_STR(dirn) ((dirn) == USB_DIR_IN ? "IN" : "OUT")
+#define EP_DIRN_TYPE(d, t) (((d) << 8) | (t))
+
+/* Used for ISOC IN transfers for frame alignment. */
+#define FRAME_NUM_INVALID (~(u32)0)
+
+/* UDC config parameters */
+
+/* If multiple RX FIFO controllers are implemented for
+ * OUT Endpoints, MRX_FIFO is enabled.
+ * Multi RX FIFO controllers are not implemented in RTL.
+ */
+#define MRX_FIFO 0
+#if MRX_FIFO
+static bool mrx_fifo = true;
+#else
+static bool mrx_fifo;
+#endif
+
+/* Buffer Fill mode is enabled for IN transfers,
+ * disabled for OUT transfers.
+ */
+#define IN_DMA_BUF_FILL_EN 1
+#if IN_DMA_BUF_FILL_EN
+static bool in_bf_mode = true;
+#else
+static bool in_bf_mode;
+#endif
+
+#define OUT_DMA_BUF_FILL_EN 0
+#if OUT_DMA_BUF_FILL_EN
+static bool out_bf_mode = true;
+#else
+static bool out_bf_mode;
+#endif
+/*
+ * If it is desired that frames start being DMA'd without frame
+ * alignment, set ISOC_IN_XFER_DELAY_DISABLE to 1.
+ * If frame alignment is used, this delay is not disabled.
+ */
+#define ISOC_IN_XFER_DELAY_DISABLE 0
+#if ISOC_IN_XFER_DELAY_DISABLE
+static bool in_isoc_delay_disabled = true;
+#else
+static bool in_isoc_delay_disabled;
+#endif
+
+/* Endpoint IN/OUT registers
+ * Register space is reserved for 16 endpoints, but the controller
+ * actually supports 10 endpoints only.
+ */
+#define EP_CNT (16)
+/* Per-endpoint register bank; layout mirrors the hardware exactly,
+ * so fields must not be reordered or resized.
+ */
+struct snps_ep_regs {
+	u32 ctrl;	/* EP control */
+	u32 status;	/* EP status */
+	u32 epreg2;	/* Buffer for IN, Rec Pkt Frame num for OUT */
+	u32 epreg3;	/* Max pkt size for IN, Buf size for OUT */
+	u32 setupbuf;	/* Rsvd for IN, EP setup buffer ptr for OUT */
+	u32 datadesc;	/* EP data descriptor pointer */
+	u32 rsvd[2];
+};
+
+/* UDC registers
+ * NOTE(review): accessed via readl/writel through plain struct
+ * pointers; the rsvd/rsvd1 arrays pad the map — presumably matching
+ * the UDC20 databook offsets, confirm against the databook.
+ */
+struct snps_udc_regs {
+	struct snps_ep_regs ep_in[EP_CNT];
+	struct snps_ep_regs ep_out[EP_CNT];
+	u32 devcfg;
+	u32 devctrl;
+	u32 devstatus;
+	u32 devintrstat;
+	u32 devintrmask;
+	u32 epintrstat;
+	u32 epintrmask;
+	u32 testmode;
+	u32 releasenum;
+	u32 rsvd[56];
+	u32 epcfg[EP_CNT];
+	u32 rsvd1[175];
+	u32 rx_fifo[256];
+	u32 tx_fifo[256];
+	u32 strap;
+};
+
+/* Endpoint SETUP buffer — the controller DMAs the 8-byte SETUP
+ * packet into data1/data2.
+ */
+struct setup_desc {
+	u32 status;
+	u32 reserved;
+	u32 data1;
+	u32 data2;
+};
+
+/* Endpoint In/Out data descriptor — buf_addr/next_desc_addr hold bus
+ * addresses consumed by the controller's DMA engine.
+ */
+struct data_desc {
+	u32 status;
+	u32 reserved;
+	u32 buf_addr;
+	u32 next_desc_addr;
+};
+
+/* Endpoint descriptor layout. */
+struct ep_dma_desc {
+	struct setup_desc setup;
+	struct data_desc desc[DESC_CNT];
+};
+
+/* Endpoint descriptor array for Synopsys UDC */
+struct ep_desc_array {
+	struct ep_dma_desc ep[UDC_MAX_EP];
+};
+
+struct snps_udc;
+
+/* Endpoint data structure (for each endpoint) */
+struct snps_udc_ep {
+	struct usb_ep usb_ep;		/* gadget-core view of this EP */
+	const struct usb_endpoint_descriptor *desc;
+	struct list_head queue;		/* pending ep_xfer_req entries */
+	struct snps_udc *udc;		/* back-pointer to the controller */
+	char name[14];
+	bool in_xfer_done;	/* NOTE(review): presumably tracks IN DMA
+				 * completion — confirm in the IRQ path */
+	u32 num;
+	u32 dirn;
+	u32 type; /* USB_ENDPOINT_XFER_xxx */
+	u32 b_ep_addr; /* dirn | type */
+	u32 max_pkt_size;
+	u32 rx_fifo_size; /* Rx FIFO ram allocated */
+	u32 tx_fifo_size; /* Tx FIFO ram allocated */
+	u32 stopped:1;
+	/* Per-endpoint DMA state for the in-flight request */
+	struct {
+		struct ep_dma_desc *virt;
+		struct ep_dma_desc *phys;
+		struct usb_request *usb_req;/* Current request being DMA'd */
+		u32 len_max; /* to use with a descriptor */
+		u32 len_done; /* Length of request DMA'd so far */
+		u32 len_rem; /* Length of request left to DMA */
+		u32 add_idx; /* descriptor chain index */
+		u32 remove_idx; /* descriptor chain index */
+		u32 buf_addr; /* Location in request to DMA */
+		u32 frame_num; /* Frame number for ISOC transfers */
+		u32 frame_incr; /* Frame number increment (period) */
+		u32 status;
+		u32 done; /* DMA/USB xfer completion indication */
+		void *aligned_buf; /* used if usb_req buf not aligned */
+		dma_addr_t aligned_addr;/* Aligned buffer physical address */
+		u32 aligned_len; /* Aligned buffer length */
+		u32 last;
+	} dma;
+};
+
+/* Endpoint xfer request structure */
+/* Endpoint xfer request structure — wraps a usb_request so it can be
+ * linked on snps_udc_ep.queue and carry driver DMA bookkeeping.
+ */
+struct ep_xfer_req {
+	struct usb_request usb_req;
+	struct list_head queue;		/* link in snps_udc_ep.queue */
+	dma_addr_t dma_addr_orig;	/* original dma handle to restore */
+	u32 dma_mapped:1;	/* driver did dma_map_single() itself */
+	u32 dma_aligned:1;	/* request bounced through aligned_buf */
+};
+
+/* Controller data structure */
+/* Controller data structure — one instance per UDC device */
+struct snps_udc {
+	struct usb_gadget gadget;
+	struct usb_gadget_driver *gadget_driver;	/* bound function driver */
+	struct device *dev;
+	void __iomem *regs;		/* mapped controller registers */
+	int irq;
+	struct completion *dev_release;
+	spinlock_t lock; /* UDC spin lock variable */
+	u32 rx_fifo_space;	/* remaining shared OUT FIFO RAM */
+	u32 tx_fifo_space;	/* remaining shared IN FIFO RAM */
+	struct snps_udc_ep ep[UDC_MAX_EP];
+	/* Shared endpoint-descriptor array (coherent DMA) */
+	struct {
+		struct ep_desc_array *virt;
+		struct ep_desc_array *phys;
+	} dma;
+	struct gpio_desc *vbus_gpiod;
+	u32 vbus_active:1;
+	u32 pullup_on:1;	/* desired connect state (set by drd work) */
+	struct phy *udc_phy;
+	u32 mode;
+	struct extcon_dev *edev;
+	struct extcon_specific_cable_nb extcon_nb;
+	struct notifier_block nb;	/* extcon cable notifier */
+	struct delayed_work drd_work;	/* applies cable events */
+	struct workqueue_struct *drd_wq;
+	u32 conn_type;	/* last extcon event (non-zero = cable present) */
+};
+
+/* Register access helpers; 'reg' is an lvalue inside the mapped
+ * snps_udc_regs block.
+ *
+ * Fix: parenthesize every macro argument so expression arguments
+ * (e.g. 'a | b' or '~x') cannot be re-associated by the surrounding
+ * operators.
+ */
+#define REG_WR(reg, val) writel((val), &(reg))
+#define REG_MOD_AND(reg, val) writel((val) & readl(&(reg)), &(reg))
+#define REG_MOD_OR(reg, val) writel((val) | readl(&(reg)), &(reg))
+#define REG_MOD_MASK(reg, mask, val) \
+	writel((val) | ((mask) & readl(&(reg))), &(reg))
+#define REG_RD(reg) readl(&(reg))
+
+/* Debug helper: dump the main device-level registers via pr_debug */
+static inline void dump_regs(struct snps_udc_regs *regs)
+{
+	pr_debug("DEVCFG: 0x%x\n", REG_RD(regs->devcfg));
+	pr_debug("DEVCTRL: 0x%x\n", REG_RD(regs->devctrl));
+	pr_debug("DEVSTS: 0x%x\n", REG_RD(regs->devstatus));
+	pr_debug("DEVINTRMASK: 0x%x\n", REG_RD(regs->devintrmask));
+	pr_debug("DEVINTRSTS: 0x%x\n", REG_RD(regs->devintrstat));
+	pr_debug("EPINTRMASK: 0x%x\n", REG_RD(regs->epintrmask));
+	pr_debug("EPINTRSTS: 0x%x\n", REG_RD(regs->epintrstat));
+}
+
+/* Bus connect/disconnect and device-status field accessors.
+ * CTRL_DISCONNECT_ENABLE is inverted with respect to the bus:
+ * clearing the bit connects the device, setting it disconnects.
+ */
+static inline void bus_connect(struct snps_udc_regs *regs)
+{
+	REG_MOD_AND(regs->devctrl, ~CTRL_DISCONNECT_ENABLE);
+}
+
+static inline void bus_disconnect(struct snps_udc_regs *regs)
+{
+	REG_MOD_OR(regs->devctrl, CTRL_DISCONNECT_ENABLE);
+}
+
+static inline bool is_bus_suspend(struct snps_udc_regs *regs)
+{
+	return REG_RD(regs->devstatus) &
+		STS_BUS_SUSPENDED ? true : false;
+}
+
+/* Decode the alternate-setting number from device status */
+static inline u32 get_alt_num(struct snps_udc_regs *regs)
+{
+	return (REG_RD(regs->devstatus) & STS_ALT_NUM_MASK)
+		>> STS_ALT_NUM_SHIFT;
+}
+
+/* Decode the configuration number from device status */
+static inline u32 get_cfg_num(struct snps_udc_regs *regs)
+{
+	return (REG_RD(regs->devstatus) & STS_CFG_NUM_MASK)
+		>> STS_CFG_NUM_SHIFT;
+}
+
+/* Decode the interface number from device status */
+static inline u32 get_intf_num(struct snps_udc_regs *regs)
+{
+	return (REG_RD(regs->devstatus) & STS_INTF_NUM_MASK)
+		>> STS_INTF_NUM_SHIFT;
+}
+
+/* DMA enable/disable helpers for the IN and OUT engines.
+ * NOTE(review): is_ctrl_dma_enable() tests only the OUT bit even
+ * though enable_ctrl_dma() sets both — confirm that is intended.
+ */
+static inline void disable_ctrl_dma(struct snps_udc_regs *regs)
+{
+	REG_MOD_AND(regs->devctrl, ~(CTRL_DMA_IN_ENABLE |
+		    CTRL_DMA_OUT_ENABLE));
+}
+
+static inline void enable_ctrl_dma(struct snps_udc_regs *regs)
+{
+	REG_MOD_OR(regs->devctrl, (CTRL_DMA_IN_ENABLE |
+		   CTRL_DMA_OUT_ENABLE));
+}
+
+static inline bool is_ctrl_dma_enable(struct snps_udc_regs *regs)
+{
+	return REG_RD(regs->devctrl) &
+		CTRL_DMA_OUT_ENABLE ? true : false;
+}
+
+static inline void disable_epin_dma(struct snps_udc_regs *regs)
+{
+	REG_MOD_AND(regs->devctrl, ~(CTRL_DMA_IN_ENABLE));
+}
+
+static inline void enable_epin_dma(struct snps_udc_regs *regs)
+{
+	REG_MOD_OR(regs->devctrl, (CTRL_DMA_IN_ENABLE));
+}
+
+static inline bool is_epin_dma_enable(struct snps_udc_regs *regs)
+{
+	return REG_RD(regs->devctrl) &
+		CTRL_DMA_IN_ENABLE ? true : false;
+}
+
+static inline void disable_epout_dma(struct snps_udc_regs *regs)
+{
+	REG_MOD_AND(regs->devctrl, ~(CTRL_DMA_OUT_ENABLE));
+}
+
+static inline void enable_epout_dma(struct snps_udc_regs *regs)
+{
+	REG_MOD_OR(regs->devctrl, (CTRL_DMA_OUT_ENABLE));
+}
+
+static inline bool is_epout_dma_enable(struct snps_udc_regs *regs)
+{
+	return REG_RD(regs->devctrl) &
+		CTRL_DMA_OUT_ENABLE ? true : false;
+}
+
+static inline u32 get_frnum_last_rx(struct snps_udc_regs *regs)
+{
+ return (REG_RD(regs->devstatus) &
+ STS_SOF_FRAME_NUM_MASK) >> STS_SOF_FRAME_NUM_SHIFT;
+}
+
+static inline u32 get_irq_active(struct snps_udc_regs *regs)
+{
+ return REG_RD(regs->devintrstat);
+}
+
+/* Acknowledge (write-1-to-clear) the device IRQs given in mask. */
+static inline void clear_udc_dev_irq(struct snps_udc_regs *regs, u32 mask)
+{
+	REG_WR(regs->devintrstat, mask);
+}
+
+/* Mask (disable) the device IRQs given in mask; a set mask bit disables. */
+static inline void disable_udc_dev_irq(struct snps_udc_regs *regs, u32 mask)
+{
+	REG_MOD_OR(regs->devintrmask, mask);
+}
+
+/* Unmask (enable) the device IRQs given in mask. */
+static inline void enable_udc_dev_irq(struct snps_udc_regs *regs, u32 mask)
+{
+	REG_MOD_AND(regs->devintrmask, ~mask);
+}
+
+/* Set of device IRQs currently unmasked (i.e. enabled). */
+static inline u32 mask_irq(struct snps_udc_regs *regs)
+{
+	u32 masked = REG_RD(regs->devintrmask);
+
+	return ~masked & UDC_IRQ_ALL;
+}
+
+/* Stop NAKing all OUT endpoints at the device level. */
+static inline void clear_devnak(struct snps_udc_regs *regs)
+{
+	REG_MOD_AND(regs->devctrl, ~CTRL_OUT_ALL_NAK);
+}
+
+/* NAK all OUT endpoints at the device level. */
+static inline void set_devnak(struct snps_udc_regs *regs)
+{
+	REG_MOD_OR(regs->devctrl, CTRL_OUT_ALL_NAK);
+}
+
+static inline bool is_phy_error(struct snps_udc_regs *regs)
+{
+ return REG_RD(regs->devstatus) &
+ STS_PHY_ERROR ? true : false;
+}
+
+static inline bool is_rmtwkp(struct snps_udc_regs *regs)
+{
+ return REG_RD(regs->devstatus) &
+ STS_REMOTE_WAKEUP_ALLOWED ? true : false;
+}
+
+/* Clear the remote-wakeup feature bit in the device config register. */
+static inline void clear_rmtwkup(struct snps_udc_regs *regs)
+{
+	REG_MOD_AND(regs->devcfg, ~CFG_REMOTE_WAKEUP_ENABLE);
+}
+
+/* Set the remote-wakeup feature bit in the device config register. */
+static inline void set_rmtwkp(struct snps_udc_regs *regs)
+{
+	REG_MOD_OR(regs->devcfg, CFG_REMOTE_WAKEUP_ENABLE);
+}
+
+/* Start driving resume signalling on the bus (remote wakeup). */
+static inline void start_rmtwkp(struct snps_udc_regs *regs)
+{
+	REG_MOD_OR(regs->devctrl, CTRL_RESUME_SIGNAL_ENABLE);
+}
+
+/* Stop driving resume signalling. */
+static inline void stop_rmtwkp(struct snps_udc_regs *regs)
+{
+	REG_MOD_AND(regs->devctrl, ~CTRL_RESUME_SIGNAL_ENABLE);
+}
+
+/* Clear the self-powered bit (device reports bus-powered). */
+static inline void disable_self_pwr(struct snps_udc_regs *regs)
+{
+	REG_MOD_AND(regs->devcfg, ~CFG_SELF_PWR_ENABLE);
+}
+
+/* Set the self-powered bit in the device config register. */
+static inline void enable_self_pwr(struct snps_udc_regs *regs)
+{
+	REG_MOD_OR(regs->devcfg, CFG_SELF_PWR_ENABLE);
+}
+
+/* Clear the SET_DESCRIPTOR enable bit in the device config register. */
+static inline void disable_set_desc(struct snps_udc_regs *regs)
+{
+	REG_MOD_AND(regs->devcfg, ~CFG_SET_DESCRIPTOR_ENABLE);
+}
+
+/* Set the SET_DESCRIPTOR enable bit in the device config register. */
+static inline void enable_set_desc(struct snps_udc_regs *regs)
+{
+	REG_MOD_OR(regs->devcfg, CFG_SET_DESCRIPTOR_ENABLE);
+}
+
+/* Signal CSR_DONE: software has finished programming the UDC20 CSRs. */
+static inline void set_setup_done(struct snps_udc_regs *regs)
+{
+	REG_MOD_OR(regs->devctrl, CTRL_CSR_DONE);
+}
+
+/*
+ * Translate the enumerated-speed field of the device status register
+ * into a SPEED_* constant. Returns 0 for an unrecognized encoding.
+ */
+static inline u32 get_enum_speed(struct snps_udc_regs *regs)
+{
+	u32 spd = REG_RD(regs->devstatus) & STS_SPD_MASK;
+
+	if (spd == STS_SPD_LS)
+		return SPEED_LOW;
+	if (spd == STS_SPD_HS)
+		return SPEED_HIGH;
+	if (spd == STS_SPD_FS || spd == STS_SPD_FS_48MHZ)
+		return SPEED_FULL;
+
+	return 0;
+}
+
+/*
+ * Program the requested USB speed into the device config register.
+ * Any unrecognized value falls back to full speed.
+ */
+static inline void set_speed_requested(struct snps_udc_regs *regs, u32 speed)
+{
+	u32 spd_bits;
+
+	if (speed == SPEED_LOW)
+		spd_bits = CFG_SPD_LS;
+	else if (speed == SPEED_HIGH)
+		spd_bits = CFG_SPD_HS;
+	else
+		spd_bits = CFG_SPD_FS;
+
+	REG_MOD_AND(regs->devcfg, ~CFG_SPD_MASK);
+	REG_MOD_OR(regs->devcfg, spd_bits);
+}
+
+/*
+ * Program the per-endpoint registers for endpoint num: type,
+ * direction and max packet size. Control endpoints are programmed in
+ * both directions.
+ */
+static inline void init_ep_reg(struct snps_udc_regs *regs, u32 num, u32 type,
+			       u32 dirn, u32 max_pkt_size)
+{
+	if ((type == EP_TYPE_CTRL) || (dirn == EP_DIRN_OUT)) {
+		REG_WR(regs->ep_out[num].ctrl,
+		       (type << EP_CTRL_TYPE_SHIFT));
+		/*
+		 * Write-1-to-clear any stale status bits. Use REG_RD
+		 * for the read like every other access in this file;
+		 * the original dereferenced the MMIO register directly.
+		 */
+		REG_WR(regs->ep_out[num].status,
+		       REG_RD(regs->ep_out[num].status));
+		REG_WR(regs->ep_out[num].epreg2, 0);
+		REG_WR(regs->ep_out[num].epreg3,
+		       ((max_pkt_size >> 2) << 16) | max_pkt_size);
+
+		if (mrx_fifo)
+			REG_MOD_OR(regs->ep_out[num].epreg3,
+				   (FIFO_SZ_U32(max_pkt_size) <<
+				   EP_REG3_OUT_DEPTH_SHIFT));
+	}
+	if ((type == EP_TYPE_CTRL) || (dirn == EP_DIRN_IN)) {
+		REG_WR(regs->ep_in[num].ctrl,
+		       (type << EP_CTRL_TYPE_SHIFT));
+		REG_WR(regs->ep_in[num].epreg3,
+		       (max_pkt_size << EP_REG3_PKT_MAX_SHIFT));
+		REG_WR(regs->ep_in[num].epreg2,
+		       (max_pkt_size >> 2));
+		/* Pulse the IN FIFO flush bit to drain stale data. */
+		REG_MOD_OR(regs->ep_in[num].ctrl,
+			   EP_CTRL_IN_FLUSH_ENABLE);
+		REG_MOD_AND(regs->ep_in[num].ctrl,
+			    ~EP_CTRL_IN_FLUSH_ENABLE);
+		/*
+		 * NAK the IN endpoint until a request is queued. The
+		 * original used REG_MOD_AND with a non-inverted mask
+		 * here, which would have cleared every other ctrl bit
+		 * (including the type just programmed) rather than
+		 * setting NAK.
+		 */
+		REG_MOD_OR(regs->ep_in[num].ctrl,
+			   EP_CTRL_NAK_SET);
+	}
+	REG_WR(regs->epcfg[num],
+	       (num << EP_CFG_FIFO_NUM_SHIFT) |
+	       (type << EP_CFG_TYPE_SHIFT) |
+	       (max_pkt_size << EP_CFG_PKT_MAX_SHIFT) |
+	       ((dirn == EP_DIRN_OUT) ? EP_CFG_DIRN_OUT : EP_CFG_DIRN_IN));
+}
+
+/* Record the active alternate-setting number in the EP config register. */
+static inline void set_ep_alt_num(struct snps_udc_regs *regs, u32 num, u32 alt)
+{
+	REG_MOD_MASK(regs->epcfg[num], ~EP_CFG_ALT_NUM_MASK,
+		     alt << EP_CFG_ALT_NUM_SHIFT);
+}
+
+/* Record the active configuration number in the EP config register. */
+static inline void set_epcfg_reg(struct snps_udc_regs *regs, u32 num, u32 cfg)
+{
+	REG_MOD_MASK(regs->epcfg[num], ~EP_CFG_CFG_NUM_MASK,
+		     cfg << EP_CFG_CFG_NUM_SHIFT);
+}
+
+/* Record the owning interface number in the EP config register. */
+static inline void set_ep_intf_num(struct snps_udc_regs *regs, u32 num,
+				   u32 intf)
+{
+	REG_MOD_MASK(regs->epcfg[num], ~EP_CFG_INTF_NUM_MASK,
+		     intf << EP_CFG_INTF_NUM_SHIFT);
+}
+
+/*
+ * Stop DMA on one endpoint. NOTE(review): in shared RX FIFO mode
+ * (!mrx_fifo) the OUT path is a no-op here, while enable_ep_dma()
+ * falls back to the global devctrl bit - confirm the asymmetry is
+ * intended (clearing the global bit would affect all OUT endpoints).
+ */
+static inline void disable_ep_dma(struct snps_udc_regs *regs, u32 num, u32 dirn)
+{
+	if (dirn != EP_DIRN_OUT) {
+		REG_MOD_AND(regs->ep_in[num].ctrl, ~EP_CTRL_IN_DMA_ENABLE);
+		return;
+	}
+
+	if (mrx_fifo)
+		REG_MOD_AND(regs->ep_out[num].ctrl, ~EP_CTRL_OUT_DMA_ENABLE);
+}
+
+/* Start DMA on one endpoint (per-EP bit, or global bit in shared-FIFO mode). */
+static inline void enable_ep_dma(struct snps_udc_regs *regs,
+				 u32 num, u32 dirn)
+{
+	if (dirn != EP_DIRN_OUT) {
+		REG_MOD_OR(regs->ep_in[num].ctrl, EP_CTRL_IN_DMA_ENABLE);
+		return;
+	}
+
+	if (mrx_fifo)
+		REG_MOD_OR(regs->ep_out[num].ctrl, EP_CTRL_OUT_DMA_ENABLE);
+	else
+		REG_MOD_OR(regs->devctrl, CTRL_DMA_OUT_ENABLE);
+}
+
+/*
+ * Point the OUT endpoint's SETUP buffer register at addr (a DMA bus
+ * address passed as void * - presumably obtained from the DMA API;
+ * verify against the caller). IN endpoints have no SETUP buffer, so
+ * other directions are ignored.
+ */
+static inline void set_setup_buf_ptr(struct snps_udc_regs *regs,
+				     u32 num, u32 dirn, void *addr)
+{
+	if (dirn != EP_DIRN_OUT)
+		return;
+
+	REG_WR(regs->ep_out[num].setupbuf, (dma_addr_t)addr);
+}
+
+/* Point the endpoint's DMA data-descriptor register at addr. */
+static inline void set_data_desc_ptr(struct snps_udc_regs *regs,
+				     u32 num, u32 dirn, void *addr)
+{
+	if (dirn != EP_DIRN_OUT)
+		REG_WR(regs->ep_in[num].datadesc, (dma_addr_t)addr);
+	else
+		REG_WR(regs->ep_out[num].datadesc, (dma_addr_t)addr);
+}
+
+/*
+ * True if the endpoint's FIFO is empty. For OUT endpoints the per-EP
+ * status is used with dedicated RX FIFOs, the shared device status
+ * otherwise.
+ */
+static inline bool is_ep_fifo_empty(struct snps_udc_regs *regs,
+				    u32 num, u32 dirn)
+{
+	u32 empty;
+
+	if (dirn != EP_DIRN_OUT)
+		return (REG_RD(regs->ep_in[num].status) &
+			EP_STS_IN_FIFO_EMPTY) != 0;
+
+	if (mrx_fifo)
+		empty = REG_RD(regs->ep_out[num].status) &
+			EP_STS_OUT_FIFO_EMPTY;
+	else
+		empty = REG_RD(regs->devstatus) & STS_OUT_FIFO_EMPTY;
+
+	return empty != 0;
+}
+
+/* De-assert the FIFO flush bit for the given endpoint/direction. */
+static inline void clear_ep_fifo_flush(struct snps_udc_regs *regs,
+				       u32 num, u32 dirn)
+{
+	if (dirn != EP_DIRN_OUT) {
+		REG_MOD_AND(regs->ep_in[num].ctrl, ~EP_CTRL_IN_FLUSH_ENABLE);
+		return;
+	}
+
+	if (mrx_fifo)
+		REG_MOD_AND(regs->ep_out[num].ctrl, ~EP_CTRL_OUT_FLUSH_ENABLE);
+	else
+		REG_MOD_AND(regs->devctrl, ~CTRL_OUT_FIFO_FLUSH_ENABLE);
+}
+
+/* Assert the FIFO flush bit for the given endpoint/direction. */
+static inline void set_ep_fifo_flush(struct snps_udc_regs *regs,
+				     u32 num, u32 dirn)
+{
+	if (dirn != EP_DIRN_OUT) {
+		REG_MOD_OR(regs->ep_in[num].ctrl, EP_CTRL_IN_FLUSH_ENABLE);
+		return;
+	}
+
+	if (mrx_fifo)
+		REG_MOD_OR(regs->ep_out[num].ctrl, EP_CTRL_OUT_FLUSH_ENABLE);
+	else
+		REG_MOD_OR(regs->devctrl, CTRL_OUT_FIFO_FLUSH_ENABLE);
+}
+
+/*
+ * Frame number captured in the OUT endpoint's epreg2; IN endpoints
+ * have no frame-number field here, so 0 is returned for them.
+ */
+static inline u32 get_ep_frnum(struct snps_udc_regs *regs,
+			       u32 num, u32 dirn)
+{
+	if (dirn == EP_DIRN_OUT)
+		/*
+		 * Use REG_RD like every other register access in this
+		 * file; the original dereferenced the MMIO register
+		 * directly, bypassing the accessor.
+		 */
+		return (REG_RD(regs->ep_out[num].epreg2) &
+			EP_REG2_OUT_FRAME_NUM_MASK) >>
+			EP_REG2_OUT_FRAME_NUM_SHIFT;
+	return 0;
+}
+
+/* Acknowledge (write-1-to-clear) the IRQ of one endpoint. */
+static inline void clear_udc_ep_irq(struct snps_udc_regs *regs,
+				    u32 num, u32 dirn)
+{
+	u32 shift = (dirn == EP_DIRN_OUT) ?
+		    EP_INTR_OUT_SHIFT : EP_INTR_IN_SHIFT;
+
+	REG_WR(regs->epintrstat, (1 << num) << shift);
+}
+
+/* Mask (disable) the IRQ of one endpoint. */
+static inline void disable_udc_ep_irq(struct snps_udc_regs *regs,
+				      u32 num, u32 dirn)
+{
+	u32 shift = (dirn == EP_DIRN_OUT) ?
+		    EP_INTR_OUT_SHIFT : EP_INTR_IN_SHIFT;
+
+	REG_MOD_OR(regs->epintrmask, (1 << num) << shift);
+}
+
+/* Unmask (enable) the IRQ of one endpoint. */
+static inline void enable_udc_ep_irq(struct snps_udc_regs *regs,
+				     u32 num, u32 dirn)
+{
+	u32 shift = (dirn == EP_DIRN_OUT) ?
+		    EP_INTR_OUT_SHIFT : EP_INTR_IN_SHIFT;
+
+	REG_MOD_AND(regs->epintrmask, ~((1 << num) << shift));
+}
+
+/* Active endpoint IRQ bits for one direction, shifted down to bit 0. */
+static inline u32 get_ep_irq_active(struct snps_udc_regs *regs, u32 dirn)
+{
+	u32 stat = REG_RD(regs->epintrstat);
+
+	if (dirn == EP_DIRN_OUT)
+		return (stat & EP_INTR_OUT_MASK) >> EP_INTR_OUT_SHIFT;
+
+	return (stat & EP_INTR_IN_MASK) >> EP_INTR_IN_SHIFT;
+}
+
+/* Acknowledge a whole set of endpoint IRQs for one direction. */
+static inline void clear_udc_ep_irq_list(struct snps_udc_regs *regs,
+					 u32 dirn, u32 mask)
+{
+	u32 shift = (dirn == EP_DIRN_OUT) ?
+		    EP_INTR_OUT_SHIFT : EP_INTR_IN_SHIFT;
+
+	REG_WR(regs->epintrstat, mask << shift);
+}
+
+/* Raw status register of one endpoint. */
+static inline u32 get_ep_status(struct snps_udc_regs *regs, u32 num, u32 dirn)
+{
+	if (dirn != EP_DIRN_OUT)
+		return REG_RD(regs->ep_in[num].status);
+
+	return REG_RD(regs->ep_out[num].status);
+}
+
+/* Acknowledge (write-1-to-clear) the endpoint status bits in mask. */
+static inline void clear_ep_status(struct snps_udc_regs *regs,
+				   u32 num, u32 dirn, u32 mask)
+{
+	if (dirn != EP_DIRN_OUT)
+		REG_WR(regs->ep_in[num].status, mask);
+	else
+		REG_WR(regs->ep_out[num].status, mask);
+}
+
+/* Clear the NAK condition on one endpoint. */
+static inline void clear_ep_nak(struct snps_udc_regs *regs,
+				u32 num, u32 dirn)
+{
+	if (dirn != EP_DIRN_OUT)
+		REG_MOD_OR(regs->ep_in[num].ctrl, EP_CTRL_NAK_CLEAR);
+	else
+		REG_MOD_OR(regs->ep_out[num].ctrl, EP_CTRL_NAK_CLEAR);
+}
+
+/* Force one endpoint to NAK all tokens. */
+static inline void enable_ep_nak(struct snps_udc_regs *regs,
+				 u32 num, u32 dirn)
+{
+	if (dirn != EP_DIRN_OUT)
+		REG_MOD_OR(regs->ep_in[num].ctrl, EP_CTRL_NAK_SET);
+	else
+		REG_MOD_OR(regs->ep_out[num].ctrl, EP_CTRL_NAK_SET);
+}
+
+/* Undo enable_ep_nak() by clearing the NAK-set control bit. */
+static inline void disable_ep_nak(struct snps_udc_regs *regs,
+				  u32 num, u32 dirn)
+{
+	if (dirn != EP_DIRN_OUT)
+		REG_MOD_AND(regs->ep_in[num].ctrl, ~EP_CTRL_NAK_SET);
+	else
+		REG_MOD_AND(regs->ep_out[num].ctrl, ~EP_CTRL_NAK_SET);
+}
+
+/* True while the controller still reports a NAK in progress. */
+static inline bool is_ep_nak_inprog(struct snps_udc_regs *regs,
+				    u32 num, u32 dirn)
+{
+	u32 ctrl;
+
+	if (dirn == EP_DIRN_OUT)
+		ctrl = REG_RD(regs->ep_out[num].ctrl);
+	else
+		ctrl = REG_RD(regs->ep_in[num].ctrl);
+
+	return (ctrl & EP_CTRL_NAK_IN_PROGRESS) != 0;
+}
+
+/* Remove the STALL condition from one endpoint. */
+static inline void disable_ep_stall(struct snps_udc_regs *regs,
+				    u32 num, u32 dirn)
+{
+	if (dirn != EP_DIRN_OUT)
+		REG_MOD_AND(regs->ep_in[num].ctrl, ~EP_CTRL_STALL_ENABLE);
+	else
+		REG_MOD_AND(regs->ep_out[num].ctrl, ~EP_CTRL_STALL_ENABLE);
+}
+
+/*
+ * Stall one endpoint. The stall bit is only set once the OUT FIFO
+ * (per-EP or shared, depending on mrx_fifo) reports empty; while data
+ * remains the request is silently dropped - presumably the hardware
+ * requires an empty FIFO before stalling; confirm against the UDC20
+ * databook.
+ *
+ * NOTE(review): the FIFO-empty guard reads OUT FIFO state even when
+ * stalling an IN endpoint, and the early return gives the caller no
+ * indication the stall was skipped - confirm both are intended.
+ */
+static inline void enable_ep_stall(struct snps_udc_regs *regs,
+				   u32 num, u32 dirn)
+{
+	if (mrx_fifo && !(REG_RD(regs->ep_out[num].status) &
+			  EP_STS_OUT_FIFO_EMPTY))
+		return;
+	else if (!mrx_fifo && !(REG_RD(regs->devstatus) &
+				STS_OUT_FIFO_EMPTY))
+		return;
+
+	if (dirn == EP_DIRN_OUT)
+		REG_MOD_OR(regs->ep_out[num].ctrl,
+			   EP_CTRL_STALL_ENABLE);
+	else
+		REG_MOD_OR(regs->ep_in[num].ctrl,
+			   EP_CTRL_STALL_ENABLE);
+}
+
+/*
+ * Frame number of the last received SOF.
+ * NOTE(review): identical to get_frnum_last_rx() earlier in the file.
+ */
+static inline u32 get_last_rx_frnum(struct snps_udc_regs *regs)
+{
+	u32 status = REG_RD(regs->devstatus);
+
+	return (status & STS_SOF_FRAME_NUM_MASK) >> STS_SOF_FRAME_NUM_SHIFT;
+}
+
+/*
+ * Quiesce the controller: stop control DMA, then mask and acknowledge
+ * every device interrupt, and for each endpoint (IN first, then OUT)
+ * mask/acknowledge its IRQ and clear all of its status bits.
+ */
+static inline void finish_udc(struct snps_udc_regs *regs)
+{
+	static const u32 dirs[] = { EP_DIRN_IN, EP_DIRN_OUT };
+	u32 ep_num, i;
+
+	disable_ctrl_dma(regs);
+	disable_udc_dev_irq(regs, UDC_IRQ_ALL);
+	clear_udc_dev_irq(regs, UDC_IRQ_ALL);
+
+	for (ep_num = 0; ep_num < UDC_MAX_EP; ep_num++) {
+		for (i = 0; i < ARRAY_SIZE(dirs); i++) {
+			u32 dirn = dirs[i];
+
+			disable_udc_ep_irq(regs, ep_num, dirn);
+			clear_udc_ep_irq(regs, ep_num, dirn);
+			clear_ep_status(regs, ep_num, dirn,
+					get_ep_status(regs, ep_num, dirn));
+		}
+	}
+}
+
+/*
+ * One-time controller initialization: quiesce everything, then
+ * program the device config and control registers for this driver's
+ * operating mode. Register write order follows the original code and
+ * should be preserved.
+ */
+static inline void init_udc_reg(struct snps_udc_regs *regs)
+{
+	finish_udc(regs);
+	/* UTMI 8-bit PHY, software CSR programming, request high speed. */
+	REG_WR(regs->devcfg, CFG_SET_DESCRIPTOR_ENABLE
+			| CFG_UTMI_8BIT_ENABLE
+			| CFG_CSR_PROGRAM_ENABLE
+			| CFG_SPD_HS);
+	/* Little-endian, descriptor-update DMA mode, NAK all OUT EPs. */
+	REG_WR(regs->devctrl, CTRL_LE_ENABLE
+			| CTRL_DISCONNECT_ENABLE
+			| CTRL_DMA_MODE_ENABLE
+			| CTRL_DMA_DESC_UPDATE_ENABLE
+			| CTRL_OUT_ALL_NAK
+			| CTRL_DMA_OUT_THRESH_LEN_MASK
+			| CTRL_DMA_BURST_LEN_MASK
+			| CTRL_DMA_BURST_ENABLE
+			| CTRL_OUT_FIFO_FLUSH_ENABLE
+			);
+
+	/* With per-EP RX FIFOs, the global OUT FIFO flush is not used. */
+	if (mrx_fifo)
+		REG_MOD_AND(regs->devctrl, ~CTRL_OUT_FIFO_FLUSH_ENABLE);
+
+	/* Optional DMA buffer-fill mode, selected by module parameter. */
+	if (out_bf_mode)
+		REG_MOD_OR(regs->devctrl, CTRL_DMA_BUFF_FILL_MODE_ENABLE);
+
+	/* Mask only bus-idle and SOF; leave other device IRQs enabled. */
+	REG_WR(regs->devintrmask, IRQ_BUS_IDLE | IRQ_SOF_DETECTED);
+	REG_WR(regs->epintrmask, 0);
+}
+
+/*
+ * Grab the next free descriptor in the endpoint's circular DMA chain
+ * and advance the producer index. NOTE(review): there is no
+ * full-chain check here; callers appear expected to test
+ * dma_desc_chain_is_full() first - confirm.
+ */
+static inline struct data_desc *dma_desc_chain_alloc(struct snps_udc_ep *ep)
+{
+	u32 idx = ep->dma.add_idx++;
+
+	return &ep->dma.virt->desc[EP_DMA_DESC_IDX(idx)];
+}
+
+/* Chain is empty when producer and consumer indices coincide. */
+static inline int dma_desc_chain_is_empty(struct snps_udc_ep *ep)
+{
+	return ep->dma.add_idx == ep->dma.remove_idx;
+}
+
+/* Release the oldest descriptor by advancing the consumer index. */
+static inline void dma_desc_chain_free(struct snps_udc_ep *ep)
+{
+	ep->dma.remove_idx++;
+}
+
+/*
+ * Chain is full when the free-running producer and consumer counters
+ * map to the same slot while the chain is not empty.
+ */
+static inline int dma_desc_chain_is_full(struct snps_udc_ep *ep)
+{
+	if (dma_desc_chain_is_empty(ep))
+		return 0;
+
+	return EP_DMA_DESC_IDX(ep->dma.add_idx) ==
+	       EP_DMA_DESC_IDX(ep->dma.remove_idx);
+}
+
+/* Oldest in-flight descriptor (consumer side of the chain). */
+static inline struct data_desc *dma_desc_chain_head(struct snps_udc_ep *ep)
+{
+	return &ep->dma.virt->desc[EP_DMA_DESC_IDX(ep->dma.remove_idx)];
+}
+
+/* Drop all descriptors by rewinding both indices. */
+static inline void dma_desc_chain_reset(struct snps_udc_ep *ep)
+{
+	ep->dma.add_idx = 0;
+	ep->dma.remove_idx = 0;
+}
+#endif
--
2.1.0