Merge "arm/dt: msm8974-liquid: Enable BLSP#2 UART#1 support"
diff --git a/arch/arm/mach-msm/pil-pronto.c b/arch/arm/mach-msm/pil-pronto.c
index 0e32c11b..6bd087c 100644
--- a/arch/arm/mach-msm/pil-pronto.c
+++ b/arch/arm/mach-msm/pil-pronto.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -336,7 +336,6 @@
disable_irq_nosync(drv->irq);
drv->restart_inprogress = true;
- wcnss_pronto_log_debug_regs();
restart_wcnss(drv);
return IRQ_HANDLED;
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index cd15d71..82403d2 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -440,7 +440,7 @@
continue;
/* Do not allow un-secure heap if secure is specified */
if (secure_allocation &&
- (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP))
+ !ion_heap_allow_secure_allocation(heap->type))
continue;
trace_ion_alloc_buffer_start(client->name, heap->name, len,
heap_mask, flags);
@@ -1713,7 +1713,7 @@
buffer = handle->buffer;
heap = buffer->heap;
- if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP) {
+ if (!ion_heap_allow_handle_secure(heap->type)) {
pr_err("%s: cannot secure buffer from non secure heap\n",
__func__);
goto out_unlock;
@@ -1746,7 +1746,7 @@
buffer = handle->buffer;
heap = buffer->heap;
- if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP) {
+ if (!ion_heap_allow_handle_secure(heap->type)) {
pr_err("%s: cannot secure buffer from non secure heap\n",
__func__);
goto out_unlock;
@@ -1777,7 +1777,7 @@
mutex_lock(&dev->lock);
for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
- if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP)
+ if (!ion_heap_allow_heap_secure(heap->type))
continue;
if (ION_HEAP(heap->id) != heap_id)
continue;
@@ -1805,7 +1805,7 @@
mutex_lock(&dev->lock);
for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
- if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP)
+ if (!ion_heap_allow_heap_secure(heap->type))
continue;
if (ION_HEAP(heap->id) != heap_id)
continue;
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 920e5d0..2473dd2 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -322,4 +322,10 @@
int version, void *data, int flags);
int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle);
+
+int ion_heap_allow_secure_allocation(enum ion_heap_type type);
+
+int ion_heap_allow_heap_secure(enum ion_heap_type type);
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type);
#endif /* _ION_PRIV_H */
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index b3f2ab4..fb365ba 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -660,6 +660,21 @@
return ret;
}
+int ion_heap_allow_secure_allocation(enum ion_heap_type type)
+{
+ return type == ((enum ion_heap_type) ION_HEAP_TYPE_CP);
+}
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type)
+{
+ return type == ((enum ion_heap_type) ION_HEAP_TYPE_CP);
+}
+
+int ion_heap_allow_heap_secure(enum ion_heap_type type)
+{
+ return type == ((enum ion_heap_type) ION_HEAP_TYPE_CP);
+}
+
static long msm_ion_custom_ioctl(struct ion_client *client,
unsigned int cmd,
unsigned long arg)
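The ion change above replaces five open-coded `heap->type != ION_HEAP_TYPE_CP` comparisons with three per-operation predicates, so the secure-allocation, secure-handle, and secure-heap policies can diverge later without touching the call sites in ion.c. A minimal user-space sketch of the pattern (the enum values are illustrative, not the real msm_ion numbering):

    #include <stdio.h>

    /* illustrative heap types; the real list lives in the ion headers */
    enum ion_heap_type { HEAP_SYSTEM, HEAP_CARVEOUT, HEAP_CP };

    /* one predicate per operation, so each policy can evolve independently */
    static int allow_secure_allocation(enum ion_heap_type t) { return t == HEAP_CP; }
    static int allow_handle_secure(enum ion_heap_type t)     { return t == HEAP_CP; }
    static int allow_heap_secure(enum ion_heap_type t)       { return t == HEAP_CP; }

    int main(void)
    {
        enum ion_heap_type heaps[] = { HEAP_SYSTEM, HEAP_CARVEOUT, HEAP_CP };
        for (int i = 0; i < 3; i++)
            printf("heap %d: alloc=%d handle=%d heap=%d\n", i,
                   allow_secure_allocation(heaps[i]),
                   allow_handle_secure(heaps[i]),
                   allow_heap_secure(heaps[i]));
        return 0;
    }

Today all three predicates share one body; the value of the refactor is that each call site now states which policy it is asking about.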
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 9ee19ab..a037c17 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1525,6 +1525,120 @@
return MMC_BLK_SUCCESS;
}
+/*
+ * mmc_blk_reinsert_req() - re-insert a request into the scheduler
+ * @areq: request to re-insert.
+ *
+ * The request may be packed or single. If reinsertion fails, the request
+ * will be requeued to the dispatch queue.
+ */
+static void mmc_blk_reinsert_req(struct mmc_async_req *areq)
+{
+ struct request *prq;
+ int ret = 0;
+ struct mmc_queue_req *mq_rq;
+ struct request_queue *q;
+
+ mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+ q = mq_rq->req->q;
+ if (mq_rq->packed_cmd != MMC_PACKED_NONE) {
+ while (!list_empty(&mq_rq->packed_list)) {
+ /* return requests in reverse order */
+ prq = list_entry_rq(mq_rq->packed_list.prev);
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(q->queue_lock);
+ ret = blk_reinsert_request(q, prq);
+ if (ret) {
+ blk_requeue_request(q, prq);
+ spin_unlock_irq(q->queue_lock);
+ goto reinsert_error;
+ }
+ spin_unlock_irq(q->queue_lock);
+ }
+ } else {
+ spin_lock_irq(q->queue_lock);
+ ret = blk_reinsert_request(q, mq_rq->req);
+ if (ret)
+ blk_requeue_request(q, mq_rq->req);
+ spin_unlock_irq(q->queue_lock);
+ }
+ return;
+
+reinsert_error:
+ pr_err("%s: blk_reinsert_request() failed (%d)",
+ mq_rq->req->rq_disk->disk_name, ret);
+ /*
+ * -EIO will be reported for this request and the rest of packed_list.
+ * The urgent request will proceed anyway; it is the upper layer's
+ * responsibility to re-send the failed requests.
+ */
+ while (!list_empty(&mq_rq->packed_list)) {
+ prq = list_entry_rq(mq_rq->packed_list.next);
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(q, prq);
+ spin_unlock_irq(q->queue_lock);
+ }
+}
+
+/*
+ * mmc_blk_update_interrupted_req() - update a stopped request
+ * @card: the MMC card associated with the request.
+ * @areq: interrupted async request.
+ *
+ * Read the stopped request's state from the card and record the successfully
+ * completed part of the request by setting packed_fail_idx, the index of the
+ * first uncompleted request in the packed request list. For a non-packed
+ * request, packed_fail_idx remains unchanged.
+ *
+ * Returns: MMC_BLK_SUCCESS for success, MMC_BLK_ABORT otherwise
+ */
+static int mmc_blk_update_interrupted_req(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ int ret = MMC_BLK_SUCCESS;
+ u8 *ext_csd;
+ int correctly_done;
+ struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct request *prq;
+ u8 req_index = 0;
+
+ if (mq_rq->packed_cmd == MMC_PACKED_NONE)
+ return MMC_BLK_SUCCESS;
+
+ ext_csd = kmalloc(512, GFP_KERNEL);
+ if (!ext_csd)
+ return MMC_BLK_ABORT;
+
+ /* get the number of correctly programmed sectors from the card */
+ ret = mmc_send_ext_csd(card, ext_csd);
+ if (ret) {
+ pr_err("%s: error %d reading ext_csd\n",
+ mmc_hostname(card->host), ret);
+ ret = MMC_BLK_ABORT;
+ goto exit;
+ }
+ correctly_done = card->ext_csd.data_sector_size *
+ (ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 0] << 0 |
+ ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 1] << 8 |
+ ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 2] << 16 |
+ ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 3] << 24);
+
+ list_for_each_entry(prq, &mq_rq->packed_list, queuelist) {
+ if ((correctly_done - (int)blk_rq_bytes(prq)) < 0) {
+ /* prq was not successful */
+ mq_rq->packed_fail_idx = req_index;
+ break;
+ }
+ correctly_done -= blk_rq_bytes(prq);
+ req_index++;
+ }
+exit:
+ kfree(ext_csd);
+ return ret;
+}
+
static int mmc_blk_packed_err_check(struct mmc_card *card,
struct mmc_async_req *areq)
{
@@ -1721,6 +1835,9 @@
mqrq->mmc_active.err_check = mq->err_check_fn;
else
mqrq->mmc_active.err_check = mmc_blk_err_check;
+ mqrq->mmc_active.reinsert_req = mmc_blk_reinsert_req;
+ mqrq->mmc_active.update_interrupted_req =
+ mmc_blk_update_interrupted_req;
mmc_queue_bounce_pre(mqrq);
}
@@ -2064,6 +2181,11 @@
if (mq->packed_test_fn)
mq->packed_test_fn(mq->queue, mqrq);
+
+ mqrq->mmc_active.reinsert_req = mmc_blk_reinsert_req;
+ mqrq->mmc_active.update_interrupted_req =
+ mmc_blk_update_interrupted_req;
+
mmc_queue_bounce_pre(mqrq);
}
@@ -2207,6 +2329,19 @@
mmc_queue_bounce_post(mq_rq);
switch (status) {
+ case MMC_BLK_URGENT:
+ if (mq_rq->packed_cmd != MMC_PACKED_NONE) {
+ /* complete the successfully transmitted part */
+ if (mmc_blk_end_packed_req(mq_rq))
+ /* reinsert the untransmitted part */
+ mmc_blk_reinsert_req(areq);
+ } else {
+ mmc_blk_reinsert_req(areq);
+ }
+
+ mq->flags |= MMC_QUEUE_URGENT_REQUEST;
+ ret = 0;
+ break;
case MMC_BLK_SUCCESS:
case MMC_BLK_PARTIAL:
/*
@@ -2377,6 +2512,7 @@
mmc_blk_write_packing_control(mq, req);
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ mq->flags &= ~MMC_QUEUE_URGENT_REQUEST;
if (req && req->cmd_flags & REQ_SANITIZE) {
/* complete ongoing async transfer before issuing sanitize */
if (card->host && card->host->areq)
@@ -2406,7 +2542,8 @@
}
out:
- if (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) {
+ if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
+ (mq->flags & MMC_QUEUE_URGENT_REQUEST)) {
if (mmc_card_need_bkops(card))
mmc_start_bkops(card, false);
/* release host only when there are no more requests */
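Note on the block.c changes above: the CORRECTLY_PRG_SECTORS_NUM field that mmc_blk_update_interrupted_req() reads is a 4-byte little-endian sector count at EXT_CSD offset 242; multiplied by the data sector size it gives the number of bytes the card had programmed before HPI interrupted it. A small user-space model of the packed_fail_idx walk (the byte values and request sizes are made up for illustration):

    #include <stdio.h>

    int main(void)
    {
        /* pretend ext_csd[242..245] = 20 00 00 00 hex -> 32 sectors */
        unsigned char ext_csd[4] = { 0x20, 0x00, 0x00, 0x00 };
        int sector_size = 512;
        int correctly_done = sector_size *
            (ext_csd[0] << 0 | ext_csd[1] << 8 |
             ext_csd[2] << 16 | ext_csd[3] << 24);

        /* three packed requests: 8 KiB, 8 KiB, 16 KiB */
        int req_bytes[] = { 8192, 8192, 16384 };
        int fail_idx = 3; /* default: everything completed */

        for (int i = 0; i < 3; i++) {
            if (correctly_done - req_bytes[i] < 0) {
                fail_idx = i; /* first request not fully programmed */
                break;
            }
            correctly_done -= req_bytes[i];
        }
        printf("packed_fail_idx = %d\n", fail_idx);
        return 0;
    }

With 32 programmed sectors (16 KiB) against 8 KiB + 8 KiB + 16 KiB, the first two requests complete and packed_fail_idx lands on index 2.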
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index c4b2d16..64ece67 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -77,8 +77,10 @@
set_current_state(TASK_RUNNING);
mq->issue_fn(mq, req);
if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
- mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
continue; /* fetch again */
+ } else if (mq->flags & MMC_QUEUE_URGENT_REQUEST) {
+ mq->mqrq_cur->brq.mrq.data = NULL;
+ mq->mqrq_cur->req = NULL;
}
/*
@@ -144,6 +146,47 @@
wake_up_process(mq->thread);
}
+/*
+ * mmc_urgent_request() - Urgent MMC request handler.
+ * @q: request queue.
+ *
+ * This is called when the block layer has an urgent request to deliver. If the
+ * mmc context is waiting for the current request to complete, it will be woken
+ * up, and the current request may be interrupted and re-inserted back into the
+ * block device request queue. The block I/O scheduler ensures that the next
+ * fetched request is the urgent one.
+ */
+static void mmc_urgent_request(struct request_queue *q)
+{
+ unsigned long flags;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_context_info *cntx;
+
+ if (!mq) {
+ mmc_request(q);
+ return;
+ }
+ cntx = &mq->card->host->context_info;
+
+ /* critical section with mmc_wait_data_done() */
+ spin_lock_irqsave(&cntx->lock, flags);
+
+ /* do stop flow only when mmc thread is waiting for done */
+ if (cntx->is_waiting) {
+ /*
+ * Urgent request must be executed alone
+ * so disable the write packing
+ */
+ mmc_blk_disable_wr_packing(mq);
+ cntx->is_urgent = true;
+ spin_unlock_irqrestore(&cntx->lock, flags);
+ wake_up_interruptible(&cntx->wait);
+ } else {
+ spin_unlock_irqrestore(&cntx->lock, flags);
+ mmc_request(q);
+ }
+}
+
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
struct scatterlist *sg;
@@ -211,6 +254,11 @@
if (!mq->queue)
return -ENOMEM;
+ if ((host->caps2 & MMC_CAP2_STOP_REQUEST) &&
+ host->ops->stop_request &&
+ mq->card->ext_csd.hpi)
+ blk_urgent_request(mq->queue, mmc_urgent_request);
+
memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
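The urgent hook above interrupts a transfer only when the mmc thread is actually parked waiting for completion; otherwise the request falls through to the normal mmc_request() dispatch. A sketch of the two-sided handshake, modeled with pthreads in place of the kernel spinlock and wait queue (the field names mirror the patch; everything else is illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* user-space stand-in for mmc_context_info */
    struct ctx {
        pthread_mutex_t lock;
        pthread_cond_t  wait;
        bool is_waiting;
        bool is_urgent;
    };

    /* mmc thread side: park until a wake reason appears */
    static void *mmc_waiter(void *arg)
    {
        struct ctx *c = arg;
        pthread_mutex_lock(&c->lock);
        c->is_waiting = true;
        while (!c->is_urgent)
            pthread_cond_wait(&c->wait, &c->lock);
        c->is_waiting = false;
        pthread_mutex_unlock(&c->lock);
        puts("woken: stop current transfer and reinsert the request");
        return NULL;
    }

    /* block-layer side: mmc_urgent_request() analogue */
    static void urgent_notify(struct ctx *c)
    {
        pthread_mutex_lock(&c->lock);
        if (c->is_waiting) {            /* thread parked: interrupt it */
            c->is_urgent = true;
            pthread_mutex_unlock(&c->lock);
            pthread_cond_signal(&c->wait);
        } else {                        /* not waiting: normal dispatch */
            pthread_mutex_unlock(&c->lock);
            puts("fall through to mmc_request()");
        }
    }

    int main(void)
    {
        struct ctx c = { PTHREAD_MUTEX_INITIALIZER,
                         PTHREAD_COND_INITIALIZER, false, false };
        pthread_t t;
        pthread_create(&t, NULL, mmc_waiter, &c);
        for (;;) {                      /* crude: wait until the thread parks */
            pthread_mutex_lock(&c.lock);
            bool parked = c.is_waiting;
            pthread_mutex_unlock(&c.lock);
            if (parked)
                break;
        }
        urgent_notify(&c);
        pthread_join(t, NULL);
        return 0;
    }

Build with cc -pthread; the polling loop in main() merely stands in for request-arrival timing.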
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 119b0c7..99c3c60 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -39,8 +39,9 @@
struct task_struct *thread;
struct semaphore thread_sem;
unsigned int flags;
-#define MMC_QUEUE_SUSPENDED (1 << 0)
-#define MMC_QUEUE_NEW_REQUEST (1 << 1)
+#define MMC_QUEUE_SUSPENDED (1 << 0)
+#define MMC_QUEUE_NEW_REQUEST (1 << 1)
+#define MMC_QUEUE_URGENT_REQUEST (1 << 2)
int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index a0a184d..be4315e 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -162,6 +162,13 @@
#endif /* CONFIG_FAIL_MMC_REQUEST */
+static inline void mmc_update_clk_scaling(struct mmc_host *host)
+{
+ if (host->clk_scaling.enable)
+ host->clk_scaling.busy_time_us +=
+ ktime_to_us(ktime_sub(ktime_get(),
+ host->clk_scaling.start_busy));
+}
/**
* mmc_request_done - finish processing an MMC request
* @host: MMC host which completed request
@@ -177,10 +184,8 @@
#ifdef CONFIG_MMC_PERF_PROFILING
ktime_t diff;
#endif
- if (host->card && host->clk_scaling.enable)
- host->clk_scaling.busy_time_us +=
- ktime_to_us(ktime_sub(ktime_get(),
- host->clk_scaling.start_busy));
+ if (host->card)
+ mmc_update_clk_scaling(host);
if (err && cmd->retries && mmc_host_is_spi(host)) {
if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
@@ -630,6 +635,81 @@
}
/*
+ * mmc_should_stop_curr_req() - check whether it is worth stopping
+ * @host: MMC host running the request.
+ *
+ * Check whether the currently running request can be interrupted.
+ * Returns true if it is worth stopping the transfer,
+ * false otherwise.
+ */
+static bool mmc_should_stop_curr_req(struct mmc_host *host)
+{
+ int remainder;
+
+ remainder = (host->ops->get_xfer_remain) ?
+ host->ops->get_xfer_remain(host) : -1;
+ return (remainder > 0);
+}
+
+/*
+ * mmc_stop_request() - stop the currently running request
+ * @host: MMC host to prepare the command.
+ *
+ * Triggers the stop flow in the host driver and sends CMD12 (stop command) to
+ * the card, then sends HPI to get the card out of R1_STATE_PRG immediately.
+ *
+ * Returns 0 on success; the error is propagated otherwise.
+ */
+static int mmc_stop_request(struct mmc_host *host)
+{
+ struct mmc_command cmd = {0};
+ struct mmc_card *card = host->card;
+ int err = 0;
+ u32 status;
+
+ if (!host->ops->stop_request || !card->ext_csd.hpi) {
+ pr_warn("%s: host ops stop_request() or HPI not supported\n",
+ mmc_hostname(host));
+ return -ENOTSUPP;
+ }
+ err = host->ops->stop_request(host);
+ if (err) {
+ pr_err("%s: Call to host->ops->stop_request() failed (%d)\n",
+ mmc_hostname(host), err);
+ goto out;
+ }
+
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(host, &cmd, 0);
+ if (err) {
+ err = mmc_send_status(card, &status);
+ if (err) {
+ pr_err("%s: Get card status fail\n",
+ mmc_hostname(card->host));
+ goto out;
+ }
+ switch (R1_CURRENT_STATE(status)) {
+ case R1_STATE_DATA:
+ case R1_STATE_RCV:
+ pr_err("%s: CMD12 fails with error (%d)\n",
+ mmc_hostname(host), err);
+ goto out;
+ default:
+ break;
+ }
+ }
+ err = mmc_interrupt_hpi(card);
+ if (err) {
+ pr_err("%s: mmc_interrupt_hpi() failed (%d)\n",
+ mmc_hostname(host), err);
+ goto out;
+ }
+out:
+ return err;
+}
+
+/*
* mmc_wait_for_data_req_done() - wait for request completed
* @host: MMC host to prepare the command.
* @mrq: MMC request to wait for
@@ -646,14 +726,20 @@
{
struct mmc_command *cmd;
struct mmc_context_info *context_info = &host->context_info;
+ bool pending_is_urgent = false;
+ bool is_urgent = false;
int err;
unsigned long flags;
while (1) {
+ context_info->is_waiting = true;
wait_io_event_interruptible(context_info->wait,
(context_info->is_done_rcv ||
- context_info->is_new_req));
+ context_info->is_new_req ||
+ context_info->is_urgent));
spin_lock_irqsave(&context_info->lock, flags);
+ is_urgent = context_info->is_urgent;
+ context_info->is_waiting = false;
context_info->is_waiting_last_req = false;
spin_unlock_irqrestore(&context_info->lock, flags);
if (context_info->is_done_rcv) {
@@ -664,6 +750,18 @@
mmc_card_removed(host->card)) {
err = host->areq->err_check(host->card,
host->areq);
+ if (pending_is_urgent || is_urgent) {
+ /*
+ * all the success/partial operations
+ * are done in addition to handling
+ * the urgent request
+ */
+ if ((err == MMC_BLK_PARTIAL) ||
+ (err == MMC_BLK_SUCCESS))
+ err = MMC_BLK_URGENT;
+ /* reset is_urgent for next request */
+ context_info->is_urgent = false;
+ }
break; /* return err */
} else {
pr_info("%s: req failed (CMD%u): %d, retrying...\n",
@@ -672,14 +770,61 @@
cmd->retries--;
cmd->error = 0;
host->ops->request(host, mrq);
- continue; /* wait for done/new event again */
+ /*
+ * ignore the urgent flow; a request retry has
+ * higher priority than the urgent flow
+ */
+ context_info->is_urgent = false;
+ /* wait for done/new/urgent event again */
+ continue;
}
- } else if (context_info->is_new_req) {
+ } else if (context_info->is_new_req && !is_urgent) {
context_info->is_new_req = false;
if (!next_req) {
err = MMC_BLK_NEW_REQUEST;
break; /* return err */
}
+ } else {
+ /*
+ * Handle the case where the block layer sent the
+ * next urgent notification before it received
+ * end_io on the current request.
+ */
+ BUG_ON(pending_is_urgent == true);
+
+ context_info->is_urgent = false;
+ context_info->is_new_req = false;
+ if (mmc_should_stop_curr_req(host)) {
+ err = mmc_stop_request(host);
+ if (err && !context_info->is_done_rcv) {
+ err = MMC_BLK_ABORT;
+ break;
+ }
+ /* running request has finished at this point */
+ if (context_info->is_done_rcv) {
+ err = host->areq->err_check(host->card,
+ host->areq);
+ context_info->is_done_rcv = false;
+ break; /* return err */
+ } else {
+ mmc_update_clk_scaling(host);
+ }
+ err = host->areq->update_interrupted_req(
+ host->card, host->areq);
+ if (!err)
+ err = MMC_BLK_URGENT;
+ break; /* return err */
+ } else {
+ /*
+ * The flow goes back to waiting for is_done_rcv,
+ * but in this case the original is_urgent has
+ * been cleared. Mark pending_is_urgent to
+ * distinguish the case where is_done_rcv and
+ * is_urgent are truly concurrent.
+ */
+ pending_is_urgent = true;
+ continue; /* wait for done/new/urgent event */
+ }
}
} /* while */
return err;
@@ -788,13 +933,27 @@
struct mmc_async_req *data = host->areq;
/* Prepare a new request */
- if (areq)
+ if (areq) {
+ /*
+ * start waiting here for a possible urgent interrupt,
+ * because mmc_pre_req() may take a long time
+ */
+ host->context_info.is_waiting = true;
mmc_pre_req(host, areq->mrq, !host->areq);
+ }
if (host->areq) {
err = mmc_wait_for_data_req_done(host, host->areq->mrq,
areq);
- if (err == MMC_BLK_NEW_REQUEST) {
+ if (err == MMC_BLK_URGENT) {
+ mmc_post_req(host, host->areq->mrq, 0);
+ if (areq) { /* reinsert ready request */
+ areq->reinsert_req(areq);
+ mmc_post_req(host, areq->mrq, 0);
+ }
+ host->areq = NULL;
+ goto exit;
+ } else if (err == MMC_BLK_NEW_REQUEST) {
if (error)
*error = err;
/*
@@ -831,6 +990,7 @@
else
host->areq = areq;
+exit:
if (error)
*error = err;
return data;
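The wait loop in mmc_wait_for_data_req_done() now distinguishes three wake reasons with a fixed priority: completion always wins (an urgent flag only upgrades the returned status to MMC_BLK_URGENT), a latched urgent overrides a new-request wakeup, and a pure urgent wakeup enters the stop-and-reinsert path, with pending_is_urgent covering a stop that raced with completion. A compact sketch of that triage order (a simplification; the real loop also handles SPI command retries):

    #include <stdbool.h>
    #include <stdio.h>

    enum wake_reason { WAKE_DONE, WAKE_NEW_REQ, WAKE_URGENT };
    enum action { COMPLETE_REQ, FETCH_NEW_REQ, STOP_AND_REINSERT };

    /* triage mirroring the loop's priority order */
    static enum action triage(enum wake_reason r, bool urgent_latched)
    {
        switch (r) {
        case WAKE_DONE:
            /* done always wins; urgent only changes the status code */
            return COMPLETE_REQ;
        case WAKE_NEW_REQ:
            /* a latched urgent flag overrides the new-request path */
            return urgent_latched ? STOP_AND_REINSERT : FETCH_NEW_REQ;
        case WAKE_URGENT:
        default:
            return STOP_AND_REINSERT;
        }
    }

    int main(void)
    {
        printf("%d\n", triage(WAKE_NEW_REQ, true));  /* 2: STOP_AND_REINSERT */
        printf("%d\n", triage(WAKE_NEW_REQ, false)); /* 1: FETCH_NEW_REQ */
        return 0;
    }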
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 89f2704..2e78f33 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1325,7 +1325,12 @@
else if (host->curr.use_wr_data_pend)
datactrl |= MCI_DATA_PEND;
- clks = (unsigned long long)data->timeout_ns * host->clk_rate;
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+ clks = (unsigned long long)data->timeout_ns *
+ (host->clk_rate / 2);
+ else
+ clks = (unsigned long long)data->timeout_ns * host->clk_rate;
+
do_div(clks, 1000000000UL);
timeout = data->timeout_clks + (unsigned int)clks*2 ;
WARN(!timeout,
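A worked example of the DDR50 branch above: for a given timeout_ns budget the conversion now uses clk_rate / 2, programming half as many timeout clocks; a plausible reading is that in DDR50 mode host->clk_rate is double the rate at which the data timeout counter effectively ticks, so converting against the full rate would cut the wall-clock timeout in half. Illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long timeout_ns = 100000000ULL; /* 100 ms budget */
        unsigned long clk_rate = 100000000UL;         /* 100 MHz host clock */

        /* default conversion: clocks = ns * Hz / 1e9 (do_div in the driver) */
        unsigned long long clks = timeout_ns * clk_rate / 1000000000ULL;

        /* DDR50: the same budget converted against half the host clock */
        unsigned long long clks_ddr =
            timeout_ns * (clk_rate / 2) / 1000000000ULL;

        printf("SDR:   %llu clocks\n", clks);     /* 10000000 */
        printf("DDR50: %llu clocks\n", clks_ddr); /*  5000000 */
        return 0;
    }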
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
index 4a455b6..e6613e8 100644
--- a/drivers/usb/gadget/f_rmnet.c
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -539,6 +539,25 @@
kfree(f->name);
}
+static void frmnet_purge_responses(struct f_rmnet *dev)
+{
+ unsigned long flags;
+ struct rmnet_ctrl_pkt *cpkt;
+
+ pr_debug("%s: port#%d\n", __func__, dev->port_num);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ while (!list_empty(&dev->cpkt_resp_q)) {
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
+ struct rmnet_ctrl_pkt, list);
+
+ list_del(&cpkt->list);
+ rmnet_free_ctrl_pkt(cpkt);
+ }
+ atomic_set(&dev->notify_count, 0);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
static void frmnet_suspend(struct usb_function *f)
{
struct f_rmnet *dev = func_to_rmnet(f);
@@ -549,6 +568,8 @@
__func__, xport_to_str(dxport),
dev, dev->port_num);
+ frmnet_purge_responses(dev);
+
port_num = rmnet_ports[dev->port_num].data_xport_num;
switch (dxport) {
case USB_GADGET_XPORT_BAM:
@@ -602,8 +623,6 @@
static void frmnet_disable(struct usb_function *f)
{
struct f_rmnet *dev = func_to_rmnet(f);
- unsigned long flags;
- struct rmnet_ctrl_pkt *cpkt;
pr_debug("%s: port#%d\n", __func__, dev->port_num);
@@ -612,16 +631,7 @@
atomic_set(&dev->online, 0);
- spin_lock_irqsave(&dev->lock, flags);
- while (!list_empty(&dev->cpkt_resp_q)) {
- cpkt = list_first_entry(&dev->cpkt_resp_q,
- struct rmnet_ctrl_pkt, list);
-
- list_del(&cpkt->list);
- rmnet_free_ctrl_pkt(cpkt);
- }
- atomic_set(&dev->notify_count, 0);
- spin_unlock_irqrestore(&dev->lock, flags);
+ frmnet_purge_responses(dev);
gport_rmnet_disconnect(dev);
}
@@ -709,11 +719,11 @@
ret = usb_ep_queue(dev->notify, dev->notify_req, GFP_ATOMIC);
if (ret) {
- atomic_dec(&dev->notify_count);
spin_lock_irqsave(&dev->lock, flags);
- cpkt = list_first_entry(&dev->cpkt_resp_q,
+ if (!list_empty(&dev->cpkt_resp_q)) {
+ atomic_dec(&dev->notify_count);
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
struct rmnet_ctrl_pkt, list);
- if (cpkt) {
list_del(&cpkt->list);
rmnet_free_ctrl_pkt(cpkt);
}
@@ -739,10 +749,8 @@
static void frmnet_disconnect(struct grmnet *gr)
{
struct f_rmnet *dev;
- unsigned long flags;
struct usb_cdc_notification *event;
int status;
- struct rmnet_ctrl_pkt *cpkt;
if (!gr) {
pr_err("%s: Invalid grmnet:%p\n", __func__, gr);
@@ -776,17 +784,7 @@
__func__, status);
}
- spin_lock_irqsave(&dev->lock, flags);
- while (!list_empty(&dev->cpkt_resp_q)) {
- cpkt = list_first_entry(&dev->cpkt_resp_q,
- struct rmnet_ctrl_pkt, list);
-
- list_del(&cpkt->list);
- rmnet_free_ctrl_pkt(cpkt);
- }
- atomic_set(&dev->notify_count, 0);
- spin_unlock_irqrestore(&dev->lock, flags);
-
+ frmnet_purge_responses(dev);
}
static int
@@ -876,11 +874,11 @@
status = usb_ep_queue(dev->notify, req, GFP_ATOMIC);
if (status) {
- atomic_dec(&dev->notify_count);
spin_lock_irqsave(&dev->lock, flags);
- cpkt = list_first_entry(&dev->cpkt_resp_q,
+ if (!list_empty(&dev->cpkt_resp_q)) {
+ atomic_dec(&dev->notify_count);
+ cpkt = list_first_entry(&dev->cpkt_resp_q,
struct rmnet_ctrl_pkt, list);
- if (cpkt) {
list_del(&cpkt->list);
rmnet_free_ctrl_pkt(cpkt);
}
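Besides factoring the purge loop into frmnet_purge_responses(), the two usb_ep_queue() error paths above fix a latent bug: list_first_entry() is pure container_of() arithmetic and can never return NULL, so the old `if (cpkt)` test after calling it on a possibly empty list was always true; the fix is to test list_empty() under the lock before touching the head (and to decrement notify_count only in that case). A minimal model of why the NULL check was useless:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };
    struct pkt { int id; struct list_head list; };

    /* list_first_entry() is just pointer arithmetic -- it cannot be NULL */
    #define list_first_entry(head, type, member) \
        ((type *)((char *)(head)->next - offsetof(type, member)))

    int main(void)
    {
        struct list_head q = { &q, &q };  /* empty list: next points to itself */
        struct pkt *p = list_first_entry(&q, struct pkt, list);
        /* p points into the list head itself, not at a real packet */
        printf("p = %p (non-NULL even though the list is empty)\n", (void *)p);
        return 0;
    }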
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index adbf217..4b14934 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -976,9 +976,10 @@
/* PCI errors [4.15.2.4] */
if (unlikely ((status & STS_FATAL) != 0)) {
ehci_err(ehci, "fatal error\n");
- if (hcd->driver->dump_regs)
+ if (hcd->driver->dump_regs) {
hcd->driver->dump_regs(hcd);
- panic("System error\n");
+ panic("System error\n");
+ }
dbg_cmd(ehci, "fatal", cmd);
dbg_status(ehci, "fatal", status);
ehci_halt(ehci);
diff --git a/drivers/usb/host/ehci-msm-hsic.c b/drivers/usb/host/ehci-msm-hsic.c
index 67040e5..fc4d1b6 100644
--- a/drivers/usb/host/ehci-msm-hsic.c
+++ b/drivers/usb/host/ehci-msm-hsic.c
@@ -1965,6 +1965,11 @@
ehci_hsic_msm_debugfs_cleanup();
device_init_wakeup(&pdev->dev, 0);
+
+ /* If the device was removed, there is no need to call pm_runtime_disable */
+ if (pdev->dev.power.power_state.event != PM_EVENT_INVALID)
+ pm_runtime_disable(&pdev->dev);
+
pm_runtime_set_suspended(&pdev->dev);
destroy_workqueue(ehci_wq);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d683856..dd61824 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -186,6 +186,7 @@
struct mmc_host;
struct sdio_func;
struct sdio_func_tuple;
+struct mmc_queue;
#define SDIO_MAX_FUNCS 7
@@ -213,6 +214,7 @@
MMC_BLK_ECC_ERR,
MMC_BLK_NOMEDIUM,
MMC_BLK_NEW_REQUEST,
+ MMC_BLK_URGENT,
};
struct mmc_wr_pack_stats {
@@ -612,5 +614,5 @@
extern struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(
struct mmc_card *card);
extern void mmc_blk_init_packed_statistics(struct mmc_card *card);
-
+extern void mmc_blk_disable_wr_packing(struct mmc_queue *mq);
#endif /* LINUX_MMC_CARD_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index cb8e4f3..1a3c662 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -140,6 +140,8 @@
void (*hw_reset)(struct mmc_host *host);
unsigned long (*get_max_frequency)(struct mmc_host *host);
unsigned long (*get_min_frequency)(struct mmc_host *host);
+ int (*stop_request)(struct mmc_host *host);
+ unsigned int (*get_xfer_remain)(struct mmc_host *host);
};
struct mmc_card;
@@ -153,13 +155,25 @@
* Returns 0 if success otherwise non zero.
*/
int (*err_check) (struct mmc_card *, struct mmc_async_req *);
+ /* Reinserts request back to the block layer */
+ void (*reinsert_req) (struct mmc_async_req *);
+ /* update which part of the request is not done (packed_fail_idx) */
+ int (*update_interrupted_req) (struct mmc_card *,
+ struct mmc_async_req *);
};
/**
* mmc_context_info - synchronization details for mmc context
* @is_done_rcv wake up reason was done request
* @is_new_req wake up reason was new request
- * @is_waiting_last_req mmc context waiting for single running request
+ * @is_waiting_last_req true when one request is running on the bus and NULL
+ * was fetched as the second request. An
+ * MMC_BLK_NEW_REQUEST notification will wake the mmc
+ * thread from waiting.
+ * @is_urgent wake up reason was urgent request
+ * @is_waiting true when the first request is running on the bus,
+ * preparation of the second request has started, or
+ * the mmc thread is waiting for completion of the
+ * current request (the latter is like
+ * @is_waiting_last_req)
* @wait wait queue
* @lock lock to protect data fields
*/
@@ -167,6 +181,8 @@
bool is_done_rcv;
bool is_new_req;
bool is_waiting_last_req;
+ bool is_urgent;
+ bool is_waiting;
wait_queue_head_t wait;
spinlock_t lock;
};
@@ -269,6 +285,7 @@
#define MMC_CAP2_SANITIZE (1 << 15) /* Support Sanitize */
#define MMC_CAP2_INIT_BKOPS (1 << 16) /* Need to set BKOPS_EN */
#define MMC_CAP2_CLK_SCALE (1 << 17) /* Allow dynamic clk scaling */
+#define MMC_CAP2_STOP_REQUEST (1 << 18) /* Allow stop ongoing request */
mmc_pm_flag_t pm_caps; /* supported pm features */
int clk_requests; /* internal reference counter */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index a8eda93..7f316a9 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -323,6 +323,7 @@
#define EXT_CSD_PWR_CL_200_360 237 /* RO */
#define EXT_CSD_PWR_CL_DDR_52_195 238 /* RO */
#define EXT_CSD_PWR_CL_DDR_52_360 239 /* RO */
+#define EXT_CSD_CORRECTLY_PRG_SECTORS_NUM 242 /* RO, 4 bytes */
#define EXT_CSD_BKOPS_STATUS 246 /* RO */
#define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */
#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index 3b14c44..b8a4a86 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -54,6 +54,7 @@
#define TABLA_CFILT_SLOW_MODE 0x40
#define MBHC_FW_READ_ATTEMPTS 15
#define MBHC_FW_READ_TIMEOUT 2000000
+#define MBHC_VDDIO_SWITCH_WAIT_MS 10
#define SLIM_CLOSE_TIMEOUT 1000
@@ -7142,6 +7143,48 @@
usleep_range(5000, 5000);
}
+
+static void tabla_codec_onoff_vddio_switch(struct snd_soc_codec *codec, bool on)
+{
+ bool override;
+ struct tabla_priv *tabla = snd_soc_codec_get_drvdata(codec);
+
+ pr_debug("%s: enter\n", __func__);
+ if (on) {
+ override = snd_soc_read(codec, TABLA_A_CDC_MBHC_B1_CTL) & 0x04;
+ if (!override)
+ tabla_turn_onoff_override(codec, true);
+
+ /* enable the vddio switch */
+ snd_soc_update_bits(codec, tabla->mbhc_bias_regs.mbhc_reg,
+ 0x91, 0x81);
+
+ /* reroute the override from MicBias2 to MicBias4 */
+ snd_soc_update_bits(codec, TABLA_A_MICB_1_MBHC,
+ 0x03, 0x03);
+
+ usleep_range(MBHC_VDDIO_SWITCH_WAIT_MS * 1000,
+ MBHC_VDDIO_SWITCH_WAIT_MS * 1000);
+
+ if (!override)
+ tabla_turn_onoff_override(codec, false);
+ tabla->mbhc_micbias_switched = true;
+ pr_debug("%s: VDDIO switch enabled\n", __func__);
+
+ } else {
+
+ snd_soc_update_bits(codec, tabla->mbhc_bias_regs.mbhc_reg,
+ 0x91, 0x00);
+
+ /* reroute the override to MicBias2 */
+ snd_soc_update_bits(codec, TABLA_A_MICB_1_MBHC,
+ 0x03, 0x01);
+
+ tabla->mbhc_micbias_switched = false;
+ pr_debug("%s: VDDIO switch disabled\n", __func__);
+ }
+}
+
/* called under codec_resource_lock acquisition and mbhc override = 1 */
static enum tabla_mbhc_plug_type
tabla_codec_get_plug_type(struct snd_soc_codec *codec, bool highhph)
@@ -7182,7 +7225,7 @@
* Nth: check voltage range with VDDIO switch */
for (i = 0; i < num_det; i++) {
gndswitch = (i == (num_det - 2));
- vddioswitch = (i == (num_det - 1)) || (i == (num_det - 2));
+ vddioswitch = (i == (num_det - 1));
if (i == 0) {
mb_v[i] = tabla_codec_setup_hs_polling(codec);
mic_mv[i] = tabla_codec_sta_dce_v(codec, 1 , mb_v[i]);
@@ -7192,8 +7235,8 @@
scaled = mic_mv[i];
} else {
if (vddioswitch)
- __tabla_codec_switch_micbias(tabla->codec, 1,
- false, false);
+ tabla_codec_onoff_vddio_switch(codec, true);
+
if (gndswitch)
tabla_codec_hphr_gnd_switch(codec, true);
mb_v[i] = __tabla_codec_sta_dce(codec, 1, true, true);
@@ -7217,8 +7260,7 @@
if (gndswitch)
tabla_codec_hphr_gnd_switch(codec, false);
if (vddioswitch)
- __tabla_codec_switch_micbias(tabla->codec, 0,
- false, false);
+ tabla_codec_onoff_vddio_switch(codec, false);
}
pr_debug("%s: DCE #%d, %04x, V %d, scaled V %d, GND %d, "
"VDDIO %d, inval %d\n", __func__,