Merge "usb: serial: Add a check for current autosuspend-delay expiration" into msm-3.4
diff --git a/arch/arm/boot/dts/msm-pm8941.dtsi b/arch/arm/boot/dts/msm-pm8941.dtsi
index 6737e89..51ec10c 100644
--- a/arch/arm/boot/dts/msm-pm8941.dtsi
+++ b/arch/arm/boot/dts/msm-pm8941.dtsi
@@ -332,6 +332,25 @@
interrupts = <0x0 0x61 0x1>;
};
};
+
+ vadc@3100 {
+ compatible = "qcom,qpnp-vadc";
+ reg = <0x3100 0x100>;
+ interrupts = <0x0 0x31 0x0>;
+ qcom,adc-bit-resolution = <15>;
+ qcom,adc-vdd-reference = <1800>;
+
+ chan@0 {
+ label = "usb_in";
+ qcom,channel-num = <0>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <20>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+ };
};
qcom,pm8941@1 {
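
Note on the new vadc node: once it probes, the usb_in channel is readable through the qpnp-vadc consumer interface. A minimal sketch follows, assuming the msm-3.4 qpnp_vadc_read() helper and the USBIN channel enum from <linux/qpnp/qpnp-adc.h>; both names should be checked against that header in this tree.

    #include <linux/qpnp/qpnp-adc.h>

    /* Sketch: read the USB input voltage via the channel added above.
     * Assumes the msm-3.4 qpnp_vadc_read() API and the USBIN channel
     * enum; verify against include/linux/qpnp/qpnp-adc.h.
     */
    static int example_read_usb_in(void)
    {
            struct qpnp_vadc_result result;
            int rc;

            rc = qpnp_vadc_read(USBIN, &result);
            if (rc) {
                    pr_err("usb_in vadc read failed: %d\n", rc);
                    return rc;
            }

            /* result.physical is the pre-scaled reading in microvolts */
            pr_info("usb_in: %lld uV\n", result.physical);
            return 0;
    }
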
diff --git a/arch/arm/boot/dts/msm8974-regulator.dtsi b/arch/arm/boot/dts/msm8974-regulator.dtsi
index 91894de..b376544 100644
--- a/arch/arm/boot/dts/msm8974-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8974-regulator.dtsi
@@ -218,9 +218,9 @@
status = "okay";
pm8941_l3: regulator-l3 {
parent-supply = <&pm8941_s1>;
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- qcom,init-voltage = <1200000>;
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ qcom,init-voltage = <1225000>;
status = "okay";
};
};
@@ -229,9 +229,9 @@
status = "okay";
pm8941_l4: regulator-l4 {
parent-supply = <&pm8941_s1>;
- regulator-min-microvolt = <1150000>;
- regulator-max-microvolt = <1150000>;
- qcom,init-voltage = <1150000>;
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ qcom,init-voltage = <1225000>;
status = "okay";
};
};
@@ -303,9 +303,9 @@
status = "okay";
pm8941_l11: regulator-l11 {
parent-supply = <&pm8941_s1>;
- regulator-min-microvolt = <1250000>;
- regulator-max-microvolt = <1250000>;
- qcom,init-voltage = <1250000>;
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ qcom,init-voltage = <1300000>;
status = "okay";
};
};
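
For reference, consumers pick up the raised rails through the standard regulator framework, which clamps requests to the new min/max windows above. A minimal sketch using the generic consumer API; the supply name "vdd_l3" is made up for illustration and would come from the consumer's own binding.

    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    /* Sketch: request pm8941_l3 at its new 1.225 V level. */
    static int example_enable_l3(struct device *dev)
    {
            struct regulator *vreg;
            int rc;

            vreg = regulator_get(dev, "vdd_l3");   /* hypothetical supply name */
            if (IS_ERR(vreg))
                    return PTR_ERR(vreg);

            /* must fall inside the 1225000..1225000 uV window set above */
            rc = regulator_set_voltage(vreg, 1225000, 1225000);
            if (!rc)
                    rc = regulator_enable(vreg);
            if (rc)
                    regulator_put(vreg);
            return rc;
    }
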
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index b9f26d9..00b4164 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -51,6 +51,7 @@
CONFIG_MSM_TZ_LOG=y
CONFIG_MSM_DIRECT_SCLK_ACCESS=y
CONFIG_MSM_OCMEM=y
+CONFIG_MSM_MEMORY_DUMP=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index e1755de..3047693 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1181,7 +1181,7 @@
/*NOTE: with mmu enabled, gpuaddr doesn't mean
* anything to mmap().
*/
- shadowprop.gpuaddr = device->memstore.physaddr;
+ shadowprop.gpuaddr = device->memstore.gpuaddr;
shadowprop.size = device->memstore.size;
/* GSL needs this to be set, even if it
appears to be meaningless */
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 278be99..fa204c3 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2273,7 +2273,8 @@
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- result = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ result = remap_pfn_range(vma, vma->vm_start,
+ device->memstore.physaddr >> PAGE_SHIFT,
vma_size, vma->vm_page_prot);
if (result != 0)
KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
@@ -2327,7 +2328,7 @@
/* Handle legacy behavior for memstore */
- if (vma_offset == device->memstore.physaddr)
+ if (vma_offset == device->memstore.gpuaddr)
return kgsl_mmap_memstore(device, vma);
/* Find a chunk of GPU memory */
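
The adreno.c and kgsl.c hunks together keep the userspace contract intact: KGSL_PROP_DEVICE_SHADOW now reports the memstore gpuaddr, kgsl_mmap() matches the legacy offset against that same gpuaddr, and the remap still targets the real physical pages. A userspace sketch of that path, assuming the msm_kgsl.h layout in this tree (IOCTL_KGSL_DEVICE_GETPROPERTY, struct kgsl_device_getproperty, struct kgsl_shadowprop); treat the names as illustrative.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/msm_kgsl.h>

    /* Sketch: query the shadow (memstore) property and mmap it. With
     * this patch the reported gpuaddr doubles as the legacy mmap offset;
     * the kernel still remaps memstore.physaddr underneath.
     */
    int map_memstore(int fd, void **out)
    {
            struct kgsl_shadowprop shadow = {0};
            struct kgsl_device_getproperty prop = {
                    .type = KGSL_PROP_DEVICE_SHADOW,
                    .value = &shadow,
                    .sizebytes = sizeof(shadow),
            };

            if (ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop))
                    return -1;

            *out = mmap(NULL, shadow.size, PROT_READ, MAP_SHARED,
                        fd, shadow.gpuaddr);
            return *out == MAP_FAILED ? -1 : 0;
    }
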
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index 894860b..e8d9e04 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -1135,7 +1135,7 @@
goto free_res;
}
- rate = c_data->vc_format.clk_freq;
+ rate = c_data->vc_format.clk_freq / 100 * 102;
rate_rc = clk_round_rate(dev->vcap_clk, rate);
if (rate_rc <= 0) {
pr_err("%s: Failed core rnd_rate\n", __func__);
@@ -1251,7 +1251,7 @@
goto free_res;
}
- rate = c_data->vc_format.clk_freq;
+ rate = c_data->vc_format.clk_freq / 100 * 102;
rate_rc = clk_round_rate(dev->vcap_clk, rate);
if (rate_rc <= 0) {
pr_err("%s: Failed core rnd_rate\n", __func__);
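
The new expression asks the clock framework for roughly 2% more than the nominal format clock, and it divides before multiplying so a large clk_freq cannot overflow. A quick worked check; the 150 MHz figure is only illustrative.

    /* 150000000 / 100 * 102 = 1500000 * 102 = 153000000  (about +2%)
     *
     * Multiplying first (150000000 * 102) needs 34 bits, so dividing
     * first keeps the intermediate value inside a 32-bit unsigned long;
     * the up-to-99 Hz lost to the integer division is negligible.
     */
    unsigned long rate = 150000000UL / 100 * 102;   /* -> 153000000 */
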
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cbefe67..3c79917 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -900,8 +900,8 @@
{
struct virtnet_info *vi = netdev_priv(dev);
- ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
- ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
+ ring->rx_max_pending = virtqueue_get_impl_size(vi->rvq);
+ ring->tx_max_pending = virtqueue_get_impl_size(vi->svq);
ring->rx_pending = ring->rx_max_pending;
ring->tx_pending = ring->tx_max_pending;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5aa43c3..7e6fd75 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -172,7 +172,7 @@
}
/**
- * virtqueue_add_buf - expose buffer to other end
+ * vring_add_buf - expose buffer to other end
* @vq: the struct virtqueue we're talking about.
* @sg: the description of the buffer(s).
* @out_num: the number of sg readable by other side
@@ -188,7 +188,7 @@
* positive return values as "available": indirect buffers mean that
* we can put an entire sg[] array inside a single queue entry.
*/
-int virtqueue_add_buf(struct virtqueue *_vq,
+static int vring_add_buf(struct virtqueue *_vq,
struct scatterlist sg[],
unsigned int out,
unsigned int in,
@@ -288,20 +288,19 @@
return vq->num_free;
}
-EXPORT_SYMBOL_GPL(virtqueue_add_buf);
/**
- * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * vring_kick_prepare - first half of split vring_kick call.
* @vq: the struct virtqueue
*
- * Instead of virtqueue_kick(), you can do:
- * if (virtqueue_kick_prepare(vq))
- * virtqueue_notify(vq);
+ * Instead of vring_kick(), you can do:
+ * if (vring_kick_prepare(vq))
+ * vring_kick_notify(vq);
*
- * This is sometimes useful because the virtqueue_kick_prepare() needs
- * to be serialized, but the actual virtqueue_notify() call does not.
+ * This is sometimes useful because the vring_kick_prepare() needs
+ * to be serialized, but the actual vring_kick_notify() call does not.
*/
-bool virtqueue_kick_prepare(struct virtqueue *_vq)
+static bool vring_kick_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 new, old;
@@ -333,39 +332,36 @@
END_USE(vq);
return needs_kick;
}
-EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
/**
- * virtqueue_notify - second half of split virtqueue_kick call.
+ * vring_kick_notify - second half of split vring_kick call.
* @vq: the struct virtqueue
*
* This does not need to be serialized.
*/
-void virtqueue_notify(struct virtqueue *_vq)
+static void vring_kick_notify(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
/* Prod other side to tell it about changes. */
vq->notify(_vq);
}
-EXPORT_SYMBOL_GPL(virtqueue_notify);
/**
- * virtqueue_kick - update after add_buf
+ * vring_kick - update after add_buf
* @vq: the struct virtqueue
*
- * After one or more virtqueue_add_buf calls, invoke this to kick
+ * After one or more vring_add_buf calls, invoke this to kick
* the other side.
*
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
-void virtqueue_kick(struct virtqueue *vq)
+static void vring_kick(struct virtqueue *vq)
{
- if (virtqueue_kick_prepare(vq))
- virtqueue_notify(vq);
+ if (vring_kick_prepare(vq))
+ vring_kick_notify(vq);
}
-EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
@@ -398,7 +394,7 @@
}
/**
- * virtqueue_get_buf - get the next used buffer
+ * vring_get_buf - get the next used buffer
* @vq: the struct virtqueue we're talking about.
* @len: the length written into the buffer
*
@@ -411,9 +407,9 @@
* operations at the same time (except where noted).
*
* Returns NULL if there are no used buffers, or the "data" token
- * handed to virtqueue_add_buf().
+ * handed to vring_add_buf().
*/
-void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
+static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
{
struct vring_virtqueue *vq = to_vvq(_vq);
void *ret;
@@ -468,10 +464,9 @@
END_USE(vq);
return ret;
}
-EXPORT_SYMBOL_GPL(virtqueue_get_buf);
/**
- * virtqueue_disable_cb - disable callbacks
+ * vring_disable_cb - disable callbacks
* @vq: the struct virtqueue we're talking about.
*
* Note that this is not necessarily synchronous, hence unreliable and only
@@ -479,16 +474,15 @@
*
* Unlike other operations, this need not be serialized.
*/
-void virtqueue_disable_cb(struct virtqueue *_vq)
+static void vring_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
-EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
- * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * vring_enable_cb - restart callbacks after disable_cb.
* @vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks; it returns "false" if there are pending
@@ -498,7 +492,7 @@
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
-bool virtqueue_enable_cb(struct virtqueue *_vq)
+static bool vring_enable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -520,10 +514,9 @@
END_USE(vq);
return true;
}
-EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
/**
- * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
+ * vring_enable_cb_delayed - restart callbacks after disable_cb.
* @vq: the struct virtqueue we're talking about.
*
* This re-enables callbacks but hints to the other side to delay
@@ -535,7 +528,7 @@
* Caller must ensure we don't call this with other virtqueue
* operations at the same time (except where noted).
*/
-bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
+static bool vring_enable_cb_delayed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 bufs;
@@ -560,17 +553,16 @@
END_USE(vq);
return true;
}
-EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
/**
- * virtqueue_detach_unused_buf - detach first unused buffer
+ * vring_detach_unused_buf - detach first unused buffer
* @vq: the struct virtqueue we're talking about.
*
- * Returns NULL or the "data" token handed to virtqueue_add_buf().
+ * Returns NULL or the "data" token handed to vring_add_buf().
* This is not valid on an active queue; it is useful only for device
* shutdown.
*/
-void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
+static void *vring_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i;
@@ -594,7 +586,6 @@
END_USE(vq);
return NULL;
}
-EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
irqreturn_t vring_interrupt(int irq, void *_vq)
{
@@ -616,6 +607,34 @@
}
EXPORT_SYMBOL_GPL(vring_interrupt);
+/**
+ * get_vring_size - return the size of the virtqueue's vring
+ * @vq: the struct virtqueue containing the vring of interest.
+ *
+ * Returns the size of the vring. This is mainly used for boasting to
+ * userspace. Unlike other operations, this need not be serialized.
+ */
+static unsigned int get_vring_size(struct virtqueue *_vq)
+{
+
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ return vq->vring.num;
+}
+
+static struct virtqueue_ops vring_vq_ops = {
+ .add_buf = vring_add_buf,
+ .get_buf = vring_get_buf,
+ .kick = vring_kick,
+ .kick_prepare = vring_kick_prepare,
+ .kick_notify = vring_kick_notify,
+ .disable_cb = vring_disable_cb,
+ .enable_cb = vring_enable_cb,
+ .enable_cb_delayed = vring_enable_cb_delayed,
+ .detach_unused_buf = vring_detach_unused_buf,
+ .get_impl_size = get_vring_size,
+};
+
struct virtqueue *vring_new_virtqueue(unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
@@ -641,6 +660,7 @@
vring_init(&vq->vring, num, pages, vring_align);
vq->vq.callback = callback;
vq->vq.vdev = vdev;
+ vq->vq.vq_ops = &vring_vq_ops;
vq->vq.name = name;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
@@ -699,20 +719,4 @@
}
EXPORT_SYMBOL_GPL(vring_transport_features);
-/**
- * virtqueue_get_vring_size - return the size of the virtqueue's vring
- * @vq: the struct virtqueue containing the vring of interest.
- *
- * Returns the size of the vring. This is mainly used for boasting to
- * userspace. Unlike other operations, this need not be serialized.
- */
-unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
-{
-
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- return vq->vring.num;
-}
-EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
-
MODULE_LICENSE("GPL");
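
With the vring entry points made static and gathered into vring_vq_ops, the generic virtqueue_* helpers in virtio.h become thin dispatchers through vq->vq_ops, so another transport can plug in its own implementation the same way vring_new_virtqueue() does above. A schematic sketch of such a provider; every foo_* name below is hypothetical and not part of this patch.

    #include <linux/virtio.h>
    #include <linux/scatterlist.h>

    /* Hypothetical alternate transport: supply its own virtqueue_ops and
     * point the queue at it, as vring_new_virtqueue() now does for vring.
     */
    static int foo_add_buf(struct virtqueue *vq, struct scatterlist sg[],
                           unsigned int out, unsigned int in,
                           void *data, gfp_t gfp)
    {
            /* transport-specific buffer posting would go here */
            return -ENOSPC;
    }

    static struct virtqueue_ops foo_vq_ops = {
            .add_buf = foo_add_buf,
            /* .kick, .get_buf, ... filled in the same way */
    };

    static void foo_setup_vq(struct virtqueue *vq)
    {
            vq->vq_ops = &foo_vq_ops;  /* callers keep using virtqueue_add_buf() etc. */
    }
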
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 8efd28a..0d0f6d3 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -15,6 +15,7 @@
* @callback: the function to call when buffers are consumed (can be NULL).
* @name: the name of this virtqueue (mainly for debugging)
* @vdev: the virtio device this queue was created for.
+ * @vq_ops: the operations for this virtqueue (see below).
* @priv: a pointer for the virtqueue implementation to use.
*/
struct virtqueue {
@@ -22,33 +23,235 @@
void (*callback)(struct virtqueue *vq);
const char *name;
struct virtio_device *vdev;
+ struct virtqueue_ops *vq_ops;
void *priv;
};
-int virtqueue_add_buf(struct virtqueue *vq,
- struct scatterlist sg[],
- unsigned int out_num,
- unsigned int in_num,
- void *data,
- gfp_t gfp);
+/**
+ * virtqueue_ops - operations for virtqueue abstraction layer
+ * @add_buf: expose buffer to other end
+ * vq: the struct virtqueue we're talking about.
+ * sg: the description of the buffer(s).
+ * out_num: the number of sg readable by other side
+ * in_num: the number of sg which are writable (after readable ones)
+ * data: the token identifying the buffer.
+ * Returns remaining capacity of queue (sg segments) or a negative error.
+ * @kick: update after add_buf
+ * vq: the struct virtqueue
+ * After one or more add_buf calls, invoke this to kick the other side.
+ * @get_buf: get the next used buffer
+ * vq: the struct virtqueue we're talking about.
+ * len: the length written into the buffer
+ * Returns NULL or the "data" token handed to add_buf.
+ * @disable_cb: disable callbacks
+ * vq: the struct virtqueue we're talking about.
+ * Note that this is not necessarily synchronous, hence unreliable and only
+ * useful as an optimization.
+ * @enable_cb: restart callbacks after disable_cb.
+ * vq: the struct virtqueue we're talking about.
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ * @enable_cb_delayed: restart callbacks after disable_cb.
+ * vq: the struct virtqueue we're talking about.
+ * This re-enables callbacks but hints to the other side to delay
+ * interrupts until most of the available buffers have been processed;
+ * it returns "false" if there are many pending buffers in the queue,
+ * to detect a possible race between the driver checking for more work,
+ * and enabling callbacks.
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ * @detach_unused_buf: detach first unused buffer
+ * vq: the struct virtqueue we're talking about.
+ * Returns NULL or the "data" token handed to add_buf
+ * @get_impl_size: return the size of the virtqueue's implementation
+ * vq: the struct virtqueue containing the implementation of interest.
+ * Returns the size of the implementation. This is mainly used for
+ * boasting to userspace. Unlike other operations, this need not
+ * be serialized.
+ *
+ * Locking rules are straightforward: the driver is responsible for
+ * locking. No two operations may be invoked simultaneously, with the exception
+ * of @disable_cb.
+ *
+ * All operations can be called in any context.
+ */
+struct virtqueue_ops {
+ int (*add_buf)(struct virtqueue *vq,
+ struct scatterlist sg[],
+ unsigned int out_num,
+ unsigned int in_num,
+ void *data,
+ gfp_t gfp);
-void virtqueue_kick(struct virtqueue *vq);
+ void (*kick)(struct virtqueue *vq);
+ bool (*kick_prepare)(struct virtqueue *vq);
+ void (*kick_notify)(struct virtqueue *vq);
+ void *(*get_buf)(struct virtqueue *vq, unsigned int *len);
+ void (*disable_cb)(struct virtqueue *vq);
+ bool (*enable_cb)(struct virtqueue *vq);
+ bool (*enable_cb_delayed)(struct virtqueue *vq);
+ void *(*detach_unused_buf)(struct virtqueue *vq);
+ unsigned int (*get_impl_size)(struct virtqueue *vq);
+};
-bool virtqueue_kick_prepare(struct virtqueue *vq);
+/**
+ * virtqueue_add_buf - expose buffer to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: the description of the buffer(s).
+ * @out_num: the number of sg readable by other side
+ * @in_num: the number of sg which are writable (after readable ones)
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns remaining capacity of queue or a negative error.
+ */
+static inline int virtqueue_add_buf(struct virtqueue *vq,
+ struct scatterlist sg[],
+ unsigned int out_num,
+ unsigned int in_num,
+ void *data,
+ gfp_t gfp)
+{
+ return vq->vq_ops->add_buf(vq, sg, out_num, in_num, data, gfp);
+}
+/**
+ * virtqueue_kick - update after add_buf
+ * @vq: the struct virtqueue
+ *
+ * After one or more virtqueue_add_buf calls, invoke this to kick
+ * the other side.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+static inline void virtqueue_kick(struct virtqueue *vq)
+{
+ vq->vq_ops->kick(vq);
+}
-void virtqueue_notify(struct virtqueue *vq);
+/**
+ * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * Instead of virtqueue_kick(), you can do:
+ * if (virtqueue_kick_prepare(vq))
+ * virtqueue_kick_notify(vq);
+ *
+ * This is sometimes useful because the virtqueue_kick_prepare() needs
+ * to be serialized, but the actual virtqueue_kick_notify() call does not.
+ */
+static inline bool virtqueue_kick_prepare(struct virtqueue *vq)
+{
+ return vq->vq_ops->kick_prepare(vq);
+}
-void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
+/**
+ * virtqueue_kick_notify - second half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ */
+static inline void virtqueue_kick_notify(struct virtqueue *vq)
+{
+ vq->vq_ops->kick_notify(vq);
+}
-void virtqueue_disable_cb(struct virtqueue *vq);
+/**
+ * virtqueue_get_buf - get the next used buffer
+ * @vq: the struct virtqueue we're talking about.
+ * @len: the length written into the buffer
+ *
+ * If the driver wrote data into the buffer, @len will be set to the
+ * amount written. This means you don't need to clear the buffer
+ * beforehand to ensure there's no data leakage in the case of short
+ * writes.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ *
+ * Returns NULL if there are no used buffers, or the "data" token
+ * handed to virtqueue_add_buf().
+ */
+static inline void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
+{
+ return vq->vq_ops->get_buf(vq, len);
+}
-bool virtqueue_enable_cb(struct virtqueue *vq);
+/**
+ * virtqueue_disable_cb - disable callbacks
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Note that this is not necessarily synchronous, hence unreliable and only
+ * useful as an optimization.
+ *
+ * Unlike other operations, this need not be serialized.
+ */
+static inline void virtqueue_disable_cb(struct virtqueue *vq)
+{
+ vq->vq_ops->disable_cb(vq);
+}
-bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+static inline bool virtqueue_enable_cb(struct virtqueue *vq)
+{
+ return vq->vq_ops->enable_cb(vq);
+}
-void *virtqueue_detach_unused_buf(struct virtqueue *vq);
+/**
+ * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks but hints to the other side to delay
+ * interrupts until most of the available buffers have been processed;
+ * it returns "false" if there are many pending buffers in the queue,
+ * to detect a possible race between the driver checking for more work,
+ * and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+static inline bool virtqueue_enable_cb_delayed(struct virtqueue *vq)
+{
+ return vq->vq_ops->enable_cb_delayed(vq);
+}
-unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
+/**
+ * virtqueue_detach_unused_buf - detach first unused buffer
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Returns NULL or the "data" token handed to virtqueue_add_buf().
+ * This is not valid on an active queue; it is useful only for device
+ * shutdown.
+ */
+static inline void *virtqueue_detach_unused_buf(struct virtqueue *vq)
+{
+ return vq->vq_ops->detach_unused_buf(vq);
+}
+
+/**
+ * virtqueue_get_impl_size - return the size of the virtqueue's implementation
+ * @vq: the struct virtqueue containing the implementation of interest.
+ *
+ * Returns the size of the virtqueue implementation. This is mainly used
+ * for boasting to userspace. Unlike other operations, this need not
+ * be serialized.
+ */
+static inline unsigned int virtqueue_get_impl_size(struct virtqueue *vq)
+{
+ return vq->vq_ops->get_impl_size(vq);
+}
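+
+/* Call sites are unchanged by the indirection: a driver still posts a
+ * buffer, kicks, and later collects it with the same helpers, which now
+ * simply bounce through vq_ops. A minimal, hypothetical driver-side
+ * sketch (foo_send()/foo_reclaim() and the single-element usage are
+ * illustrative only):
+ *
+ *	static int foo_send(struct virtqueue *vq, void *buf, unsigned int len)
+ *	{
+ *		struct scatterlist sg;
+ *		int rc;
+ *
+ *		sg_init_one(&sg, buf, len);
+ *
+ *		// one readable sg for the device, nothing writable
+ *		rc = virtqueue_add_buf(vq, &sg, 1, 0, buf, GFP_ATOMIC);
+ *		if (rc < 0)
+ *			return rc;
+ *
+ *		virtqueue_kick(vq);
+ *		return 0;
+ *	}
+ *
+ *	static void foo_reclaim(struct virtqueue *vq)
+ *	{
+ *		unsigned int len;
+ *		void *buf;
+ *
+ *		// returns the "data" token handed to virtqueue_add_buf()
+ *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
+ *			;	// recycle buf
+ *	}
+ */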
/**
* virtio_device - representation of a device using virtio