Merge "msm: ADSPRPC: Buffer length to be copied is truncated" into msm-4.8
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c90de2c..4d6ee1b 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -357,8 +357,10 @@ struct arm_smmu_master_cfg {
#define INVALID_SMENDX -1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
+#define fwspec_smendx(fw, i) \
+ (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
- for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i)
+ for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
/*
* Describes resources required for on/off power operation.
@@ -2088,6 +2090,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -ENXIO;
}
+ /*
+ * FIXME: The arch/arm DMA API code tries to attach devices to its own
+ * domains between of_xlate() and add_device() - we have no way to cope
+ * with that, so until ARM gets converted to rely on groups and default
+ * domains, just say no (but more politely than by dereferencing NULL).
+ * This should be at least a WARN_ON once that's sorted.
+ */
+ if (!fwspec->iommu_priv)
+ return -ENODEV;
+
smmu = fwspec_smmu(fwspec);
/* Enable Clocks and Power */
@@ -2354,7 +2366,7 @@ static int arm_smmu_add_device(struct device *dev)
fwspec = dev->iommu_fwspec;
if (ret)
goto out_free;
- } else if (fwspec) {
+ } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
if (!smmu)
return -ENODEV;
@@ -2362,6 +2374,10 @@ static int arm_smmu_add_device(struct device *dev)
return -ENODEV;
}
+ ret = arm_smmu_power_on(smmu->pwr);
+ if (ret)
+ goto out_free;
+
ret = -EINVAL;
for (i = 0; i < fwspec->num_ids; i++) {
u16 sid = fwspec->ids[i];
@@ -2370,12 +2386,12 @@ static int arm_smmu_add_device(struct device *dev)
if (sid & ~smmu->streamid_mask) {
dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
sid, smmu->streamid_mask);
- goto out_free;
+ goto out_pwr_off;
}
if (mask & ~smmu->smr_mask_mask) {
dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
sid, smmu->smr_mask_mask);
- goto out_free;
+ goto out_pwr_off;
}
}
@@ -2383,7 +2399,7 @@ static int arm_smmu_add_device(struct device *dev)
cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
GFP_KERNEL);
if (!cfg)
- goto out_free;
+ goto out_pwr_off;
cfg->smmu = smmu;
fwspec->iommu_priv = cfg;
@@ -2392,10 +2408,13 @@ static int arm_smmu_add_device(struct device *dev)
ret = arm_smmu_master_alloc_smes(dev);
if (ret)
- goto out_free;
+ goto out_pwr_off;
+ arm_smmu_power_off(smmu->pwr);
return 0;
+out_pwr_off:
+ arm_smmu_power_off(smmu->pwr);
out_free:
if (fwspec)
kfree(fwspec->iommu_priv);
@@ -2406,14 +2425,22 @@ static int arm_smmu_add_device(struct device *dev)
static void arm_smmu_remove_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct arm_smmu_device *smmu;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
+ smmu = fwspec_smmu(fwspec);
+ if (arm_smmu_power_on(smmu->pwr)) {
+ WARN_ON(1);
+ return;
+ }
+
arm_smmu_master_free_smes(fwspec);
iommu_group_remove_device(dev);
kfree(fwspec->iommu_priv);
iommu_fwspec_free(dev);
+ arm_smmu_power_off(smmu->pwr);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
diff --git a/net/rmnet_data/rmnet_data_config.h b/net/rmnet_data/rmnet_data_config.h
index 3e356c0..b929158 100644
--- a/net/rmnet_data/rmnet_data_config.h
+++ b/net/rmnet_data/rmnet_data_config.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,7 @@ struct rmnet_logical_ep_conf_s {
u8 rmnet_mode;
u8 mux_id;
struct timespec flush_time;
+ unsigned int flush_byte_count;
struct net_device *egress_dev;
};
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index 42955a2..46fdf5a 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -46,6 +46,22 @@ long gro_flush_time __read_mostly = 10000L;
module_param(gro_flush_time, long, 0644);
MODULE_PARM_DESC(gro_flush_time, "Flush GRO when spaced more than this");
+unsigned int gro_min_byte_thresh __read_mostly = 7500;
+module_param(gro_min_byte_thresh, uint, 0644);
+MODULE_PARM_DESC(gro_min_byte_thresh, "Min byte thresh to change flush time");
+
+unsigned int dynamic_gro_on __read_mostly = 1;
+module_param(dynamic_gro_on, uint, 0644);
+MODULE_PARM_DESC(dynamic_gro_on, "Toggle to turn on dynamic gro logic");
+
+unsigned int upper_flush_time __read_mostly = 15000;
+module_param(upper_flush_time, uint, 0644);
+MODULE_PARM_DESC(upper_flush_time, "Upper limit on flush time");
+
+unsigned int upper_byte_limit __read_mostly = 10500;
+module_param(upper_byte_limit, uint, 0644);
+MODULE_PARM_DESC(upper_byte_limit, "Upper byte limit");
+
#define RMNET_DATA_IP_VERSION_4 0x40
#define RMNET_DATA_IP_VERSION_6 0x60
@@ -221,7 +237,8 @@ static int rmnet_check_skb_can_gro(struct sk_buff *skb)
* ratio.
*/
static void rmnet_optional_gro_flush(struct napi_struct *napi,
- struct rmnet_logical_ep_conf_s *ep)
+ struct rmnet_logical_ep_conf_s *ep,
+ unsigned int skb_size)
{
struct timespec curr_time, diff;
@@ -230,12 +247,58 @@ static void rmnet_optional_gro_flush(struct napi_struct *napi,
if (unlikely(ep->flush_time.tv_sec == 0)) {
getnstimeofday(&ep->flush_time);
+ ep->flush_byte_count = 0;
} else {
getnstimeofday(&(curr_time));
diff = timespec_sub(curr_time, ep->flush_time);
- if ((diff.tv_sec > 0) || (diff.tv_nsec > gro_flush_time)) {
+ ep->flush_byte_count += skb_size;
+
+ if (dynamic_gro_on) {
+ if ((!(diff.tv_sec > 0) || diff.tv_nsec <=
+ gro_flush_time) &&
+ ep->flush_byte_count >=
+ gro_min_byte_thresh) {
+ /* Processed many bytes in a small time window.
+ * No longer need to flush so often and we can
+ * increase our byte limit
+ */
+ gro_flush_time = upper_flush_time;
+ gro_min_byte_thresh = upper_byte_limit;
+ } else if ((diff.tv_sec > 0 ||
+ diff.tv_nsec > gro_flush_time) &&
+ ep->flush_byte_count <
+ gro_min_byte_thresh) {
+ /* We have not hit our time limit and we are not
+ * receiving many bytes. Demote ourselves to the
+ * lowest limits and flush
+ */
+ napi_gro_flush(napi, false);
+ getnstimeofday(&ep->flush_time);
+ ep->flush_byte_count = 0;
+ gro_flush_time = 10000L;
+ gro_min_byte_thresh = 7500L;
+ } else if ((diff.tv_sec > 0 ||
+ diff.tv_nsec > gro_flush_time) &&
+ ep->flush_byte_count >=
+ gro_min_byte_thresh) {
+ /* Above byte and time limit, therefore we can
+ * move/maintain our limits to be the max
+ * and flush
+ */
+ napi_gro_flush(napi, false);
+ getnstimeofday(&ep->flush_time);
+ ep->flush_byte_count = 0;
+ gro_flush_time = upper_flush_time;
+ gro_min_byte_thresh = upper_byte_limit;
+ }
+ /* else, below time limit and below
+ * byte thresh, so change nothing
+ */
+ } else if (diff.tv_sec > 0 ||
+ diff.tv_nsec >= gro_flush_time) {
napi_gro_flush(napi, false);
getnstimeofday(&ep->flush_time);
+ ep->flush_byte_count = 0;
}
}
}
@@ -254,6 +317,7 @@ static rx_handler_result_t __rmnet_deliver_skb
{
struct napi_struct *napi = NULL;
gro_result_t gro_res;
+ unsigned int skb_size;
trace___rmnet_deliver_skb(skb);
switch (ep->rmnet_mode) {
@@ -277,9 +341,11 @@ static rx_handler_result_t __rmnet_deliver_skb
(skb->dev->features & NETIF_F_GRO)) {
napi = get_current_napi_context();
if (napi) {
+ skb_size = skb->len;
gro_res = napi_gro_receive(napi, skb);
trace_rmnet_gro_downlink(gro_res);
- rmnet_optional_gro_flush(napi, ep);
+ rmnet_optional_gro_flush(napi, ep,
+ skb_size);
} else {
WARN_ONCE(1, "current napi is NULL\n");
netif_receive_skb(skb);