Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/amd/xgbe/xgbe-desc.c
	drivers/net/ethernet/renesas/sh_eth.c

Overlapping changes in both conflict cases.

Signed-off-by: David S. Miller <davem@davemloft.net>

diff --git a/Documentation/Changes b/Documentation/Changes
index 1de131b..74bdda9 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -383,7 +383,7 @@
Ip-route2
---------
-o <ftp://ftp.tux.org/pub/net/ip-routing/iproute2-2.2.4-now-ss991023.tar.gz>
+o <https://www.kernel.org/pub/linux/utils/net/iproute2/>
OProfile
--------
diff --git a/Makefile b/Makefile
index 2fd5c4e..ce70361 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Diseased Newt
# *DOCUMENTATION*
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 9b55143..9fd6834 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -426,7 +426,7 @@
u32 *p = &ctx->target[ctx->idx];
uasm_i_divu(&p, dst, src);
p = &ctx->target[ctx->idx + 1];
- uasm_i_mflo(&p, dst);
+ uasm_i_mfhi(&p, dst);
}
ctx->idx += 2; /* 2 insts */
}
@@ -971,7 +971,7 @@
break;
case BPF_ALU | BPF_MOD | BPF_K:
/* A %= k */
- if (k == 1 || optimize_div(&k)) {
+ if (k == 1) {
ctx->flags |= SEEN_A;
emit_jit_reg_move(r_A, r_zero, ctx);
} else {
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index dd1c24c..3f51cf4 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -54,12 +54,8 @@
*/
local_irq_save(flags);
local_mcck_disable();
- /*
- * Ummm... Does this make sense at all? Copying the percpu struct
- * and then zapping it one statement later?
- */
- memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck));
- memset(&mcck, 0, sizeof(struct mcck_struct));
+ mcck = *this_cpu_ptr(&cpu_mcck);
+ memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
clear_cpu_flag(CIF_MCCK_PENDING);
local_mcck_enable();
local_irq_restore(flags);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 626e013..9875143 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -178,7 +178,7 @@
}
struct jit_context {
- unsigned int cleanup_addr; /* epilogue code offset */
+ int cleanup_addr; /* epilogue code offset */
bool seen_ld_abs;
};
@@ -192,6 +192,7 @@
struct bpf_insn *insn = bpf_prog->insnsi;
int insn_cnt = bpf_prog->len;
bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
+ bool seen_exit = false;
u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
int i;
int proglen = 0;
@@ -854,10 +855,11 @@
goto common_load;
case BPF_JMP | BPF_EXIT:
- if (i != insn_cnt - 1) {
+ if (seen_exit) {
jmp_offset = ctx->cleanup_addr - addrs[i];
goto emit_jmp;
}
+ seen_exit = true;
/* update cleanup_addr */
ctx->cleanup_addr = proglen;
/* mov rbx, qword ptr [rbp-X] */
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 0984232..5cbd5d9 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -216,9 +216,10 @@
{
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
struct blk_integrity_iter iter;
- struct bio_vec *bv;
+ struct bvec_iter bviter;
+ struct bio_vec bv;
struct bio_integrity_payload *bip = bio_integrity(bio);
- unsigned int i, ret = 0;
+ unsigned int ret = 0;
void *prot_buf = page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset;
@@ -227,11 +228,11 @@
iter.seed = bip_get_seed(bip);
iter.prot_buf = prot_buf;
- bio_for_each_segment_all(bv, bio, i) {
- void *kaddr = kmap_atomic(bv->bv_page);
+ bio_for_each_segment(bv, bio, bviter) {
+ void *kaddr = kmap_atomic(bv.bv_page);
- iter.data_buf = kaddr + bv->bv_offset;
- iter.data_size = bv->bv_len;
+ iter.data_buf = kaddr + bv.bv_offset;
+ iter.data_size = bv.bv_len;
ret = proc_fn(&iter);
if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f0a1a56..8bcdb98 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9408,6 +9408,10 @@
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (i915_reset_in_progress(&dev_priv->gpu_error) ||
+ crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ return true;
+
/*
* The relevant registers don't exist on pre-ctg.
* As the flip done interrupt doesn't trigger for mmio
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5ad45bf..4bcd917 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4450,6 +4450,7 @@
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
+ cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index cd05677..72a40f9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -218,7 +218,6 @@
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index 5ae6a43..1931057 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -551,8 +551,8 @@
}
if (status & 0x40000000) {
- nouveau_fifo_uevent(&priv->base);
nv_wr32(priv, 0x002100, 0x40000000);
+ nouveau_fifo_uevent(&priv->base);
status &= ~0x40000000;
}
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 1fe1f8f..074d434 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -740,6 +740,8 @@
u32 inte = nv_rd32(priv, 0x002628);
u32 unkn;
+ nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
+
for (unkn = 0; unkn < 8; unkn++) {
u32 ints = (intr >> (unkn * 0x04)) & inte;
if (ints & 0x1) {
@@ -751,8 +753,6 @@
nv_mask(priv, 0x002628, ints, 0);
}
}
-
- nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
}
static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index d2f0fd3..f8734eb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -952,8 +952,8 @@
}
if (stat & 0x80000000) {
- nve0_fifo_intr_engine(priv);
nv_wr32(priv, 0x002100, 0x80000000);
+ nve0_fifo_intr_engine(priv);
stat &= ~0x80000000;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5723807..62b97c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -629,7 +629,6 @@
pci_save_state(pdev);
pci_disable_device(pdev);
- pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
@@ -933,6 +932,7 @@
ret = nouveau_do_suspend(drm_dev, true);
pci_save_state(pdev);
pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3cold);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 515cd9a..f32a434 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -52,20 +52,24 @@
return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}
-static void
+static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
+ int drop = 0;
+
fence_signal_locked(&fence->base);
list_del(&fence->head);
+ rcu_assign_pointer(fence->channel, NULL);
if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
if (!--fctx->notify_ref)
- nvif_notify_put(&fctx->notify);
+ drop = 1;
}
fence_put(&fence->base);
+ return drop;
}
static struct nouveau_fence *
@@ -88,16 +92,23 @@
{
struct nouveau_fence *fence;
- nvif_notify_fini(&fctx->notify);
-
spin_lock_irq(&fctx->lock);
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
- nouveau_fence_signal(fence);
- fence->channel = NULL;
+ if (nouveau_fence_signal(fence))
+ nvif_notify_put(&fctx->notify);
}
spin_unlock_irq(&fctx->lock);
+
+ nvif_notify_fini(&fctx->notify);
+ fctx->dead = 1;
+
+ /*
+ * Ensure that all accesses to fence->channel complete before freeing
+ * the channel.
+ */
+ synchronize_rcu();
}
static void
@@ -112,21 +123,23 @@
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}
-static void
+static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
struct nouveau_fence *fence;
-
+ int drop = 0;
u32 seq = fctx->read(chan);
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
if ((int)(seq - fence->base.seqno) < 0)
- return;
+ break;
- nouveau_fence_signal(fence);
+ drop |= nouveau_fence_signal(fence);
}
+
+ return drop;
}
static int
@@ -135,18 +148,21 @@
struct nouveau_fence_chan *fctx =
container_of(notify, typeof(*fctx), notify);
unsigned long flags;
+ int ret = NVIF_NOTIFY_KEEP;
spin_lock_irqsave(&fctx->lock, flags);
if (!list_empty(&fctx->pending)) {
struct nouveau_fence *fence;
+ struct nouveau_channel *chan;
fence = list_entry(fctx->pending.next, typeof(*fence), head);
- nouveau_fence_update(fence->channel, fctx);
+ chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+ if (nouveau_fence_update(fence->channel, fctx))
+ ret = NVIF_NOTIFY_DROP;
}
spin_unlock_irqrestore(&fctx->lock, flags);
- /* Always return keep here. NVIF refcount is handled with nouveau_fence_update */
- return NVIF_NOTIFY_KEEP;
+ return ret;
}
void
@@ -262,7 +278,10 @@
if (!ret) {
fence_get(&fence->base);
spin_lock_irq(&fctx->lock);
- nouveau_fence_update(chan, fctx);
+
+ if (nouveau_fence_update(chan, fctx))
+ nvif_notify_put(&fctx->notify);
+
list_add_tail(&fence->head, &fctx->pending);
spin_unlock_irq(&fctx->lock);
}
@@ -276,13 +295,16 @@
if (fence->base.ops == &nouveau_fence_ops_legacy ||
fence->base.ops == &nouveau_fence_ops_uevent) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+ struct nouveau_channel *chan;
unsigned long flags;
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return true;
spin_lock_irqsave(&fctx->lock, flags);
- nouveau_fence_update(fence->channel, fctx);
+ chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+ if (chan && nouveau_fence_update(chan, fctx))
+ nvif_notify_put(&fctx->notify);
spin_unlock_irqrestore(&fctx->lock, flags);
}
return fence_is_signaled(&fence->base);
@@ -387,12 +409,18 @@
if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
struct nouveau_channel *prev = NULL;
+ bool must_wait = true;
f = nouveau_local_fence(fence, chan->drm);
- if (f)
- prev = f->channel;
+ if (f) {
+ rcu_read_lock();
+ prev = rcu_dereference(f->channel);
+ if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+ must_wait = false;
+ rcu_read_unlock();
+ }
- if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+ if (must_wait)
ret = fence_wait(fence, intr);
return ret;
@@ -403,19 +431,22 @@
for (i = 0; i < fobj->shared_count && !ret; ++i) {
struct nouveau_channel *prev = NULL;
+ bool must_wait = true;
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(resv));
f = nouveau_local_fence(fence, chan->drm);
- if (f)
- prev = f->channel;
+ if (f) {
+ rcu_read_lock();
+ prev = rcu_dereference(f->channel);
+ if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+ must_wait = false;
+ rcu_read_unlock();
+ }
- if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+ if (must_wait)
ret = fence_wait(fence, intr);
-
- if (ret)
- break;
}
return ret;
@@ -463,7 +494,7 @@
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- return fence->channel ? fctx->name : "dead channel";
+ return !fctx->dead ? fctx->name : "dead channel";
}
/*
@@ -476,9 +507,16 @@
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- struct nouveau_channel *chan = fence->channel;
+ struct nouveau_channel *chan;
+ bool ret = false;
- return (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+ rcu_read_lock();
+ chan = rcu_dereference(fence->channel);
+ if (chan)
+ ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+ rcu_read_unlock();
+
+ return ret;
}
static bool nouveau_fence_no_signaling(struct fence *f)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 943b0b1..96e461c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -14,7 +14,7 @@
bool sysmem;
- struct nouveau_channel *channel;
+ struct nouveau_channel __rcu *channel;
unsigned long timeout;
};
@@ -47,7 +47,7 @@
char name[32];
struct nvif_notify notify;
- int notify_ref;
+ int notify_ref, dead;
};
struct nouveau_fence_priv {
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 300c4b3..26baa9c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -322,6 +322,12 @@
}
if (!radeon_connector->edid) {
+ /* don't fetch the edid from the vbios if ddc fails and runpm is
+ * enabled so we report disconnected.
+ */
+ if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
+ return;
+
if (rdev->is_atom_bios) {
/* some laptops provide a hardcoded edid in rom for LCDs */
if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
@@ -826,6 +832,8 @@
static enum drm_connector_status
radeon_lvds_detect(struct drm_connector *connector, bool force)
{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
enum drm_connector_status ret = connector_status_disconnected;
@@ -842,7 +850,11 @@
/* check if panel is valid */
if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
ret = connector_status_connected;
-
+ /* don't fetch the edid from the vbios if ddc fails and runpm is
+ * enabled so we report disconnected.
+ */
+ if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
+ ret = connector_status_disconnected;
}
/* check for edid as well */
@@ -1589,6 +1601,11 @@
/* check if panel is valid */
if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
ret = connector_status_connected;
+ /* don't fetch the edid from the vbios if ddc fails and runpm is
+ * enabled so we report disconnected.
+ */
+ if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
+ ret = connector_status_disconnected;
}
/* eDP is always DP */
radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a3e7aed..6f377de 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -251,22 +251,19 @@
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
- int i, r = 0;
+ struct radeon_cs_reloc *reloc;
+ int r;
- for (i = 0; i < p->nrelocs; i++) {
+ list_for_each_entry(reloc, &p->validated, tv.head) {
struct reservation_object *resv;
- if (!p->relocs[i].robj)
- continue;
-
- resv = p->relocs[i].robj->tbo.resv;
+ resv = reloc->robj->tbo.resv;
r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
- p->relocs[i].tv.shared);
-
+ reloc->tv.shared);
if (r)
- break;
+ return r;
}
- return r;
+ return 0;
}
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8309b11..0358676 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -795,6 +795,8 @@
/* Get associated drm_crtc: */
drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+ if (!drmcrtc)
+ return -EINVAL;
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 99a960a..4c0d786 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -213,6 +213,13 @@
if (!(rdev->flags & RADEON_IS_PCIE))
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#ifdef CONFIG_X86_32
+ /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+ */
+ bo->flags &= ~RADEON_GEM_GTT_WC;
+#endif
+
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index edca99d..23628b7 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -359,7 +359,7 @@
}
/* Configure Tx/Rx FIFO threshold levels */
- dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL);
+ dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL);
dw_writel(dev, 0, DW_IC_RX_TL);
/* configure the i2c master */
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 26942c1..277a228 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -922,14 +922,12 @@
if (stat & OMAP_I2C_STAT_NACK) {
err |= OMAP_I2C_STAT_NACK;
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
- break;
}
if (stat & OMAP_I2C_STAT_AL) {
dev_err(dev->dev, "Arbitration lost\n");
err |= OMAP_I2C_STAT_AL;
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
- break;
}
/*
@@ -954,11 +952,13 @@
if (dev->fifo_size)
num_bytes = dev->buf_len;
- omap_i2c_receive_data(dev, num_bytes, true);
-
- if (dev->errata & I2C_OMAP_ERRATA_I207)
+ if (dev->errata & I2C_OMAP_ERRATA_I207) {
i2c_omap_errata_i207(dev, stat);
+ num_bytes = (omap_i2c_read_reg(dev,
+ OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
+ }
+ omap_i2c_receive_data(dev, num_bytes, true);
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
continue;
}
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index bc20348..8afa28e 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -421,7 +421,7 @@
err_free_client:
evdev_detach_client(evdev, client);
- kfree(client);
+ kvfree(client);
return error;
}
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 3e6eebd..7b11243 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -225,7 +225,12 @@
bond_option_arp_ip_targets_clear(bond);
nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
- __be32 target = nla_get_be32(attr);
+ __be32 target;
+
+ if (nla_len(attr) < sizeof(target))
+ return -EINVAL;
+
+ target = nla_get_be32(attr);
bond_opt_initval(&newval, (__force u64)target);
err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 925ab8e..4e1659d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -316,7 +316,7 @@
if (err) {
netdev_err(dev->netdev, "getting serial failure: %d\n", err);
} else if (serial_number) {
- u32 tmp32;
+ __le32 tmp32;
memcpy(&tmp32, args, 4);
*serial_number = le32_to_cpu(tmp32);
@@ -347,7 +347,7 @@
*/
static int pcan_usb_update_ts(struct pcan_usb_msg_context *mc)
{
- u16 tmp16;
+ __le16 tmp16;
if ((mc->ptr+2) > mc->end)
return -EINVAL;
@@ -371,7 +371,7 @@
{
/* only 1st packet supplies a word timestamp */
if (first_packet) {
- u16 tmp16;
+ __le16 tmp16;
if ((mc->ptr + 2) > mc->end)
return -EINVAL;
@@ -614,7 +614,7 @@
return -ENOMEM;
if (status_len & PCAN_USB_STATUSLEN_EXT_ID) {
- u32 tmp32;
+ __le32 tmp32;
if ((mc->ptr + 4) > mc->end)
goto decode_failed;
@@ -622,9 +622,9 @@
memcpy(&tmp32, mc->ptr, 4);
mc->ptr += 4;
- cf->can_id = le32_to_cpu(tmp32 >> 3) | CAN_EFF_FLAG;
+ cf->can_id = (le32_to_cpu(tmp32) >> 3) | CAN_EFF_FLAG;
} else {
- u16 tmp16;
+ __le16 tmp16;
if ((mc->ptr + 2) > mc->end)
goto decode_failed;
@@ -632,7 +632,7 @@
memcpy(&tmp16, mc->ptr, 2);
mc->ptr += 2;
- cf->can_id = le16_to_cpu(tmp16 >> 5);
+ cf->can_id = le16_to_cpu(tmp16) >> 5;
}
cf->can_dlc = get_can_dlc(rec_len);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 644e6ab..c62f48a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -735,7 +735,7 @@
dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
if (!dev->cmd_buf) {
err = -ENOMEM;
- goto lbl_set_intf_data;
+ goto lbl_free_candev;
}
dev->udev = usb_dev;
@@ -775,7 +775,7 @@
err = register_candev(netdev);
if (err) {
dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
- goto lbl_free_cmd_buf;
+ goto lbl_restore_intf_data;
}
if (dev->prev_siblings)
@@ -788,14 +788,14 @@
if (dev->adapter->dev_init) {
err = dev->adapter->dev_init(dev);
if (err)
- goto lbl_free_cmd_buf;
+ goto lbl_unregister_candev;
}
/* set bus off */
if (dev->adapter->dev_set_bus) {
err = dev->adapter->dev_set_bus(dev, 0);
if (err)
- goto lbl_free_cmd_buf;
+ goto lbl_unregister_candev;
}
/* get device number early */
@@ -807,11 +807,14 @@
return 0;
-lbl_free_cmd_buf:
+lbl_unregister_candev:
+ unregister_candev(netdev);
+
+lbl_restore_intf_data:
+ usb_set_intfdata(intf, dev->prev_siblings);
kfree(dev->cmd_buf);
-lbl_set_intf_data:
- usb_set_intfdata(intf, dev->prev_siblings);
+lbl_free_candev:
free_candev(netdev);
return err;
@@ -853,6 +856,7 @@
const struct usb_device_id *id)
{
struct usb_device *usb_dev = interface_to_usbdev(intf);
+ const u16 usb_id_product = le16_to_cpu(usb_dev->descriptor.idProduct);
struct peak_usb_adapter *peak_usb_adapter, **pp;
int i, err = -ENOMEM;
@@ -860,7 +864,7 @@
/* get corresponding PCAN-USB adapter */
for (pp = peak_usb_adapters_list; *pp; pp++)
- if ((*pp)->device_id == usb_dev->descriptor.idProduct)
+ if ((*pp)->device_id == usb_id_product)
break;
peak_usb_adapter = *pp;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 263dd92..4cfa3b8 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -78,8 +78,8 @@
int rec_buffer_size;
int rec_buffer_len;
union {
- u16 *rec_cnt_rd;
- u32 *rec_cnt;
+ __le16 *rec_cnt_rd;
+ __le32 *rec_cnt;
u8 *rec_buffer;
} u;
};
@@ -155,7 +155,7 @@
*pc++ = va_arg(ap, int);
*pc++ = va_arg(ap, int);
*pc++ = va_arg(ap, int);
- *(u32 *)pc = cpu_to_le32(va_arg(ap, u32));
+ *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32));
pc += 4;
memcpy(pc, va_arg(ap, int *), i);
pc += i;
@@ -165,7 +165,7 @@
case PCAN_USBPRO_GETDEVID:
*pc++ = va_arg(ap, int);
pc += 2;
- *(u32 *)pc = cpu_to_le32(va_arg(ap, u32));
+ *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32));
pc += 4;
break;
@@ -173,21 +173,21 @@
case PCAN_USBPRO_SETBUSACT:
case PCAN_USBPRO_SETSILENT:
*pc++ = va_arg(ap, int);
- *(u16 *)pc = cpu_to_le16(va_arg(ap, int));
+ *(__le16 *)pc = cpu_to_le16(va_arg(ap, int));
pc += 2;
break;
case PCAN_USBPRO_SETLED:
*pc++ = va_arg(ap, int);
- *(u16 *)pc = cpu_to_le16(va_arg(ap, int));
+ *(__le16 *)pc = cpu_to_le16(va_arg(ap, int));
pc += 2;
- *(u32 *)pc = cpu_to_le32(va_arg(ap, u32));
+ *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32));
pc += 4;
break;
case PCAN_USBPRO_SETTS:
pc++;
- *(u16 *)pc = cpu_to_le16(va_arg(ap, int));
+ *(__le16 *)pc = cpu_to_le16(va_arg(ap, int));
pc += 2;
break;
@@ -200,7 +200,7 @@
len = pc - pm->rec_ptr;
if (len > 0) {
- *pm->u.rec_cnt = cpu_to_le32(*pm->u.rec_cnt+1);
+ *pm->u.rec_cnt = cpu_to_le32(le32_to_cpu(*pm->u.rec_cnt) + 1);
*pm->rec_ptr = id;
pm->rec_ptr = pc;
@@ -333,8 +333,6 @@
if (!(dev->state & PCAN_USB_STATE_CONNECTED))
return 0;
- memset(req_addr, '\0', req_size);
-
req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER;
switch (req_id) {
@@ -345,6 +343,7 @@
default:
p = usb_rcvctrlpipe(dev->udev, 0);
req_type |= USB_DIR_IN;
+ memset(req_addr, '\0', req_size);
break;
}
@@ -572,7 +571,7 @@
static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
struct pcan_usb_pro_rxstatus *er)
{
- const u32 raw_status = le32_to_cpu(er->status);
+ const u16 raw_status = le16_to_cpu(er->status);
const unsigned int ctrl_idx = (er->channel >> 4) & 0x0f;
struct peak_usb_device *dev = usb_if->dev[ctrl_idx];
struct net_device *netdev = dev->netdev;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index 32275af..837cee2 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -33,27 +33,27 @@
/* PCAN_USBPRO_INFO_BL vendor request record type */
struct __packed pcan_usb_pro_blinfo {
- u32 ctrl_type;
+ __le32 ctrl_type;
u8 version[4];
u8 day;
u8 month;
u8 year;
u8 dummy;
- u32 serial_num_hi;
- u32 serial_num_lo;
- u32 hw_type;
- u32 hw_rev;
+ __le32 serial_num_hi;
+ __le32 serial_num_lo;
+ __le32 hw_type;
+ __le32 hw_rev;
};
/* PCAN_USBPRO_INFO_FW vendor request record type */
struct __packed pcan_usb_pro_fwinfo {
- u32 ctrl_type;
+ __le32 ctrl_type;
u8 version[4];
u8 day;
u8 month;
u8 year;
u8 dummy;
- u32 fw_type;
+ __le32 fw_type;
};
/*
@@ -80,46 +80,46 @@
struct __packed pcan_usb_pro_btr {
u8 data_type;
u8 channel;
- u16 dummy;
- u32 CCBT;
+ __le16 dummy;
+ __le32 CCBT;
};
struct __packed pcan_usb_pro_busact {
u8 data_type;
u8 channel;
- u16 onoff;
+ __le16 onoff;
};
struct __packed pcan_usb_pro_silent {
u8 data_type;
u8 channel;
- u16 onoff;
+ __le16 onoff;
};
struct __packed pcan_usb_pro_filter {
u8 data_type;
u8 dummy;
- u16 filter_mode;
+ __le16 filter_mode;
};
struct __packed pcan_usb_pro_setts {
u8 data_type;
u8 dummy;
- u16 mode;
+ __le16 mode;
};
struct __packed pcan_usb_pro_devid {
u8 data_type;
u8 channel;
- u16 dummy;
- u32 serial_num;
+ __le16 dummy;
+ __le32 serial_num;
};
struct __packed pcan_usb_pro_setled {
u8 data_type;
u8 channel;
- u16 mode;
- u32 timeout;
+ __le16 mode;
+ __le32 timeout;
};
struct __packed pcan_usb_pro_rxmsg {
@@ -127,8 +127,8 @@
u8 client;
u8 flags;
u8 len;
- u32 ts32;
- u32 id;
+ __le32 ts32;
+ __le32 id;
u8 data[8];
};
@@ -141,15 +141,15 @@
struct __packed pcan_usb_pro_rxstatus {
u8 data_type;
u8 channel;
- u16 status;
- u32 ts32;
- u32 err_frm;
+ __le16 status;
+ __le32 ts32;
+ __le32 err_frm;
};
struct __packed pcan_usb_pro_rxts {
u8 data_type;
u8 dummy[3];
- u32 ts64[2];
+ __le32 ts64[2];
};
struct __packed pcan_usb_pro_txmsg {
@@ -157,7 +157,7 @@
u8 client;
u8 flags;
u8 len;
- u32 id;
+ __le32 id;
u8 data[8];
};
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 51b68d1..a50891f5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -473,7 +473,6 @@
memset(&rdata->tx, 0, sizeof(rdata->tx));
memset(&rdata->rx, 0, sizeof(rdata->rx));
- rdata->interrupt = 0;
rdata->mapped_as_page = 0;
if (rdata->state_saved) {
@@ -597,7 +596,11 @@
}
}
- /* Save the skb address in the last entry */
+ /* Save the skb address in the last entry. We always have some data
+ * that has been mapped so rdata is always advanced past the last
+ * piece of mapped data - use the entry pointed to by cur_index - 1.
+ */
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
rdata->skb = skb;
/* Save the number of descriptor entries used */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index bf6bf11..7bb5f07 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1839,7 +1839,7 @@
spin_lock_irqsave(&ring->lock, flags);
while ((processed < XGBE_TX_DESC_MAX_PROC) &&
- (ring->dirty < ring->cur)) {
+ (ring->dirty != ring->cur)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
rdesc = rdata->rdesc;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 549549e..778e4cd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -8119,10 +8119,11 @@
case SFP_EEPROM_CON_TYPE_VAL_LC:
case SFP_EEPROM_CON_TYPE_VAL_RJ45:
check_limiting_mode = 1;
- if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
+ if (((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
(SFP_EEPROM_10G_COMP_CODE_SR_MASK |
SFP_EEPROM_10G_COMP_CODE_LR_MASK |
- SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) {
+ SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) &&
+ (val[SFP_EEPROM_1G_COMP_CODE_ADDR] != 0)) {
DP(NETIF_MSG_LINK, "1G SFP module detected\n");
phy->media_type = ETH_PHY_SFP_1G_FIBER;
if (phy->req_line_speed != SPEED_1000) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 07c6368..691f0bf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -45,6 +45,7 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
+#include <net/vxlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
@@ -12552,6 +12553,11 @@
return 0;
}
+static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev)
+{
+ return vxlan_gso_check(skb);
+}
+
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -12583,6 +12589,7 @@
#endif
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
+ .ndo_gso_check = bnx2x_gso_check,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index c38a936..2c37e1b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -50,13 +50,13 @@
#include "cxgb4_uld.h"
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0B
-#define T4FW_VERSION_MICRO 0x1B
+#define T4FW_VERSION_MINOR 0x0C
+#define T4FW_VERSION_MICRO 0x19
#define T4FW_VERSION_BUILD 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0B
-#define T5FW_VERSION_MICRO 0x1B
+#define T5FW_VERSION_MINOR 0x0C
+#define T5FW_VERSION_MICRO 0x19
#define T5FW_VERSION_BUILD 0x00
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 4c26be9..c8c5b3d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2354,9 +2354,13 @@
SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
SUPPORTED_10000baseKX4_Full;
else if (type == FW_PORT_TYPE_FIBER_XFI ||
- type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
+ type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
v |= SUPPORTED_FIBRE;
- else if (type == FW_PORT_TYPE_BP40_BA)
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_BP40_BA)
v |= SUPPORTED_40000baseSR4_Full;
if (caps & FW_PORT_CAP_ANEG)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a9323bd..67345c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -1132,6 +1132,27 @@
return FLASH_CFG_START;
}
+/* Return TRUE if the specified firmware matches the adapter. I.e. T4
+ * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
+ * and emit an error message for mismatched firmware to save our caller the
+ * effort ...
+ */
+static bool t4_fw_matches_chip(const struct adapter *adap,
+ const struct fw_hdr *hdr)
+{
+ /* The expression below will return FALSE for any unsupported adapter
+ * which will keep us "honest" in the future ...
+ */
+ if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
+ (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
+ return true;
+
+ dev_err(adap->pdev_dev,
+ "FW image (%d) is not suitable for this adapter (%d)\n",
+ hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
+ return false;
+}
+
/**
* t4_load_fw - download firmware
* @adap: the adapter
@@ -1171,6 +1192,8 @@
FW_MAX_SIZE);
return -EFBIG;
}
+ if (!t4_fw_matches_chip(adap, hdr))
+ return -EINVAL;
for (csum = 0, i = 0; i < size / sizeof(csum); i++)
csum += ntohl(p[i]);
@@ -3083,6 +3106,9 @@
const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
int reset, ret;
+ if (!t4_fw_matches_chip(adap, fw_hdr))
+ return -EINVAL;
+
ret = t4_fw_halt(adap, mbox, force);
if (ret < 0 && !force)
return ret;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index ccc3ce2..96208f1 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -216,7 +216,7 @@
/* Various constants */
/* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS 16
+#define MVNETA_TXDONE_COAL_PKTS 1
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
@@ -1721,6 +1721,7 @@
u16 txq_id = skb_get_queue_mapping(skb);
struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
struct mvneta_tx_desc *tx_desc;
+ int len = skb->len;
int frags = 0;
u32 tx_cmd;
@@ -1788,7 +1789,7 @@
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ stats->tx_bytes += len;
u64_stats_update_end(&stats->syncp);
} else {
dev->stats.tx_dropped++;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 38f7cee..af829c5 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1173,8 +1173,8 @@
pep->rx_used_desc_q = 0;
pep->rx_curr_desc_q = 0;
netif_carrier_off(dev);
- eth_port_start(dev);
napi_enable(&pep->napi);
+ eth_port_start(dev);
return 0;
out_free_rx_skb:
rxq_deinit(dev);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 264eab7..7173836 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3433,10 +3433,9 @@
if (status & IS_HW_ERR)
skge_error_irq(hw);
-
+out:
skge_write32(hw, B0_IMSK, hw->intr_mask);
skge_read32(hw, B0_IMSK);
-out:
spin_unlock(&hw->hw_lock);
return IRQ_RETVAL(handled);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index f8ab220..867a6a3 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2416,6 +2416,7 @@
imask = sky2_read32(hw, B0_IMSK);
sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
dev->trans_start = jiffies; /* prevent tx timeout */
napi_disable(&hw->napi);
@@ -3484,8 +3485,8 @@
int i;
if (hw->flags & SKY2_HW_IRQ_SETUP) {
- sky2_read32(hw, B0_IMSK);
sky2_write32(hw, B0_IMSK, 0);
+ sky2_read32(hw, B0_IMSK);
synchronize_irq(hw->pdev->irq);
napi_disable(&hw->napi);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index ad2e285..c29ba80 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -918,21 +918,13 @@
return ret;
}
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
- int reserve;
+ uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
- reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
if (reserve)
- skb_reserve(skb, reserve);
+ skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}
-#else
-static void sh_eth_set_receive_align(struct sk_buff *skb)
-{
- skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
-}
-#endif
/* CPU <-> EDMAC endian convert */
@@ -1120,6 +1112,7 @@
struct sh_eth_txdesc *txdesc = NULL;
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
+ int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
mdp->cur_rx = 0;
mdp->cur_tx = 0;
@@ -1132,21 +1125,21 @@
for (i = 0; i < mdp->num_rx_ring; i++) {
/* skb */
mdp->rx_skbuff[i] = NULL;
- skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+ skb = netdev_alloc_skb(ndev, skbuff_size);
mdp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
- dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
- DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
/* RX descriptor */
rxdesc = &mdp->rx_ring[i];
+ /* The size of the buffer is a multiple of 16 bytes. */
+ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+ dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
+ DMA_FROM_DEVICE);
rxdesc->addr = virt_to_phys(skb->data);
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
- /* The size of the buffer is 16 byte boundary. */
- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
/* Rx descriptor address set */
if (i == 0) {
sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
@@ -1399,6 +1392,7 @@
struct sk_buff *skb;
u16 pkt_len = 0;
u32 desc_status;
+ int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
@@ -1447,7 +1441,7 @@
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
- mdp->rx_buf_sz,
+ ALIGN(mdp->rx_buf_sz, 16),
DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
@@ -1467,13 +1461,13 @@
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
if (mdp->rx_skbuff[entry] == NULL) {
- skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+ skb = netdev_alloc_skb(ndev, skbuff_size);
mdp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
- DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
+ dma_map_single(&ndev->dev, skb->data,
+ rxdesc->buffer_length, DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
rxdesc->addr = virt_to_phys(skb->data);
@@ -2043,6 +2037,8 @@
if (ret)
goto out_free_irq;
+ mdp->is_opened = 1;
+
return ret;
out_free_irq:
@@ -2132,6 +2128,36 @@
return NETDEV_TX_OK;
}
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (sh_eth_is_rz_fast_ether(mdp))
+ return &ndev->stats;
+
+ if (!mdp->is_opened)
+ return &ndev->stats;
+
+ ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
+ sh_eth_write(ndev, 0, TROCR); /* (write clear) */
+ ndev->stats.collisions += sh_eth_read(ndev, CDCR);
+ sh_eth_write(ndev, 0, CDCR); /* (write clear) */
+ ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
+ sh_eth_write(ndev, 0, LCCR); /* (write clear) */
+
+ if (sh_eth_is_gether(mdp)) {
+ ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
+ sh_eth_write(ndev, 0, CERCR); /* (write clear) */
+ ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
+ sh_eth_write(ndev, 0, CEECR); /* (write clear) */
+ } else {
+ ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
+ sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
+ }
+
+ return &ndev->stats;
+}
+
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
@@ -2146,6 +2172,7 @@
sh_eth_write(ndev, 0, EDTRR);
sh_eth_write(ndev, 0, EDRRR);
+ sh_eth_get_stats(ndev);
/* PHY Disconnect */
if (mdp->phydev) {
phy_stop(mdp->phydev);
@@ -2164,38 +2191,11 @@
pm_runtime_put_sync(&mdp->pdev->dev);
+ mdp->is_opened = 0;
+
return 0;
}
-static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
-
- if (sh_eth_is_rz_fast_ether(mdp))
- return &ndev->stats;
-
- pm_runtime_get_sync(&mdp->pdev->dev);
-
- ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
- sh_eth_write(ndev, 0, TROCR); /* (write clear) */
- ndev->stats.collisions += sh_eth_read(ndev, CDCR);
- sh_eth_write(ndev, 0, CDCR); /* (write clear) */
- ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
- sh_eth_write(ndev, 0, LCCR); /* (write clear) */
- if (sh_eth_is_gether(mdp)) {
- ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
- sh_eth_write(ndev, 0, CERCR); /* (write clear) */
- ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
- sh_eth_write(ndev, 0, CEECR); /* (write clear) */
- } else {
- ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
- sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
- }
- pm_runtime_put_sync(&mdp->pdev->dev);
-
- return &ndev->stats;
-}
-
/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index b37c427..22301bf 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -162,9 +162,9 @@
/* Driver's parameters */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
-#define SH4_SKB_RX_ALIGN 32
+#define SH_ETH_RX_ALIGN 32
#else
-#define SH2_SH3_SKB_RX_ALIGN 2
+#define SH_ETH_RX_ALIGN 2
#endif
/* Register's bits
@@ -522,6 +522,7 @@
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
+ unsigned is_opened:1;
};
static inline void sh_eth_soft_swap(char *src, int len)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 3a08a1f..771cda2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -696,7 +696,7 @@
(ec->tx_max_coalesced_frames == 0))
return -EINVAL;
- if ((ec->tx_coalesce_usecs > STMMAC_COAL_TX_TIMER) ||
+ if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) ||
(ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
return -EINVAL;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 07054ce..4032b17 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -276,6 +276,15 @@
plat_dat = dev_get_platdata(&pdev->dev);
+ if (!plat_dat)
+ plat_dat = devm_kzalloc(&pdev->dev,
+ sizeof(struct plat_stmmacenet_data),
+ GFP_KERNEL);
+ if (!plat_dat) {
+ pr_err("%s: ERROR: no memory", __func__);
+ return -ENOMEM;
+ }
+
/* Set default value for multicast hash bins */
plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -283,15 +292,6 @@
plat_dat->unicast_filter_entries = 1;
if (pdev->dev.of_node) {
- if (!plat_dat)
- plat_dat = devm_kzalloc(&pdev->dev,
- sizeof(struct plat_stmmacenet_data),
- GFP_KERNEL);
- if (!plat_dat) {
- pr_err("%s: ERROR: no memory", __func__);
- return -ENOMEM;
- }
-
ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
if (ret) {
pr_err("%s: main dt probe failed", __func__);
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index fab0d4b..d44cd19 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -404,6 +404,7 @@
int err;
long handle;
struct xenbus_device *dev = be->dev;
+ struct xenvif *vif;
if (be->vif != NULL)
return 0;
@@ -414,13 +415,13 @@
return (err < 0) ? err : -EINVAL;
}
- be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
- if (IS_ERR(be->vif)) {
- err = PTR_ERR(be->vif);
- be->vif = NULL;
+ vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
+ if (IS_ERR(vif)) {
+ err = PTR_ERR(vif);
xenbus_dev_fatal(dev, err, "creating interface");
return err;
}
+ be->vif = vif;
kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
return 0;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 88a70f5..2f0a9ce 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -473,9 +473,6 @@
len = skb_frag_size(frag);
offset = frag->page_offset;
- /* Data must not cross a page boundary. */
- BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
-
/* Skip unused frames from start of page */
page += offset >> PAGE_SHIFT;
offset &= ~PAGE_MASK;
@@ -483,8 +480,6 @@
while (len > 0) {
unsigned long bytes;
- BUG_ON(offset >= PAGE_SIZE);
-
bytes = PAGE_SIZE - offset;
if (bytes > len)
bytes = len;
@@ -609,6 +604,9 @@
slots, skb->len);
if (skb_linearize(skb))
goto drop;
+ data = skb->data;
+ offset = offset_in_page(data);
+ len = skb_headlen(skb);
}
spin_lock_irqsave(&queue->tx_lock, flags);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 30e97bc..d134710 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -964,8 +964,6 @@
int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
phys_addr_t size, bool nomap)
{
- if (memblock_is_region_reserved(base, size))
- return -EBUSY;
if (nomap)
return memblock_remove(base, size);
return memblock_reserve(base, size);
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 3d43874..19bb19c 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -276,6 +276,7 @@
struct resource all;
struct resource io;
+ struct resource pio;
struct resource mem;
struct resource prefetch;
struct resource busn;
@@ -658,7 +659,6 @@
{
struct tegra_pcie *pcie = sys_to_pcie(sys);
int err;
- phys_addr_t io_start;
err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
if (err < 0)
@@ -668,14 +668,12 @@
if (err)
return err;
- io_start = pci_pio_to_address(pcie->io.start);
-
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource_offset(&sys->resources, &pcie->prefetch,
sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
- pci_ioremap_io(nr * SZ_64K, io_start);
+ pci_ioremap_io(pcie->pio.start, pcie->io.start);
return 1;
}
@@ -786,7 +784,6 @@
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
u32 fpci_bar, size, axi_address;
- phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
/* Bar 0: type 1 extended configuration space */
fpci_bar = 0xfe100000;
@@ -799,7 +796,7 @@
/* Bar 1: downstream IO bar */
fpci_bar = 0xfdfc0000;
size = resource_size(&pcie->io);
- axi_address = io_start;
+ axi_address = pcie->io.start;
afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
@@ -1690,8 +1687,23 @@
switch (res.flags & IORESOURCE_TYPE_BITS) {
case IORESOURCE_IO:
- memcpy(&pcie->io, &res, sizeof(res));
- pcie->io.name = np->full_name;
+ memcpy(&pcie->pio, &res, sizeof(res));
+ pcie->pio.name = np->full_name;
+
+ /*
+ * The Tegra PCIe host bridge uses this to program the
+ * mapping of the I/O space to the physical address,
+ * so we override the .start and .end fields here that
+ * of_pci_range_to_resource() converted to I/O space.
+ * We also set the IORESOURCE_MEM type to clarify that
+ * the resource is in the physical memory space.
+ */
+ pcie->io.start = range.cpu_addr;
+ pcie->io.end = range.cpu_addr + range.size - 1;
+ pcie->io.flags = IORESOURCE_MEM;
+ pcie->io.name = "I/O";
+
+ memcpy(&res, &pcie->io, sizeof(res));
break;
case IORESOURCE_MEM:
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d3220d3..dcd9be3 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1011,8 +1011,6 @@
bytes = min(bytes, working_bytes);
kaddr = kmap_atomic(page_out);
memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
- if (*pg_index == (vcnt - 1) && *pg_offset == 0)
- memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
kunmap_atomic(kaddr);
flush_dcache_page(page_out);
@@ -1054,3 +1052,34 @@
return 1;
}
+
+/*
+ * When uncompressing data, we need to make sure to zero any parts of
+ * the biovec that were not filled in by the decompression code. pg_index
+ * and pg_offset indicate the last page and the last offset of that page
+ * that have been filled in. This will zero everything remaining in the
+ * biovec.
+ */
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+ unsigned long pg_index,
+ unsigned long pg_offset)
+{
+ while (pg_index < vcnt) {
+ struct page *page = bvec[pg_index].bv_page;
+ unsigned long off = bvec[pg_index].bv_offset;
+ unsigned long len = bvec[pg_index].bv_len;
+
+ if (pg_offset < off)
+ pg_offset = off;
+ if (pg_offset < off + len) {
+ unsigned long bytes = off + len - pg_offset;
+ char *kaddr;
+
+ kaddr = kmap_atomic(page);
+ memset(kaddr + pg_offset, 0, bytes);
+ kunmap_atomic(kaddr);
+ }
+ pg_index++;
+ pg_offset = 0;
+ }
+}
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 0c803b4..d181f70 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -45,7 +45,9 @@
unsigned long nr_pages);
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags);
-
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+ unsigned long pg_index,
+ unsigned long pg_offset);
struct btrfs_compress_op {
struct list_head *(*alloc_workspace)(void);
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 78285f3..617553c 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -373,6 +373,8 @@
}
done:
kunmap(pages_in[page_in_index]);
+ if (!ret)
+ btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
return ret;
}
@@ -410,10 +412,23 @@
goto out;
}
+ /*
+ * the caller is already checking against PAGE_SIZE, but let's
+ * move this check closer to the memcpy/memset
+ */
+ destlen = min_t(unsigned long, destlen, PAGE_SIZE);
bytes = min_t(unsigned long, destlen, out_len - start_byte);
kaddr = kmap_atomic(dest_page);
memcpy(kaddr, workspace->buf + start_byte, bytes);
+
+ /*
+ * btrfs_getblock is doing a zero on the tail of the page too,
+ * but this will cover anything missing from the decompressed
+ * data.
+ */
+ if (bytes < destlen)
+ memset(kaddr+bytes, 0, destlen-bytes);
kunmap_atomic(kaddr);
out:
return ret;
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 759fa4e..fb22fd8 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -299,6 +299,8 @@
zlib_inflateEnd(&workspace->strm);
if (data_in)
kunmap(pages_in[page_in_index]);
+ if (!ret)
+ btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
return ret;
}
@@ -310,10 +312,14 @@
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0;
int wbits = MAX_WBITS;
- unsigned long bytes_left = destlen;
+ unsigned long bytes_left;
unsigned long total_out = 0;
+ unsigned long pg_offset = 0;
char *kaddr;
+ destlen = min_t(unsigned long, destlen, PAGE_SIZE);
+ bytes_left = destlen;
+
workspace->strm.next_in = data_in;
workspace->strm.avail_in = srclen;
workspace->strm.total_in = 0;
@@ -341,7 +347,6 @@
unsigned long buf_start;
unsigned long buf_offset;
unsigned long bytes;
- unsigned long pg_offset = 0;
ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
if (ret != Z_OK && ret != Z_STREAM_END)
@@ -384,6 +389,17 @@
ret = 0;
zlib_inflateEnd(&workspace->strm);
+
+ /*
+ * this should only happen if zlib returned fewer bytes than we
+ * expected. btrfs_get_block is responsible for zeroing from the
+ * end of the inline extent (destlen) to the end of the page
+ */
+ if (pg_offset < destlen) {
+ kaddr = kmap_atomic(dest_page);
+ memset(kaddr + pg_offset, 0, destlen - pg_offset);
+ kunmap_atomic(kaddr);
+ }
return ret;
}
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 6df8d3d..b8b92c2 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -736,7 +736,12 @@
}
alias = d_find_alias(inode);
- if (alias && !vfat_d_anon_disconn(alias)) {
+ /*
+ * Checking "alias->d_parent == dentry->d_parent" to make sure
+ * the FS is not corrupted (especially a double-linked dir).
+ */
+ if (alias && alias->d_parent == dentry->d_parent &&
+ !vfat_d_anon_disconn(alias)) {
/*
* This inode has non anonymous-DCACHE_DISCONNECTED
* dentry. This means, the user did ->lookup() by an
@@ -755,12 +760,9 @@
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
- dentry->d_time = dentry->d_parent->d_inode->i_version;
- dentry = d_splice_alias(inode, dentry);
- if (dentry)
- dentry->d_time = dentry->d_parent->d_inode->i_version;
- return dentry;
-
+ if (!inode)
+ dentry->d_time = dir->i_version;
+ return d_splice_alias(inode, dentry);
error:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
return ERR_PTR(err);
@@ -793,7 +795,6 @@
inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
- dentry->d_time = dentry->d_parent->d_inode->i_version;
d_instantiate(dentry, inode);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -824,6 +825,7 @@
clear_nlink(inode);
inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
fat_detach(inode);
+ dentry->d_time = dir->i_version;
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -849,6 +851,7 @@
clear_nlink(inode);
inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
fat_detach(inode);
+ dentry->d_time = dir->i_version;
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -889,7 +892,6 @@
inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
- dentry->d_time = dentry->d_parent->d_inode->i_version;
d_instantiate(dentry, inode);
mutex_unlock(&MSDOS_SB(sb)->s_lock);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e4dc747..1df94fa 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1853,13 +1853,12 @@
journal->j_chksum_driver = NULL;
return 0;
}
- }
- /* Precompute checksum seed for all metadata */
- if (jbd2_journal_has_csum_v2or3(journal))
+ /* Precompute checksum seed for all metadata */
journal->j_csum_seed = jbd2_chksum(journal, ~0,
sb->s_uuid,
sizeof(sb->s_uuid));
+ }
}
/* If enabling v1 checksums, downgrade superblock */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index dc9d2a2..09a819e 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -201,8 +201,8 @@
#ifndef CONFIG_IP_MULTIPLE_TABLES
-#define TABLE_LOCAL_INDEX 0
-#define TABLE_MAIN_INDEX 1
+#define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1))
+#define TABLE_MAIN_INDEX (RT_TABLE_MAIN & (FIB_TABLE_HASHSZ - 1))
static inline struct fib_table *fib_get_table(struct net *net, u32 id)
{
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index a1e8175..5b6a36c 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -429,7 +429,7 @@
header-y += virtio_pci.h
header-y += virtio_ring.h
header-y += virtio_rng.h
-header=y += vm_sockets.h
+header-y += vm_sockets.h
header-y += vt.h
header-y += wait.h
header-y += wanrouter.h
diff --git a/ipc/sem.c b/ipc/sem.c
index 454f6c6..53c3310 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -507,13 +507,6 @@
return retval;
}
- id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
- if (id < 0) {
- ipc_rcu_putref(sma, sem_rcu_free);
- return id;
- }
- ns->used_sems += nsems;
-
sma->sem_base = (struct sem *) &sma[1];
for (i = 0; i < nsems; i++) {
@@ -528,6 +521,14 @@
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
+
+ id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
+ if (id < 0) {
+ ipc_rcu_putref(sma, sem_rcu_free);
+ return id;
+ }
+ ns->used_sems += nsems;
+
sem_unlock(sma, -1);
rcu_read_unlock();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 24beb9b..89e7283 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2874,10 +2874,14 @@
* or we have been woken up remotely but the IPI has not yet arrived,
* we haven't yet exited the RCU idle mode. Do it here manually until
* we find a better solution.
+ *
+ * NB: There are buggy callers of this function. Ideally we
+ * should warn if prev_state != IN_USER, but that will trigger
+ * too frequently to make sense yet.
*/
- user_exit();
+ enum ctx_state prev_state = exception_enter();
schedule();
- user_enter();
+ exception_exit(prev_state);
}
#endif
diff --git a/lib/genalloc.c b/lib/genalloc.c
index cce4dd6..2e65d20 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -598,6 +598,7 @@
return pool;
}
+EXPORT_SYMBOL(devm_gen_pool_create);
/**
* dev_get_gen_pool - Obtain the gen_pool (if any) for a device
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 0922579..5e25627 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -28,7 +28,7 @@
continue;
total += zone->present_pages;
- reserved = zone->present_pages - zone->managed_pages;
+ reserved += zone->present_pages - zone->managed_pages;
if (is_highmem_idx(zoneid))
highmem += zone->present_pages;
diff --git a/mm/frontswap.c b/mm/frontswap.c
index c30eec5..f2a3571 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -244,8 +244,10 @@
the (older) page from frontswap
*/
inc_frontswap_failed_stores();
- if (dup)
+ if (dup) {
__frontswap_clear(sis, offset);
+ frontswap_ops->invalidate_page(type, offset);
+ }
}
if (frontswap_writethrough_enabled)
/* report failure so swap also writes to swap device */
diff --git a/mm/memory.c b/mm/memory.c
index 3e50383..d5f2ae9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -815,20 +815,20 @@
if (!pte_file(pte)) {
swp_entry_t entry = pte_to_swp_entry(pte);
- if (swap_duplicate(entry) < 0)
- return entry.val;
+ if (likely(!non_swap_entry(entry))) {
+ if (swap_duplicate(entry) < 0)
+ return entry.val;
- /* make sure dst_mm is on swapoff's mmlist. */
- if (unlikely(list_empty(&dst_mm->mmlist))) {
- spin_lock(&mmlist_lock);
- if (list_empty(&dst_mm->mmlist))
- list_add(&dst_mm->mmlist,
- &src_mm->mmlist);
- spin_unlock(&mmlist_lock);
- }
- if (likely(!non_swap_entry(entry)))
+ /* make sure dst_mm is on swapoff's mmlist. */
+ if (unlikely(list_empty(&dst_mm->mmlist))) {
+ spin_lock(&mmlist_lock);
+ if (list_empty(&dst_mm->mmlist))
+ list_add(&dst_mm->mmlist,
+ &src_mm->mmlist);
+ spin_unlock(&mmlist_lock);
+ }
rss[MM_SWAPENTS]++;
- else if (is_migration_entry(entry)) {
+ } else if (is_migration_entry(entry)) {
page = migration_entry_to_page(entry);
if (PageAnon(page))
diff --git a/mm/mmap.c b/mm/mmap.c
index 87e82b3..ae91989 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -776,8 +776,11 @@
* shrinking vma had, to cover any anon pages imported.
*/
if (exporter && exporter->anon_vma && !importer->anon_vma) {
- if (anon_vma_clone(importer, exporter))
- return -ENOMEM;
+ int error;
+
+ error = anon_vma_clone(importer, exporter);
+ if (error)
+ return error;
importer->anon_vma = exporter->anon_vma;
}
}
@@ -2469,7 +2472,8 @@
if (err)
goto out_free_vma;
- if (anon_vma_clone(new, vma))
+ err = anon_vma_clone(new, vma);
+ if (err)
goto out_free_mpol;
if (new->vm_file)
diff --git a/mm/rmap.c b/mm/rmap.c
index 19886fb..3e4c721 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -274,6 +274,7 @@
{
struct anon_vma_chain *avc;
struct anon_vma *anon_vma;
+ int error;
/* Don't bother if the parent process has no anon_vma here. */
if (!pvma->anon_vma)
@@ -283,8 +284,9 @@
* First, attach the new VMA to the parent VMA's anon_vmas,
* so rmap can find non-COWed pages in child processes.
*/
- if (anon_vma_clone(vma, pvma))
- return -ENOMEM;
+ error = anon_vma_clone(vma, pvma);
+ if (error)
+ return error;
/* Then add our own anon_vma. */
anon_vma = anon_vma_alloc();
diff --git a/mm/slab.c b/mm/slab.c
index eb2b2ea..f34e053 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3076,7 +3076,7 @@
void *obj;
int x;
- VM_BUG_ON(nodeid > num_online_nodes());
+ VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
n = get_node(cachep, nodeid);
BUG_ON(!n);
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index d4042e7..c5afd57 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -165,6 +165,7 @@
unsigned long scanned;
unsigned long reclaimed;
+ spin_lock(&vmpr->sr_lock);
/*
* Several contexts might be calling vmpressure(), so it is
* possible that the work was rescheduled again before the old
@@ -173,11 +174,12 @@
* here. No need for any locks here since we don't care if
* vmpr->reclaimed is in sync.
*/
- if (!vmpr->scanned)
- return;
-
- spin_lock(&vmpr->sr_lock);
scanned = vmpr->scanned;
+ if (!scanned) {
+ spin_unlock(&vmpr->sr_lock);
+ return;
+ }
+
reclaimed = vmpr->reclaimed;
vmpr->scanned = 0;
vmpr->reclaimed = 0;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index eaa057f..1ad61a2 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1523,6 +1523,7 @@
goto errout;
}
if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+ put_net(net);
err = -EPERM;
goto errout;
}
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index bb5947b..51973dd 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -247,6 +247,9 @@
err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
rcu_read_unlock();
+
+ skb_set_inner_mac_header(skb, nhoff + grehlen);
+
return err;
}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 33f5ff0..a3f72d7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -623,6 +623,7 @@
arg.iov[0].iov_base = (unsigned char *)&rep;
arg.iov[0].iov_len = sizeof(rep.th);
+ net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
hash_location = tcp_parse_md5sig_option(th);
if (!sk && hash_location) {
@@ -633,7 +634,7 @@
* Incoming packet is checked with md5 hash with finding key,
* no RST generated if md5 hash doesn't match.
*/
- sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
+ sk1 = __inet_lookup_listener(net,
&tcp_hashinfo, ip_hdr(skb)->saddr,
th->source, ip_hdr(skb)->daddr,
ntohs(th->source), inet_iif(skb));
@@ -681,7 +682,6 @@
if (sk)
arg.bound_dev_if = sk->sk_bound_dev_if;
- net = dev_net(skb_dst(skb)->dev);
arg.tos = ip_hdr(skb)->tos;
ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d06af89..5ff8780 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -787,16 +787,16 @@
.queue_hash_add = inet6_csk_reqsk_queue_hash_add,
};
-static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
- u32 tsval, u32 tsecr, int oif,
- struct tcp_md5sig_key *key, int rst, u8 tclass,
- u32 label)
+static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
+ u32 ack, u32 win, u32 tsval, u32 tsecr,
+ int oif, struct tcp_md5sig_key *key, int rst,
+ u8 tclass, u32 label)
{
const struct tcphdr *th = tcp_hdr(skb);
struct tcphdr *t1;
struct sk_buff *buff;
struct flowi6 fl6;
- struct net *net = dev_net(skb_dst(skb)->dev);
+ struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
struct sock *ctl_sk = net->ipv6.tcp_sk;
unsigned int tot_len = sizeof(struct tcphdr);
struct dst_entry *dst;
@@ -946,7 +946,7 @@
(th->doff << 2);
oif = sk ? sk->sk_bound_dev_if : 0;
- tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
+ tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
#ifdef CONFIG_TCP_MD5SIG
release_sk1:
@@ -957,13 +957,13 @@
#endif
}
-static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
- u32 win, u32 tsval, u32 tsecr, int oif,
+static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
+ u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
struct tcp_md5sig_key *key, u8 tclass,
u32 label)
{
- tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass,
- label);
+ tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
+ tclass, label);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -971,7 +971,7 @@
struct inet_timewait_sock *tw = inet_twsk(sk);
struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
- tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcp_time_stamp + tcptw->tw_ts_offset,
tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
@@ -986,10 +986,10 @@
/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
* sk->sk_state == TCP_SYN_RECV -> for Fast Open.
*/
- tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+ tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
- tcp_rsk(req)->rcv_nxt,
- req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
+ tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+ tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
0, 0);
}
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index df3c7f2..9645a21 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -140,7 +140,7 @@
if (match->key->eth.type == htons(ETH_P_ARP)
|| match->key->eth.type == htons(ETH_P_RARP)) {
key_expected |= 1 << OVS_KEY_ATTR_ARP;
- if (match->mask && (match->mask->key.tp.src == htons(0xff)))
+ if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
}
@@ -221,7 +221,7 @@
htons(NDISC_NEIGHBOUR_SOLICITATION) ||
match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
key_expected |= 1 << OVS_KEY_ATTR_ND;
- if (match->mask && (match->mask->key.tp.src == htons(0xffff)))
+ if (match->mask && (match->mask->key.tp.src == htons(0xff)))
mask_allowed |= 1 << OVS_KEY_ATTR_ND;
}
}
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 88618f8..c54c9d9 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -22,8 +22,9 @@
This code is considered to be experimental.
To administer these schedulers, you'll need the user-level utilities
- from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>.
- That package also contains some documentation; for more, check out
+ from the package iproute2+tc at
+ <https://www.kernel.org/pub/linux/utils/net/iproute2/>. That package
+ also contains some documentation; for more, check out
<http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2>.
This Quality of Service (QoS) support will enable you to use
@@ -336,7 +337,7 @@
of virtual machines by allowing the generated network output to be rolled
back if needed.
- For more information, please refer to http://wiki.xensource.com/xenwiki/Remus
+ For more information, please refer to <http://wiki.xenproject.org/wiki/Remus>
Say Y here if you are using this kernel for Xen dom0 and
want to protect Xen guests with Remus.
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index b9ca32e..1e52dec 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -94,7 +94,7 @@
TC_H_MIN(skb->priority) <= q->flows_cnt)
return TC_H_MIN(skb->priority);
- filter = rcu_dereference(q->filter_list);
+ filter = rcu_dereference_bh(q->filter_list);
if (!filter)
return fq_codel_hash(q, skb) + 1;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 42dffd4..fc5e45b 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -401,12 +401,12 @@
sk = chunk->skb->sk;
/* Allocate the new skb. */
- nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
+ nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC);
if (!nskb)
goto nomem;
/* Make sure the outbound skb has enough header room reserved. */
- skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);
+ skb_reserve(nskb, packet->overhead + MAX_HEADER);
/* Set the owning socket so that we know where to get the
* destination IP address.
diff --git a/security/keys/internal.h b/security/keys/internal.h
index b8960c4..200e378 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -117,6 +117,7 @@
#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0004 /* Don't update times */
#define KEYRING_SEARCH_NO_CHECK_PERM 0x0008 /* Don't check permissions */
#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0010 /* Give an error on excessive depth */
+#define KEYRING_SEARCH_SKIP_EXPIRED 0x0020 /* Ignore expired keys (intention to replace) */
int (*iterator)(const void *object, void *iterator_data);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index eff88a5..4743d71 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -26,6 +26,8 @@
#include <asm/uaccess.h>
#include "internal.h"
+#define KEY_MAX_DESC_SIZE 4096
+
static int key_get_type_from_user(char *type,
const char __user *_type,
unsigned len)
@@ -78,7 +80,7 @@
description = NULL;
if (_description) {
- description = strndup_user(_description, PAGE_SIZE);
+ description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
@@ -177,7 +179,7 @@
goto error;
/* pull the description into kernel space */
- description = strndup_user(_description, PAGE_SIZE);
+ description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
@@ -287,7 +289,7 @@
/* fetch the name from userspace */
name = NULL;
if (_name) {
- name = strndup_user(_name, PAGE_SIZE);
+ name = strndup_user(_name, KEY_MAX_DESC_SIZE);
if (IS_ERR(name)) {
ret = PTR_ERR(name);
goto error;
@@ -562,8 +564,9 @@
{
struct key *key, *instkey;
key_ref_t key_ref;
- char *tmpbuf;
+ char *infobuf;
long ret;
+ int desclen, infolen;
key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
if (IS_ERR(key_ref)) {
@@ -586,38 +589,31 @@
}
okay:
- /* calculate how much description we're going to return */
- ret = -ENOMEM;
- tmpbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmpbuf)
- goto error2;
-
key = key_ref_to_ptr(key_ref);
+ desclen = strlen(key->description);
- ret = snprintf(tmpbuf, PAGE_SIZE - 1,
- "%s;%d;%d;%08x;%s",
- key->type->name,
- from_kuid_munged(current_user_ns(), key->uid),
- from_kgid_munged(current_user_ns(), key->gid),
- key->perm,
- key->description ?: "");
-
- /* include a NUL char at the end of the data */
- if (ret > PAGE_SIZE - 1)
- ret = PAGE_SIZE - 1;
- tmpbuf[ret] = 0;
- ret++;
+ /* calculate how much information we're going to return */
+ ret = -ENOMEM;
+ infobuf = kasprintf(GFP_KERNEL,
+ "%s;%d;%d;%08x;",
+ key->type->name,
+ from_kuid_munged(current_user_ns(), key->uid),
+ from_kgid_munged(current_user_ns(), key->gid),
+ key->perm);
+ if (!infobuf)
+ goto error2;
+ infolen = strlen(infobuf);
+ ret = infolen + desclen + 1;
/* consider returning the data */
- if (buffer && buflen > 0) {
- if (buflen > ret)
- buflen = ret;
-
- if (copy_to_user(buffer, tmpbuf, buflen) != 0)
+ if (buffer && buflen >= ret) {
+ if (copy_to_user(buffer, infobuf, infolen) != 0 ||
+ copy_to_user(buffer + infolen, key->description,
+ desclen + 1) != 0)
ret = -EFAULT;
}
- kfree(tmpbuf);
+ kfree(infobuf);
error2:
key_ref_put(key_ref);
error:
@@ -649,7 +645,7 @@
if (ret < 0)
goto error;
- description = strndup_user(_description, PAGE_SIZE);
+ description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 8177010..e72548b 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -546,7 +546,8 @@
}
if (key->expiry && ctx->now.tv_sec >= key->expiry) {
- ctx->result = ERR_PTR(-EKEYEXPIRED);
+ if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
+ ctx->result = ERR_PTR(-EKEYEXPIRED);
kleave(" = %d [expire]", ctx->skipped_ret);
goto skipped;
}
@@ -628,6 +629,10 @@
ctx->index_key.type->name,
ctx->index_key.description);
+#define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
+ BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
+ (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
+
if (ctx->index_key.description)
ctx->index_key.desc_len = strlen(ctx->index_key.description);
@@ -637,7 +642,6 @@
if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
keyring_compare_object(keyring, &ctx->index_key)) {
ctx->skipped_ret = 2;
- ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK;
switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
case 1:
goto found;
@@ -649,8 +653,6 @@
}
ctx->skipped_ret = 0;
- if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK)
- ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK;
/* Start processing a new keyring */
descend_to_keyring:
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index bb4337c..0c7aea4 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -516,6 +516,8 @@
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_DO_STATE_CHECK |
+ KEYRING_SEARCH_SKIP_EXPIRED),
};
struct key *key;
key_ref_t key_ref;
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 6639e2c..5d672f7 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -249,6 +249,7 @@
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = KEYRING_SEARCH_DO_STATE_CHECK,
};
struct key *authkey;
key_ref_t authkey_ref;