Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6:
firewire: add Kconfig help on building both stacks
firewire: fix async reception on big endian machines
diff --git a/Makefile b/Makefile
index 8a3c271..c6f7b71 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 22
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Holy Dancing Manatees, Batman!
# *DOCUMENTATION*
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 47ff676..ddf9184 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -53,7 +53,7 @@
/*
* Called after processes are frozen, but before we shutdown devices.
*/
-static int at91_pm_prepare(suspend_state_t state)
+static int at91_pm_set_target(suspend_state_t state)
{
target_state = state;
return 0;
@@ -201,7 +201,7 @@
static struct pm_ops at91_pm_ops = {
.valid = at91_pm_valid_state,
- .prepare = at91_pm_prepare,
+ .set_target = at91_pm_set_target,
.enter = at91_pm_enter,
};
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index 6d59378..f6e4694 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -65,7 +65,8 @@
void mtrr_save_fixed_ranges(void *info)
{
- get_fixed_ranges(mtrr_state.fixed_ranges);
+ if (cpu_has_mtrr)
+ get_fixed_ranges(mtrr_state.fixed_ranges);
}
static void print_fixed(unsigned base, unsigned step, const mtrr_type*types)
@@ -469,11 +470,6 @@
}
}
- if (base < 0x100) {
- printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
- base, size);
- return -EINVAL;
- }
/* Check upper bits of base and last are equal and lower bits are 0
for base and 1 for last */
last = base + size - 1;
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index 2d7a510..c6401f9 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -31,7 +31,7 @@
mr r11,r3 /* r11 holds tv */
mr r10,r4 /* r10 holds tz */
bl V_LOCAL_FUNC(__get_datapage) /* get data page */
- cmpldi r10,0 /* check if tv is NULL */
+ cmpldi r11,0 /* check if tv is NULL */
beq 2f
bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index e60d283..6b6165d3 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -329,6 +329,10 @@
if (err != HV_EOK)
printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
ino, cpuid, err);
+ err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+ if (err != HV_EOK)
+ printk("sun4v_intr_setstate(%x): "
+ "err(%d)\n", ino, err);
err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
if (err != HV_EOK)
printk("sun4v_intr_setenabled(%x): err(%d)\n",
@@ -400,6 +404,12 @@
"err(%d)\n",
dev_handle, dev_ino, cpuid, err);
err = sun4v_vintr_set_state(dev_handle, dev_ino,
+ HV_INTR_STATE_IDLE);
+ if (err != HV_EOK)
+ printk("sun4v_vintr_set_state(%lx,%lx,"
+ "HV_INTR_STATE_IDLE): err(%d)\n",
+ dev_handle, dev_ino, err);
+ err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_ENABLED);
if (err != HV_EOK)
printk("sun4v_vintr_set_state(%lx,%lx,"
@@ -420,7 +430,7 @@
dev_handle = ino & IMAP_IGN;
dev_ino = ino & IMAP_INO;
- err = sun4v_vintr_set_state(dev_handle, dev_ino,
+ err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_DISABLED);
if (err != HV_EOK)
printk("sun4v_vintr_set_state(%lx,%lx,"
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index 6d7d415..3cd79ca 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -1398,7 +1398,7 @@
if (!asus_hotk_found) {
acpi_bus_unregister_driver(&asus_hotk_driver);
remove_proc_entry(PROC_ASUS, acpi_root_dir);
- return result;
+ return -ENODEV;
}
asus_backlight_device = backlight_device_register("asus",NULL,NULL,
@@ -1407,6 +1407,7 @@
printk(KERN_ERR "Could not register asus backlight device\n");
asus_backlight_device = NULL;
asus_acpi_exit();
+ return -ENODEV;
}
asus_backlight_device->props.max_brightness = 15;
diff --git a/drivers/misc/blink.c b/drivers/misc/blink.c
index 634431c..97f7253 100644
--- a/drivers/misc/blink.c
+++ b/drivers/misc/blink.c
@@ -16,12 +16,30 @@
add_timer(&blink_timer);
}
-static int blink_init(void)
+static int blink_panic_event(struct notifier_block *blk,
+ unsigned long event, void *arg)
{
- printk(KERN_INFO "Enabling keyboard blinking\n");
do_blink(0);
return 0;
}
+static struct notifier_block blink_notify = {
+ .notifier_call = blink_panic_event,
+};
+
+static __init int blink_init(void)
+{
+ printk(KERN_INFO "Enabling keyboard blinking\n");
+ atomic_notifier_chain_register(&panic_notifier_list, &blink_notify);
+ return 0;
+}
+
+static __exit void blink_remove(void)
+{
+ del_timer_sync(&blink_timer);
+ atomic_notifier_chain_unregister(&panic_notifier_list, &blink_notify);
+}
+
module_init(blink_init);
+module_exit(blink_remove);
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 3098960..3078c41 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -509,7 +509,7 @@
IRDA_DEBUG(0, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
__FUNCTION__, iir, lsr, iobase);
- IRDA_DEBUG(0, "%s(), transmitting=%d, remain=%d, done=%d\n",
+ IRDA_DEBUG(0, "%s(), transmitting=%d, remain=%d, done=%td\n",
__FUNCTION__, self->transmitting, self->tx_buff.len,
self->tx_buff.data - self->tx_buff.head);
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 2803b37..36ab983 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -79,7 +79,7 @@
MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
MODULE_LICENSE("GPL");
-static int smsc_nopnp;
+static int smsc_nopnp = 1;
module_param_named(nopnp, smsc_nopnp, bool, 0);
MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings");
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index 2687c77..114aefa 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
-struct mm_struct;
+#include <linux/sched.h>
struct vm_area_struct;
#endif
diff --git a/include/asm-sparc64/mdesc.h b/include/asm-sparc64/mdesc.h
index 124eb8c..c638398 100644
--- a/include/asm-sparc64/mdesc.h
+++ b/include/asm-sparc64/mdesc.h
@@ -15,6 +15,7 @@
u64 node;
unsigned int unique_id;
unsigned int num_arcs;
+ unsigned int irqs[2];
struct property *properties;
struct mdesc_node *hash_next;
struct mdesc_node *allnodes_next;
diff --git a/include/asm-sparc64/tlb.h b/include/asm-sparc64/tlb.h
index 7af1e11..349d1d3 100644
--- a/include/asm-sparc64/tlb.h
+++ b/include/asm-sparc64/tlb.h
@@ -2,6 +2,7 @@
#define _SPARC64_TLB_H
#include <linux/swap.h>
+#include <linux/pagemap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
diff --git a/include/linux/pci.h b/include/linux/pci.h
index fbf3766..086a0e5 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -748,6 +748,17 @@
static inline void pci_block_user_cfg_access(struct pci_dev *dev) { }
static inline void pci_unblock_user_cfg_access(struct pci_dev *dev) { }
+static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
+{ return NULL; }
+
+static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
+ unsigned int devfn)
+{ return NULL; }
+
+static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
+ unsigned int devfn)
+{ return NULL; }
+
#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 87545e0..b2c4fde 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -110,37 +110,67 @@
#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
/**
- * struct pm_ops - Callbacks for managing platform dependent suspend states.
- * @valid: Callback to determine whether the given state can be entered.
- * Valid states are advertised in /sys/power/state but can still
- * be rejected by prepare or enter if the conditions aren't right.
- * There is a %pm_valid_only_mem function available that can be assigned
- * to this if you only implement mem sleep.
+ * struct pm_ops - Callbacks for managing platform dependent system sleep
+ * states.
*
- * @prepare: Prepare the platform for the given suspend state. Can return a
- * negative error code if necessary.
+ * @valid: Callback to determine if given system sleep state is supported by
+ * the platform.
+ * Valid (ie. supported) states are advertised in /sys/power/state. Note
+ * that it still may be impossible to enter given system sleep state if the
+ * conditions aren't right.
+ * There is the %pm_valid_only_mem function available that can be assigned
+ * to this if the platform only supports mem sleep.
*
- * @enter: Enter the given suspend state, must be assigned. Can return a
- * negative error code if necessary.
+ * @set_target: Tell the platform which system sleep state is going to be
+ * entered.
+ * @set_target() is executed right prior to suspending devices. The
+ * information conveyed to the platform code by @set_target() should be
+ * disregarded by the platform as soon as @finish() is executed and if
+ * @prepare() fails. If @set_target() fails (ie. returns nonzero),
+ * @prepare(), @enter() and @finish() will not be called by the PM core.
+ * This callback is optional. However, if it is implemented, the argument
+ * passed to @prepare(), @enter() and @finish() is meaningless and should
+ * be ignored.
*
- * @finish: Called when the system has left the given state and all devices
- * are resumed. The return value is ignored.
+ * @prepare: Prepare the platform for entering the system sleep state indicated
+ * by @set_target() or represented by the argument if @set_target() is not
+ * implemented.
+ * @prepare() is called right after devices have been suspended (ie. the
+ * appropriate .suspend() method has been executed for each device) and
+ * before the nonboot CPUs are disabled (it is executed with IRQs enabled).
+ * This callback is optional. It returns 0 on success or a negative
+ * error code otherwise, in which case the system cannot enter the desired
+ * sleep state (@enter() and @finish() will not be called in that case).
+ *
+ * @enter: Enter the system sleep state indicated by @set_target() or
+ * represented by the argument if @set_target() is not implemented.
+ * This callback is mandatory. It returns 0 on success or a negative
+ * error code otherwise, in which case the system cannot enter the desired
+ * sleep state.
+ *
+ * @finish: Called when the system has just left a sleep state, right after
+ * the nonboot CPUs have been enabled and before devices are resumed (it is
+ * executed with IRQs enabled). If @set_target() is not implemented, the
+ * argument represents the sleep state being left.
+ * This callback is optional, but should be implemented by the platforms
+ * that implement @prepare(). If implemented, it is always called after
+ * @enter() (even if @enter() fails).
*/
struct pm_ops {
int (*valid)(suspend_state_t state);
+ int (*set_target)(suspend_state_t state);
int (*prepare)(suspend_state_t state);
int (*enter)(suspend_state_t state);
int (*finish)(suspend_state_t state);
};
+extern struct pm_ops *pm_ops;
+
/**
* pm_set_ops - set platform dependent power management ops
* @pm_ops: The new power management operations to set.
*/
extern void pm_set_ops(struct pm_ops *pm_ops);
-extern struct pm_ops *pm_ops;
-extern int pm_suspend(suspend_state_t state);
-
extern int pm_valid_only_mem(suspend_state_t state);
/**
@@ -161,6 +191,8 @@
*/
extern void arch_suspend_enable_irqs(void);
+extern int pm_suspend(suspend_state_t state);
+
/*
* Device power management
*/
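[Editorial sketch, not part of this merge: the pm.h hunks above redefine the pm_ops contract, so here is a minimal illustration of how a platform might adopt the new .set_target callback. The foo_* names are hypothetical; the registration mirrors what the at91 hunk earlier in this diff does with at91_pm_set_target().]

#include <linux/pm.h>

/* Hypothetical platform code; assumes only the interfaces declared in pm.h above. */
static suspend_state_t foo_target_state;

static int foo_pm_set_target(suspend_state_t state)
{
	/* Called right before devices are suspended; just remember the target. */
	foo_target_state = state;
	return 0;
}

static int foo_pm_enter(suspend_state_t state)
{
	/*
	 * Because .set_target is implemented, the argument here is
	 * meaningless per the kerneldoc above; act on foo_target_state.
	 */
	return 0;
}

static struct pm_ops foo_pm_ops = {
	.valid		= pm_valid_only_mem,
	.set_target	= foo_pm_set_target,
	.enter		= foo_pm_enter,
};

/* Registered once during platform init: pm_set_ops(&foo_pm_ops); */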
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 8812985..fc45ed2 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -15,7 +15,6 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/pm.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/resume-trace.h>
@@ -97,6 +96,11 @@
}
}
+ if (pm_ops->set_target) {
+ error = pm_ops->set_target(state);
+ if (error)
+ goto Thaw;
+ }
suspend_console();
error = device_suspend(PMSG_SUSPEND);
if (error) {
diff --git a/mm/slab.c b/mm/slab.c
index 6d65cf4..a9c4472 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -774,7 +774,6 @@
*/
BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
- WARN_ON_ONCE(size == 0);
while (size > csizep->cs_size)
csizep++;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 758dafe..cf40ff9 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -72,7 +72,8 @@
netif_tx_unlock(dev);
local_irq_restore(flags);
- schedule_delayed_work(&npinfo->tx_work, HZ/10);
+ if (atomic_read(&npinfo->refcnt))
+ schedule_delayed_work(&npinfo->tx_work, HZ/10);
return;
}
netif_tx_unlock(dev);
@@ -250,22 +251,23 @@
unsigned long flags;
local_irq_save(flags);
- if (netif_tx_trylock(dev)) {
- /* try until next clock tick */
- for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
- tries > 0; --tries) {
+ /* try until next clock tick */
+ for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
+ tries > 0; --tries) {
+ if (netif_tx_trylock(dev)) {
if (!netif_queue_stopped(dev))
status = dev->hard_start_xmit(skb, dev);
+ netif_tx_unlock(dev);
if (status == NETDEV_TX_OK)
break;
- /* tickle device maybe there is some cleanup */
- netpoll_poll(np);
-
- udelay(USEC_PER_POLL);
}
- netif_tx_unlock(dev);
+
+ /* tickle device maybe there is some cleanup */
+ netpoll_poll(np);
+
+ udelay(USEC_PER_POLL);
}
local_irq_restore(flags);
}
@@ -784,9 +786,15 @@
if (atomic_dec_and_test(&npinfo->refcnt)) {
skb_queue_purge(&npinfo->arp_tx);
skb_queue_purge(&npinfo->txq);
- cancel_rearming_delayed_work(&npinfo->tx_work);
+ cancel_delayed_work(&npinfo->tx_work);
flush_scheduled_work();
+ /* clean after last, unfinished work */
+ if (!skb_queue_empty(&npinfo->txq)) {
+ struct sk_buff *skb;
+ skb = __skb_dequeue(&npinfo->txq);
+ kfree_skb(skb);
+ }
kfree(npinfo);
}
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6edaaa0..67861a8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3375,12 +3375,13 @@
sctp_assoc_t associd;
int retval = 0;
- if (len != sizeof(status)) {
+ if (len < sizeof(status)) {
retval = -EINVAL;
goto out;
}
- if (copy_from_user(&status, optval, sizeof(status))) {
+ len = sizeof(status);
+ if (copy_from_user(&status, optval, len)) {
retval = -EFAULT;
goto out;
}
@@ -3452,12 +3453,13 @@
struct sctp_transport *transport;
int retval = 0;
- if (len != sizeof(pinfo)) {
+ if (len < sizeof(pinfo)) {
retval = -EINVAL;
goto out;
}
- if (copy_from_user(&pinfo, optval, sizeof(pinfo))) {
+ len = sizeof(pinfo);
+ if (copy_from_user(&pinfo, optval, len)) {
retval = -EFAULT;
goto out;
}
@@ -3523,8 +3525,11 @@
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
int __user *optlen)
{
- if (len != sizeof(struct sctp_event_subscribe))
+ if (len < sizeof(struct sctp_event_subscribe))
return -EINVAL;
+ len = sizeof(struct sctp_event_subscribe);
+ if (put_user(len, optlen))
+ return -EFAULT;
if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
return -EFAULT;
return 0;
@@ -3546,9 +3551,12 @@
/* Applicable to UDP-style socket only */
if (sctp_style(sk, TCP))
return -EOPNOTSUPP;
- if (len != sizeof(int))
+ if (len < sizeof(int))
return -EINVAL;
- if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
+ len = sizeof(int);
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
return -EFAULT;
return 0;
}
@@ -3599,8 +3607,9 @@
int retval = 0;
struct sctp_association *asoc;
- if (len != sizeof(sctp_peeloff_arg_t))
+ if (len < sizeof(sctp_peeloff_arg_t))
return -EINVAL;
+ len = sizeof(sctp_peeloff_arg_t);
if (copy_from_user(&peeloff, optval, len))
return -EFAULT;
@@ -3628,6 +3637,8 @@
/* Return the fd mapped to the new socket. */
peeloff.sd = retval;
+ if (put_user(len, optlen))
+ return -EFAULT;
if (copy_to_user(optval, &peeloff, len))
retval = -EFAULT;
@@ -3736,9 +3747,9 @@
struct sctp_association *asoc = NULL;
struct sctp_sock *sp = sctp_sk(sk);
- if (len != sizeof(struct sctp_paddrparams))
+ if (len < sizeof(struct sctp_paddrparams))
return -EINVAL;
-
+ len = sizeof(struct sctp_paddrparams);
if (copy_from_user(&params, optval, len))
return -EFAULT;
@@ -3837,9 +3848,11 @@
struct sctp_association *asoc = NULL;
struct sctp_sock *sp = sctp_sk(sk);
- if (len != sizeof(struct sctp_assoc_value))
+ if (len < sizeof(struct sctp_assoc_value))
return - EINVAL;
+ len = sizeof(struct sctp_assoc_value);
+
if (copy_from_user(&params, optval, len))
return -EFAULT;
@@ -3888,8 +3901,11 @@
*/
static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
{
- if (len != sizeof(struct sctp_initmsg))
+ if (len < sizeof(struct sctp_initmsg))
return -EINVAL;
+ len = sizeof(struct sctp_initmsg);
+ if (put_user(len, optlen))
+ return -EFAULT;
if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
return -EFAULT;
return 0;
@@ -3904,7 +3920,7 @@
struct list_head *pos;
int cnt = 0;
- if (len != sizeof(sctp_assoc_t))
+ if (len < sizeof(sctp_assoc_t))
return -EINVAL;
if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
@@ -3940,10 +3956,12 @@
struct sctp_sock *sp = sctp_sk(sk);
int addrlen;
- if (len != sizeof(struct sctp_getaddrs_old))
+ if (len < sizeof(struct sctp_getaddrs_old))
return -EINVAL;
- if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs_old)))
+ len = sizeof(struct sctp_getaddrs_old);
+
+ if (copy_from_user(&getaddrs, optval, len))
return -EFAULT;
if (getaddrs.addr_num <= 0) return -EINVAL;
@@ -3966,7 +3984,9 @@
if (cnt >= getaddrs.addr_num) break;
}
getaddrs.addr_num = cnt;
- if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old)))
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &getaddrs, len))
return -EFAULT;
return 0;
@@ -3999,8 +4019,7 @@
return -EINVAL;
to = optval + offsetof(struct sctp_getaddrs,addrs);
- space_left = len - sizeof(struct sctp_getaddrs) -
- offsetof(struct sctp_getaddrs,addrs);
+ space_left = len - offsetof(struct sctp_getaddrs,addrs);
list_for_each(pos, &asoc->peer.transport_addr_list) {
from = list_entry(pos, struct sctp_transport, transports);
@@ -4037,7 +4056,7 @@
rwlock_t *addr_lock;
int cnt = 0;
- if (len != sizeof(sctp_assoc_t))
+ if (len < sizeof(sctp_assoc_t))
return -EINVAL;
if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
@@ -4179,10 +4198,11 @@
void *buf;
int bytes_copied = 0;
- if (len != sizeof(struct sctp_getaddrs_old))
+ if (len < sizeof(struct sctp_getaddrs_old))
return -EINVAL;
- if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs_old)))
+ len = sizeof(struct sctp_getaddrs_old);
+ if (copy_from_user(&getaddrs, optval, len))
return -EFAULT;
if (getaddrs.addr_num <= 0) return -EINVAL;
@@ -4254,7 +4274,7 @@
/* copy the leading structure back to user */
getaddrs.addr_num = cnt;
- if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old)))
+ if (copy_to_user(optval, &getaddrs, len))
err = -EFAULT;
error:
@@ -4282,7 +4302,7 @@
void *addrs;
void *buf;
- if (len <= sizeof(struct sctp_getaddrs))
+ if (len < sizeof(struct sctp_getaddrs))
return -EINVAL;
if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
@@ -4306,8 +4326,8 @@
}
to = optval + offsetof(struct sctp_getaddrs,addrs);
- space_left = len - sizeof(struct sctp_getaddrs) -
- offsetof(struct sctp_getaddrs,addrs);
+ space_left = len - offsetof(struct sctp_getaddrs,addrs);
+
addrs = kmalloc(space_left, GFP_KERNEL);
if (!addrs)
return -ENOMEM;
@@ -4379,10 +4399,12 @@
struct sctp_association *asoc;
struct sctp_sock *sp = sctp_sk(sk);
- if (len != sizeof(struct sctp_prim))
+ if (len < sizeof(struct sctp_prim))
return -EINVAL;
- if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
+ len = sizeof(struct sctp_prim);
+
+ if (copy_from_user(&prim, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
@@ -4398,7 +4420,9 @@
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp,
(union sctp_addr *)&prim.ssp_addr);
- if (copy_to_user(optval, &prim, sizeof(struct sctp_prim)))
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &prim, len))
return -EFAULT;
return 0;
@@ -4415,10 +4439,15 @@
{
struct sctp_setadaptation adaptation;
- if (len != sizeof(struct sctp_setadaptation))
+ if (len < sizeof(struct sctp_setadaptation))
return -EINVAL;
+ len = sizeof(struct sctp_setadaptation);
+
adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
+
+ if (put_user(len, optlen))
+ return -EFAULT;
if (copy_to_user(optval, &adaptation, len))
return -EFAULT;
@@ -4452,9 +4481,12 @@
struct sctp_association *asoc;
struct sctp_sock *sp = sctp_sk(sk);
- if (len != sizeof(struct sctp_sndrcvinfo))
+ if (len < sizeof(struct sctp_sndrcvinfo))
return -EINVAL;
- if (copy_from_user(&info, optval, sizeof(struct sctp_sndrcvinfo)))
+
+ len = sizeof(struct sctp_sndrcvinfo);
+
+ if (copy_from_user(&info, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
@@ -4475,7 +4507,9 @@
info.sinfo_timetolive = sp->default_timetolive;
}
- if (copy_to_user(optval, &info, sizeof(struct sctp_sndrcvinfo)))
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &info, len))
return -EFAULT;
return 0;
@@ -4526,10 +4560,12 @@
struct sctp_rtoinfo rtoinfo;
struct sctp_association *asoc;
- if (len != sizeof (struct sctp_rtoinfo))
+ if (len < sizeof (struct sctp_rtoinfo))
return -EINVAL;
- if (copy_from_user(&rtoinfo, optval, sizeof (struct sctp_rtoinfo)))
+ len = sizeof(struct sctp_rtoinfo);
+
+ if (copy_from_user(&rtoinfo, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
@@ -4581,11 +4617,12 @@
struct list_head *pos;
int cnt = 0;
- if (len != sizeof (struct sctp_assocparams))
+ if (len < sizeof (struct sctp_assocparams))
return -EINVAL;
- if (copy_from_user(&assocparams, optval,
- sizeof (struct sctp_assocparams)))
+ len = sizeof(struct sctp_assocparams);
+
+ if (copy_from_user(&assocparams, optval, len))
return -EFAULT;
asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
@@ -4671,9 +4708,11 @@
struct sctp_sock *sp;
struct sctp_association *asoc;
- if (len != sizeof(struct sctp_assoc_value))
+ if (len < sizeof(struct sctp_assoc_value))
return -EINVAL;
+ len = sizeof(struct sctp_assoc_value);
+
if (copy_from_user(&params, optval, len))
return -EFAULT;
@@ -6084,8 +6123,11 @@
* queued to the backlog. This prevents a potential race between
* backlog processing on the old socket and new-packet processing
* on the new socket.
+ *
+ * The caller has just allocated newsk so we can guarantee that other
+ * paths won't try to lock it and then oldsk.
*/
- sctp_lock_sock(newsk);
+ lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
sctp_assoc_migrate(assoc, newsk);
/* If the association on the newsk is already closed before accept()
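[Editorial sketch, not part of this merge: the sctp getsockopt hunks above all converge on one pattern — accept any user buffer at least as large as the option structure, clamp len to the structure size, and report the actual length back through optlen before copying out. A hedged illustration of that shape, with a hypothetical sctp_getsockopt_foo() and struct sctp_foo standing in for the real options:]

static int sctp_getsockopt_foo(struct sock *sk, int len,
			       char __user *optval, int __user *optlen)
{
	struct sctp_foo foo;	/* hypothetical option structure */

	if (len < sizeof(struct sctp_foo))
		return -EINVAL;
	len = sizeof(struct sctp_foo);	/* clamp to what is actually copied */

	/* ... fill in 'foo' from socket/association state here ... */

	if (put_user(len, optlen))	/* report the real length back */
		return -EFAULT;
	if (copy_to_user(optval, &foo, len))
		return -EFAULT;
	return 0;
}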
diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
index ebb1bda..2025db5 100644
--- a/sound/drivers/mts64.c
+++ b/sound/drivers/mts64.c
@@ -1048,7 +1048,7 @@
/*********************************************************************
* module init stuff
*********************************************************************/
-static void __init_or_module snd_mts64_unregister_all(void)
+static void snd_mts64_unregister_all(void)
{
int i;
diff --git a/sound/oss/sb_card.c b/sound/oss/sb_card.c
index 27acd6f..7de18b5 100644
--- a/sound/oss/sb_card.c
+++ b/sound/oss/sb_card.c
@@ -290,7 +290,7 @@
MODULE_DEVICE_TABLE(pnp_card, sb_pnp_card_table);
#endif /* CONFIG_PNP */
-static void __init_or_module sb_unregister_all(void)
+static void sb_unregister_all(void)
{
#ifdef CONFIG_PNP
if (pnp_registered)