Re: Linux 2.6.27.13

From: Greg KH
Date: Sat Jan 24 2009 - 19:52:14 EST


diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index b117e42..90f718c 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -960,9 +960,10 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
6stack 6-jack, separate surrounds (default)
3stack 3-stack, shared surrounds
laptop 2-channel only (FSC V2060, Samsung M50)
- laptop-eapd 2-channel with EAPD (Samsung R65, ASUS A6J)
+ laptop-eapd 2-channel with EAPD (ASUS A6J)
laptop-automute 2-channel with EAPD and HP-automute (Lenovo N100)
ultra 2-channel with EAPD (Samsung Ultra tablet PC)
+ samsung 2-channel with EAPD (Samsung R65)

AD1988/AD1988B/AD1989A/AD1989B
6stack 6-jack
diff --git a/Makefile b/Makefile
index f0f8cdf..d879e7d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 27
-EXTRAVERSION = .12
+EXTRAVERSION = .13
NAME = Trembling Tortoise

# *DOCUMENTATION*
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 48e496f..fb7e69c 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -15,6 +15,7 @@ config IA64
select ACPI if (!IA64_HP_SIM)
select PM if (!IA64_HP_SIM)
select ARCH_SUPPORTS_MSI
+ select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_KPROBES
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index db44e02..ba51948 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -710,9 +710,18 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
unsigned long len)
{
struct slice_mask mask, available;
+ unsigned int psize = mm->context.user_psize;

mask = slice_range_to_mask(addr, len);
- available = slice_mask_for_size(mm, mm->context.user_psize);
+ available = slice_mask_for_size(mm, psize);
+#ifdef CONFIG_PPC_64K_PAGES
+ /* We need to account for 4k slices too */
+ if (psize == MMU_PAGE_64K) {
+ struct slice_mask compat_mask;
+ compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
+ or_mask(available, compat_mask);
+ }
+#endif

#if 0 /* too verbose */
slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 13946eb..b4704e1 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -576,7 +576,7 @@ static ssize_t read_rbu_image_type(struct kobject *kobj,
{
int size = 0;
if (!pos)
- size = sprintf(buffer, "%s\n", image_type);
+ size = scnprintf(buffer, count, "%s\n", image_type);
return size;
}

@@ -648,7 +648,7 @@ static ssize_t read_rbu_packet_size(struct kobject *kobj,
int size = 0;
if (!pos) {
spin_lock(&rbu_data.lock);
- size = sprintf(buffer, "%lu\n", rbu_data.packetsize);
+ size = scnprintf(buffer, count, "%lu\n", rbu_data.packetsize);
spin_unlock(&rbu_data.lock);
}
return size;
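
As context for the scnprintf() change above: sprintf() ignores the size of
the buffer a sysfs read handler is given, while a bounded formatter cannot
overrun it, and scnprintf() returns the number of bytes actually stored
rather than the would-be length.  A minimal userspace sketch of that
behaviour, using snprintf() plus clamping as a stand-in for the kernel's
scnprintf(); the buffer size and the string are invented for the example:

#include <stddef.h>
#include <stdio.h>

static int bounded_show(char *buf, size_t count, const char *image_type)
{
	int len = snprintf(buf, count, "%s\n", image_type);

	/* snprintf() returns the would-be length; clamp it the way
	 * scnprintf() does, so the caller never sees more than count - 1. */
	if (len >= (int)count)
		len = (int)count - 1;
	return len;
}

int main(void)
{
	char small[8];
	int n = bounded_show(small, sizeof(small), "a-rather-long-type");

	printf("stored %d bytes: \"%s\"\n", n, small);
	return 0;
}
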
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index d9e7a49..58a5efb 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -1153,7 +1153,7 @@ static int __init abituguru3_dmi_detect(void)

static inline int abituguru3_dmi_detect(void)
{
- return -ENODEV;
+ return 1;
}

#endif /* CONFIG_DMI */
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index c54eff9..bfc2961 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -180,6 +180,7 @@ static struct vrm_model vrm_models[] = {
{X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */
{X86_VENDOR_AMD, 0xF, 0x3F, ANY, 24}, /* Athlon 64, Opteron */
{X86_VENDOR_AMD, 0xF, ANY, ANY, 25}, /* NPT family 0Fh */
+ {X86_VENDOR_AMD, 0x10, ANY, ANY, 25}, /* NPT family 10h */
{X86_VENDOR_INTEL, 0x6, 0x9, ANY, 13}, /* Pentium M (130 nm) */
{X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */
{X86_VENDOR_INTEL, 0x6, 0xD, ANY, 13}, /* Pentium M (90 nm) */
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index b4882cc..d32c1ee 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -904,7 +904,7 @@ xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
part_sn2->remote_vars_pa);

- part->last_heartbeat = remote_vars->heartbeat;
+ part->last_heartbeat = remote_vars->heartbeat - 1;
dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
part->last_heartbeat);

diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index b5d6b9a..b427978 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1075,7 +1075,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
{
unsigned int i;
int ret;
- char stir421x_fw_name[11];
+ char stir421x_fw_name[12];
const struct firmware *fw;
const unsigned char *fw_version_ptr; /* pointer to version string */
unsigned long fw_version = 0;
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 5d86281..c71982d 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
#include <asm/processor.h>

#define DRV_NAME "r6040"
-#define DRV_VERSION "0.18"
-#define DRV_RELDATE "13Jul2008"
+#define DRV_VERSION "0.19"
+#define DRV_RELDATE "18Dec2008"

/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
@@ -214,7 +214,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
/* Wait for the read bit to be cleared */
while (limit--) {
cmd = ioread16(ioaddr + MMDIO);
- if (cmd & MDIO_READ)
+ if (!(cmd & MDIO_READ))
break;
}

@@ -233,7 +233,7 @@ static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val
/* Wait for the write bit to be cleared */
while (limit--) {
cmd = ioread16(ioaddr + MMDIO);
- if (cmd & MDIO_WRITE)
+ if (!(cmd & MDIO_WRITE))
break;
}
}
@@ -681,8 +681,10 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- u16 status;
+ u16 misr, status;

+ /* Save MIER */
+ misr = ioread16(ioaddr + MIER);
/* Mask off RDC MAC interrupt */
iowrite16(MSK_INT, ioaddr + MIER);
/* Read MISR status and clear */
@@ -702,7 +704,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
dev->stats.rx_fifo_errors++;

/* Mask off RX interrupt */
- iowrite16(ioread16(ioaddr + MIER) & ~RX_INTS, ioaddr + MIER);
+ misr &= ~RX_INTS;
netif_rx_schedule(dev, &lp->napi);
}

@@ -710,6 +712,9 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
if (status & TX_INTS)
r6040_tx(dev);

+ /* Restore RDC MAC interrupt */
+ iowrite16(misr, ioaddr + MIER);
+
return IRQ_HANDLED;
}
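
The two r6040 polling fixes above correct an inverted test: the MDIO_READ /
MDIO_WRITE bit stays set while the PHY transaction is still in flight, so
the loop has to spin until the bit is cleared, not until it is set.  A
stand-alone sketch of that poll-until-clear idiom; read_reg(), BUSY_BIT and
the retry limit are invented stand-ins, not the driver's real register
interface:

#include <stdint.h>
#include <stdio.h>

#define BUSY_BIT 0x2000u

static uint16_t read_reg(void)
{
	static int calls;

	return (++calls < 3) ? BUSY_BIT : 0;	/* busy twice, then idle */
}

static int wait_not_busy(void)
{
	int limit = 2048;

	while (limit--) {
		if (!(read_reg() & BUSY_BIT))	/* bit cleared: done */
			return 0;
	}
	return -1;				/* timed out */
}

int main(void)
{
	printf("wait_not_busy() = %d\n", wait_not_busy());
	return 0;
}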

diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
index 6dbfed0..69120b5 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -729,7 +729,7 @@ ath9k_hw_eeprom_set_board_values(struct ath_hal *ah,
AR_AN_TOP2_LOCALBIAS,
AR_AN_TOP2_LOCALBIAS_S,
pModal->local_bias);
- DPRINTF(ah->ah_sc, ATH_DBG_ANY, "ForceXPAon: %d\n",
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "ForceXPAon: %d\n",
pModal->force_xpaon);
REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
pModal->force_xpaon);
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index cbaca23..bfff6b5 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -53,6 +53,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */
{USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */
{USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */
+ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
{USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
{USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
{USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 9761eaa..45bf8f7 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2113,6 +2113,7 @@ static struct usb_device_id rt73usb_device_table[] = {
/* Linksys */
{ USB_DEVICE(0x13b1, 0x0020), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) },
+ { USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) },
/* MSI */
{ USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index aa6fda1..8a82a62 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -33,6 +33,11 @@ struct endpoint_state {
struct pcie_link_state {
struct list_head sibiling;
struct pci_dev *pdev;
+ bool downstream_has_switch;
+
+ struct pcie_link_state *parent;
+ struct list_head children;
+ struct list_head link;

/* ASPM state */
unsigned int support_state;
@@ -125,7 +130,7 @@ static void pcie_set_clock_pm(struct pci_dev *pdev, int enable)
link_state->clk_pm_enabled = !!enable;
}

-static void pcie_check_clock_pm(struct pci_dev *pdev)
+static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist)
{
int pos;
u32 reg32;
@@ -149,10 +154,26 @@ static void pcie_check_clock_pm(struct pci_dev *pdev)
if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
enabled = 0;
}
- link_state->clk_pm_capable = capable;
link_state->clk_pm_enabled = enabled;
link_state->bios_clk_state = enabled;
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
+ if (!blacklist) {
+ link_state->clk_pm_capable = capable;
+ pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
+ } else {
+ link_state->clk_pm_capable = 0;
+ pcie_set_clock_pm(pdev, 0);
+ }
+}
+
+static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
+{
+ struct pci_dev *child_dev;
+
+ list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
+ if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM)
+ return true;
+ }
+ return false;
}

/*
@@ -419,9 +440,9 @@ static unsigned int pcie_aspm_check_state(struct pci_dev *pdev,
{
struct pci_dev *child_dev;

- /* If no child, disable the link */
+ /* If no child, ignore the link */
if (list_empty(&pdev->subordinate->devices))
- return 0;
+ return state;
list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
/*
@@ -462,6 +483,9 @@ static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
int valid = 1;
struct pcie_link_state *link_state = pdev->link_state;

+ /* If no child, disable the link */
+ if (list_empty(&pdev->subordinate->devices))
+ state = 0;
/*
* if the downstream component has pci bridge function, don't do ASPM
* now
@@ -493,20 +517,52 @@ static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
link_state->enabled_state = state;
}

+static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link)
+{
+ struct pcie_link_state *root_port_link = link;
+ while (root_port_link->parent)
+ root_port_link = root_port_link->parent;
+ return root_port_link;
+}
+
+/* check the whole hierarchy, and configure each link in the hierarchy */
static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
unsigned int state)
{
struct pcie_link_state *link_state = pdev->link_state;
+ struct pcie_link_state *root_port_link = get_root_port_link(link_state);
+ struct pcie_link_state *leaf;

- if (link_state->support_state == 0)
- return;
state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;

- /* state 0 means disabling aspm */
- state = pcie_aspm_check_state(pdev, state);
+ /* check all links who have specific root port link */
+ list_for_each_entry(leaf, &link_list, sibiling) {
+ if (!list_empty(&leaf->children) ||
+ get_root_port_link(leaf) != root_port_link)
+ continue;
+ state = pcie_aspm_check_state(leaf->pdev, state);
+ }
+ /* check root port link too in case it has no children */
+ state = pcie_aspm_check_state(root_port_link->pdev, state);
+
if (link_state->enabled_state == state)
return;
- __pcie_aspm_config_link(pdev, state);
+
+ /*
+ * we must change the hierarchy. See comments in
+ * __pcie_aspm_config_link for the order
+ **/
+ if (state & PCIE_LINK_STATE_L1) {
+ list_for_each_entry(leaf, &link_list, sibiling) {
+ if (get_root_port_link(leaf) == root_port_link)
+ __pcie_aspm_config_link(leaf->pdev, state);
+ }
+ } else {
+ list_for_each_entry_reverse(leaf, &link_list, sibiling) {
+ if (get_root_port_link(leaf) == root_port_link)
+ __pcie_aspm_config_link(leaf->pdev, state);
+ }
+ }
}

/*
@@ -570,6 +626,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
unsigned int state;
struct pcie_link_state *link_state;
int error = 0;
+ int blacklist;

if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
return;
@@ -580,29 +637,58 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
if (list_empty(&pdev->subordinate->devices))
goto out;

- if (pcie_aspm_sanity_check(pdev))
- goto out;
+ blacklist = !!pcie_aspm_sanity_check(pdev);

mutex_lock(&aspm_lock);

link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
if (!link_state)
goto unlock_out;
- pdev->link_state = link_state;

- pcie_aspm_configure_common_clock(pdev);
+ link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev);
+ INIT_LIST_HEAD(&link_state->children);
+ INIT_LIST_HEAD(&link_state->link);
+ if (pdev->bus->self) {/* this is a switch */
+ struct pcie_link_state *parent_link_state;

- pcie_aspm_cap_init(pdev);
+ parent_link_state = pdev->bus->parent->self->link_state;
+ if (!parent_link_state) {
+ kfree(link_state);
+ goto unlock_out;
+ }
+ list_add(&link_state->link, &parent_link_state->children);
+ link_state->parent = parent_link_state;
+ }

- /* config link state to avoid BIOS error */
- state = pcie_aspm_check_state(pdev, policy_to_aspm_state(pdev));
- __pcie_aspm_config_link(pdev, state);
+ pdev->link_state = link_state;

- pcie_check_clock_pm(pdev);
+ if (!blacklist) {
+ pcie_aspm_configure_common_clock(pdev);
+ pcie_aspm_cap_init(pdev);
+ } else {
+ link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
+ link_state->bios_aspm_state = 0;
+ /* Set support state to 0, so we will disable ASPM later */
+ link_state->support_state = 0;
+ }

link_state->pdev = pdev;
list_add(&link_state->sibiling, &link_list);

+ if (link_state->downstream_has_switch) {
+ /*
+ * If link has switch, delay the link config. The leaf link
+ * initialization will config the whole hierarchy, but we must
+ * make sure BIOS doesn't set unsupported link state
+ **/
+ state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
+ __pcie_aspm_config_link(pdev, state);
+ } else
+ __pcie_aspm_configure_link_state(pdev,
+ policy_to_aspm_state(pdev));
+
+ pcie_check_clock_pm(pdev, blacklist);
+
unlock_out:
if (error)
free_link_state(pdev);
@@ -635,6 +721,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
/* All functions are removed, so just disable ASPM for the link */
__pcie_aspm_config_one_dev(parent, 0);
list_del(&link_state->sibiling);
+ list_del(&link_state->link);
/* Clock PM is for endpoint device */

free_link_state(parent);
diff --git a/drivers/usb/storage/libusual.c b/drivers/usb/storage/libusual.c
index d617e8a..f970b27 100644
--- a/drivers/usb/storage/libusual.c
+++ b/drivers/usb/storage/libusual.c
@@ -46,6 +46,12 @@ static int usu_probe_thread(void *arg);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin,bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }

+#define COMPLIANT_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
+ vendorName, productName, useProtocol, useTransport, \
+ initFunction, flags) \
+{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
+ .driver_info = (flags) }
+
#define USUAL_DEV(useProto, useTrans, useType) \
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
.driver_info = ((useType)<<24) }
@@ -57,6 +63,7 @@ struct usb_device_id storage_usb_ids [] = {

#undef USUAL_DEV
#undef UNUSUAL_DEV
+#undef COMPLIANT_DEV

MODULE_DEVICE_TABLE(usb, storage_usb_ids);
EXPORT_SYMBOL_GPL(storage_usb_ids);
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 09779f6..620c2b5 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -59,6 +59,13 @@
#include "transport.h"
#include "protocol.h"

+/* Vendor IDs for companies that seem to include the READ CAPACITY bug
+ * in all their devices
+ */
+#define VENDOR_ID_NOKIA 0x0421
+#define VENDOR_ID_NIKON 0x04b0
+#define VENDOR_ID_MOTOROLA 0x22b8
+
/***********************************************************************
* Host functions
***********************************************************************/
@@ -134,6 +141,22 @@ static int slave_configure(struct scsi_device *sdev)
* settings can't be overridden via the scsi devinfo mechanism. */
if (sdev->type == TYPE_DISK) {

+ /* Some vendors seem to put the READ CAPACITY bug into
+ * all their devices -- primarily makers of cell phones
+ * and digital cameras. Since these devices always use
+ * flash media and can be expected to have an even number
+ * of sectors, we will always enable the CAPACITY_HEURISTICS
+ * flag unless told otherwise. */
+ switch (le16_to_cpu(us->pusb_dev->descriptor.idVendor)) {
+ case VENDOR_ID_NOKIA:
+ case VENDOR_ID_NIKON:
+ case VENDOR_ID_MOTOROLA:
+ if (!(us->fflags & (US_FL_FIX_CAPACITY |
+ US_FL_CAPACITY_OK)))
+ us->fflags |= US_FL_CAPACITY_HEURISTICS;
+ break;
+ }
+
/* Disk-type devices use MODE SENSE(6) if the protocol
* (SubClass) is Transparent SCSI, otherwise they use
* MODE SENSE(10). */
@@ -196,6 +219,14 @@ static int slave_configure(struct scsi_device *sdev)
* sector in a larger than 1 sector read, since the performance
* impact is negligible we set this flag for all USB disks */
sdev->last_sector_bug = 1;
+
+ /* Enable last-sector hacks for single-target devices using
+ * the Bulk-only transport, unless we already know the
+ * capacity will be decremented or is correct. */
+ if (!(us->fflags & (US_FL_FIX_CAPACITY | US_FL_CAPACITY_OK |
+ US_FL_SCM_MULT_TARG)) &&
+ us->protocol == US_PR_BULK)
+ us->use_last_sector_hacks = 1;
} else {

/* Non-disk-type devices don't need to blacklist any pages
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 3523a0b..861e308 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -57,6 +57,9 @@
#include "scsiglue.h"
#include "debug.h"

+#include <linux/blkdev.h>
+#include "../../scsi/sd.h"
+

/***********************************************************************
* Data transfer routines
@@ -511,6 +514,80 @@ int usb_stor_bulk_transfer_sg(struct us_data* us, unsigned int pipe,
* Transport routines
***********************************************************************/

+/* There are so many devices that report the capacity incorrectly,
+ * this routine was written to counteract some of the resulting
+ * problems.
+ */
+static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
+{
+ struct gendisk *disk;
+ struct scsi_disk *sdkp;
+ u32 sector;
+
+ /* To Report "Medium Error: Record Not Found" */
+ static unsigned char record_not_found[18] = {
+ [0] = 0x70, /* current error */
+ [2] = MEDIUM_ERROR, /* = 0x03 */
+ [7] = 0x0a, /* additional length */
+ [12] = 0x14 /* Record Not Found */
+ };
+
+ /* If last-sector problems can't occur, whether because the
+ * capacity was already decremented or because the device is
+ * known to report the correct capacity, then we don't need
+ * to do anything.
+ */
+ if (!us->use_last_sector_hacks)
+ return;
+
+ /* Was this command a READ(10) or a WRITE(10)? */
+ if (srb->cmnd[0] != READ_10 && srb->cmnd[0] != WRITE_10)
+ goto done;
+
+ /* Did this command access the last sector? */
+ sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
+ (srb->cmnd[4] << 8) | (srb->cmnd[5]);
+ disk = srb->request->rq_disk;
+ if (!disk)
+ goto done;
+ sdkp = scsi_disk(disk);
+ if (!sdkp)
+ goto done;
+ if (sector + 1 != sdkp->capacity)
+ goto done;
+
+ if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) {
+
+ /* The command succeeded. We know this device doesn't
+ * have the last-sector bug, so stop checking it.
+ */
+ us->use_last_sector_hacks = 0;
+
+ } else {
+ /* The command failed. Allow up to 3 retries in case this
+ * is some normal sort of failure. After that, assume the
+ * capacity is wrong and we're trying to access the sector
+ * beyond the end. Replace the result code and sense data
+ * with values that will cause the SCSI core to fail the
+ * command immediately, instead of going into an infinite
+ * (or even just a very long) retry loop.
+ */
+ if (++us->last_sector_retries < 3)
+ return;
+ srb->result = SAM_STAT_CHECK_CONDITION;
+ memcpy(srb->sense_buffer, record_not_found,
+ sizeof(record_not_found));
+ }
+
+ done:
+ /* Don't reset the retry counter for TEST UNIT READY commands,
+ * because they get issued after device resets which might be
+ * caused by a failed last-sector access.
+ */
+ if (srb->cmnd[0] != TEST_UNIT_READY)
+ us->last_sector_retries = 0;
+}
+
/* Invoke the transport and basic error-handling/recovery methods
*
* This is used by the protocol layers to actually send the message to
@@ -544,6 +621,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
/* if the transport provided its own sense data, don't auto-sense */
if (result == USB_STOR_TRANSPORT_NO_SENSE) {
srb->result = SAM_STAT_CHECK_CONDITION;
+ last_sector_hacks(us, srb);
return;
}

@@ -667,6 +745,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
srb->result = (DID_ERROR << 16) | (SUGGEST_RETRY << 24);

+ last_sector_hacks(us, srb);
return;

/* Error and abort processing: try to resynchronize with the device
@@ -694,6 +773,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
us->transport_reset(us);
}
clear_bit(US_FLIDX_RESETTING, &us->dflags);
+ last_sector_hacks(us, srb);
}

/* Stop the current URB transfer */
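
The last_sector_hacks() routine added above decides whether a command hit
the final sector by pulling the 32-bit logical block address out of the
READ(10)/WRITE(10) CDB, where it is stored big-endian in bytes 2..5.  A
small self-contained sketch of just that extraction; the CDB contents are
made up for the example:

#include <stdint.h>
#include <stdio.h>

static uint32_t read10_lba(const uint8_t *cdb)
{
	/* bytes 2..5 of a READ(10)/WRITE(10) CDB hold the LBA, big-endian */
	return ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
	       ((uint32_t)cdb[4] << 8)  |  (uint32_t)cdb[5];
}

int main(void)
{
	/* READ(10) for LBA 0x00123456, transfer length 1 (example values) */
	uint8_t cdb[10] = { 0x28, 0, 0x00, 0x12, 0x34, 0x56, 0, 0, 1, 0 };

	printf("LBA = 0x%08x\n", read10_lba(cdb));
	return 0;
}
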
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 476da5d..6fcb6d1 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -27,7 +27,8 @@

/* IMPORTANT NOTE: This file must be included in another file which does
* the following thing for it to work:
- * The macro UNUSUAL_DEV() must be defined before this file is included
+ * The UNUSUAL_DEV, COMPLIANT_DEV, and USUAL_DEV macros must be defined
+ * before this file is included.
*/

/* If you edit this file, please try to keep it sorted first by VendorID,
@@ -46,6 +47,12 @@
* <usb-storage@xxxxxxxxxxxxxxxxxxxxxxxx>
*/

+/* Note: If you add an entry only in order to set the CAPACITY_OK flag,
+ * use the COMPLIANT_DEV macro instead of UNUSUAL_DEV. This is
+ * because such entries mark devices which actually work correctly,
+ * as opposed to devices that do something strangely or wrongly.
+ */
+
/* patch submitted by Vivian Bregier <Vivian.Bregier@xxxxxxx>
*/
UNUSUAL_DEV( 0x03eb, 0x2002, 0x0100, 0x0100,
@@ -160,20 +167,6 @@ UNUSUAL_DEV( 0x0421, 0x0019, 0x0592, 0x0592,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),

-/* Reported by Filip Joelsson <filip@xxxxxxxxxxxxx> */
-UNUSUAL_DEV( 0x0421, 0x005d, 0x0001, 0x0600,
- "Nokia",
- "Nokia 3110c",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Patch for Nokia 5310 capacity */
-UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0701,
- "Nokia",
- "5310",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
/* Reported by Mario Rettig <mariorettig@xxxxxx> */
UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100,
"Nokia",
@@ -239,56 +232,6 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),

-/* Reported by Cedric Godin <cedric@xxxxxxxxxx> */
-UNUSUAL_DEV( 0x0421, 0x04b9, 0x0500, 0x0551,
- "Nokia",
- "5300",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Reported by Paulo Fessel <pfessel@xxxxxxxxx> */
-UNUSUAL_DEV( 0x0421, 0x04bd, 0x0000, 0x9999,
- "Nokia",
- "5200",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Reported by Richard Nauber <RichardNauber@xxxxxx> */
-UNUSUAL_DEV( 0x0421, 0x04fa, 0x0550, 0x0660,
- "Nokia",
- "6300",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Reported by Ozan Sener <themgzzy@xxxxxxxxx> */
-UNUSUAL_DEV( 0x0421, 0x0060, 0x0551, 0x0551,
- "Nokia",
- "3500c",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Reported by CSECSY Laszlo <boobaa@xxxxxxxxxxxxxx> */
-UNUSUAL_DEV( 0x0421, 0x0063, 0x0001, 0x0601,
- "Nokia",
- "Nokia 3109c",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Patch for Nokia 5310 capacity */
-UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591,
- "Nokia",
- "5310",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Submitted by Ricky Wong Yung Fei <evilbladewarrior@xxxxxxxxx> */
-/* Nokia 7610 Supernova - Too many sectors reported in usb storage mode */
-UNUSUAL_DEV( 0x0421, 0x00f5, 0x0000, 0x0470,
- "Nokia",
- "7610 Supernova",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
/* Reported by Olaf Hering <olh@xxxxxxx> from novell bug #105878 */
UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210,
"SMSC",
@@ -692,6 +635,13 @@ UNUSUAL_DEV( 0x0525, 0xa140, 0x0100, 0x0100,
US_SC_8070, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),

+/* Added by Alan Stern <stern@xxxxxxxxxxxxxxxxxxx> */
+COMPLIANT_DEV(0x0525, 0xa4a5, 0x0000, 0x9999,
+ "Linux",
+ "File-backed Storage Gadget",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_CAPACITY_OK ),
+
/* Yakumo Mega Image 37
* Submitted by Stephan Fuhrmann <atomenergie@xxxxxxxxxxx> */
UNUSUAL_DEV( 0x052b, 0x1801, 0x0100, 0x0100,
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 27016fd..ceb8ac3 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -126,6 +126,8 @@ MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin,bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }

+#define COMPLIANT_DEV UNUSUAL_DEV
+
#define USUAL_DEV(useProto, useTrans, useType) \
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
.driver_info = (USB_US_TYPE_STOR<<24) }
@@ -134,6 +136,7 @@ static struct usb_device_id storage_usb_ids [] = {

# include "unusual_devs.h"
#undef UNUSUAL_DEV
+#undef COMPLIANT_DEV
#undef USUAL_DEV
/* Terminating entry */
{ }
@@ -164,6 +167,8 @@ MODULE_DEVICE_TABLE (usb, storage_usb_ids);
.initFunction = init_function, \
}

+#define COMPLIANT_DEV UNUSUAL_DEV
+
#define USUAL_DEV(use_protocol, use_transport, use_type) \
{ \
.useProtocol = use_protocol, \
@@ -173,6 +178,7 @@ MODULE_DEVICE_TABLE (usb, storage_usb_ids);
static struct us_unusual_dev us_unusual_dev_list[] = {
# include "unusual_devs.h"
# undef UNUSUAL_DEV
+# undef COMPLIANT_DEV
# undef USUAL_DEV

/* Terminating entry */
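
The COMPLIANT_DEV plumbing above reuses the trick usb.c already plays with
unusual_devs.h: the entry macros are redefined before each #include and
#undef'ed afterwards, so one device list expands into several differently
shaped tables (the "X macro" technique).  A stand-alone sketch of the same
idea, with a small made-up DEVICE() list in place of the real header:

#include <stdio.h>

#define DEVICE_LIST \
	DEVICE(0x0421, 0x042e, "Example phone") \
	DEVICE(0x0525, 0xa4a5, "Example storage gadget")

struct id { unsigned short vendor, product; };

/* First expansion of the list: an ID table. */
#define DEVICE(v, p, name) { (v), (p) },
static const struct id ids[] = { DEVICE_LIST };
#undef DEVICE

/* Second expansion of the same list: a matching name table. */
#define DEVICE(v, p, name) name,
static const char *names[] = { DEVICE_LIST };
#undef DEVICE

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("%04x:%04x  %s\n", ids[i].vendor, ids[i].product,
		       names[i]);
	return 0;
}
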
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index a4ad73b..2e995c9 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -155,6 +155,10 @@ struct us_data {
#ifdef CONFIG_PM
pm_hook suspend_resume_hook;
#endif
+
+ /* hacks for READ CAPACITY bug handling */
+ int use_last_sector_hacks;
+ int last_sector_retries;
};

/* Convert between us_data and the corresponding Scsi_Host */
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 25adfc3..c8616a0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -421,9 +421,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* If we're a pdflush thread, then implement pdflush collision avoidance
* against the entire list.
*
- * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
- * that it can be located for waiting on in __writeback_single_inode().
- *
* If `bdi' is non-zero then we're being asked to writeback a specific queue.
* This function assumes that the blockdev superblock's inodes are backed by
* a variety of queues, so all inodes are searched. For other superblocks,
@@ -443,6 +440,7 @@ void generic_sync_sb_inodes(struct super_block *sb,
struct writeback_control *wbc)
{
const unsigned long start = jiffies; /* livelock avoidance */
+ int sync = wbc->sync_mode == WB_SYNC_ALL;

spin_lock(&inode_lock);
if (!wbc->for_kupdate || list_empty(&sb->s_io))
@@ -499,10 +497,6 @@ void generic_sync_sb_inodes(struct super_block *sb,
__iget(inode);
pages_skipped = wbc->pages_skipped;
__writeback_single_inode(inode, wbc);
- if (wbc->sync_mode == WB_SYNC_HOLD) {
- inode->dirtied_when = jiffies;
- list_move(&inode->i_list, &sb->s_dirty);
- }
if (current_is_pdflush())
writeback_release(bdi);
if (wbc->pages_skipped != pages_skipped) {
@@ -523,7 +517,49 @@ void generic_sync_sb_inodes(struct super_block *sb,
if (!list_empty(&sb->s_more_io))
wbc->more_io = 1;
}
- spin_unlock(&inode_lock);
+
+ if (sync) {
+ struct inode *inode, *old_inode = NULL;
+
+ /*
+ * Data integrity sync. Must wait for all pages under writeback,
+ * because there may have been pages dirtied before our sync
+ * call, but which had writeout started before we write it out.
+ * In which case, the inode may not be on the dirty list, but
+ * we still have to wait for that writeout.
+ */
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ struct address_space *mapping;
+
+ if (inode->i_state & (I_FREEING|I_WILL_FREE))
+ continue;
+ mapping = inode->i_mapping;
+ if (mapping->nrpages == 0)
+ continue;
+ __iget(inode);
+ spin_unlock(&inode_lock);
+ /*
+ * We hold a reference to 'inode' so it couldn't have
+ * been removed from s_inodes list while we dropped the
+ * inode_lock. We cannot iput the inode now as we can
+ * be holding the last reference and we cannot iput it
+ * under inode_lock. So we keep the reference and iput
+ * it later.
+ */
+ iput(old_inode);
+ old_inode = inode;
+
+ filemap_fdatawait(mapping);
+
+ cond_resched();
+
+ spin_lock(&inode_lock);
+ }
+ spin_unlock(&inode_lock);
+ iput(old_inode);
+ } else
+ spin_unlock(&inode_lock);
+
return; /* Leave any unwritten inodes on s_io */
}
EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
@@ -588,8 +624,7 @@ restart:

/*
* writeback and wait upon the filesystem's dirty inodes. The caller will
- * do this in two passes - one to write, and one to wait. WB_SYNC_HOLD is
- * used to park the written inodes on sb->s_dirty for the wait pass.
+ * do this in two passes - one to write, and one to wait.
*
* A finite limit is set on the number of pages which will be written.
* To prevent infinite livelock of sys_sync().
@@ -600,30 +635,21 @@ restart:
void sync_inodes_sb(struct super_block *sb, int wait)
{
struct writeback_control wbc = {
- .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
+ .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
.range_start = 0,
.range_end = LLONG_MAX,
};
- unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
- unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

- wbc.nr_to_write = nr_dirty + nr_unstable +
- (inodes_stat.nr_inodes - inodes_stat.nr_unused) +
- nr_dirty + nr_unstable;
- wbc.nr_to_write += wbc.nr_to_write / 2; /* Bit more for luck */
- sync_sb_inodes(sb, &wbc);
-}
+ if (!wait) {
+ unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
+ unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

-/*
- * Rather lame livelock avoidance.
- */
-static void set_sb_syncing(int val)
-{
- struct super_block *sb;
- spin_lock(&sb_lock);
- list_for_each_entry_reverse(sb, &super_blocks, s_list)
- sb->s_syncing = val;
- spin_unlock(&sb_lock);
+ wbc.nr_to_write = nr_dirty + nr_unstable +
+ (inodes_stat.nr_inodes - inodes_stat.nr_unused);
+ } else
+ wbc.nr_to_write = LONG_MAX; /* doesn't actually matter */
+
+ sync_sb_inodes(sb, &wbc);
}

/**
@@ -652,9 +678,6 @@ static void __sync_inodes(int wait)
spin_lock(&sb_lock);
restart:
list_for_each_entry(sb, &super_blocks, s_list) {
- if (sb->s_syncing)
- continue;
- sb->s_syncing = 1;
sb->s_count++;
spin_unlock(&sb_lock);
down_read(&sb->s_umount);
@@ -672,13 +695,10 @@ restart:

void sync_inodes(int wait)
{
- set_sb_syncing(0);
__sync_inodes(0);

- if (wait) {
- set_sb_syncing(0);
+ if (wait)
__sync_inodes(1);
- }
}

/**
diff --git a/fs/sync.c b/fs/sync.c
index 6cc8cb4..9e5f60d 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -287,7 +287,7 @@ int do_sync_mapping_range(struct address_space *mapping, loff_t offset,

if (flags & SYNC_FILE_RANGE_WRITE) {
ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
- WB_SYNC_NONE);
+ WB_SYNC_ALL);
if (ret < 0)
goto out;
}
diff --git a/include/linux/fs.h b/include/linux/fs.h
index d621217..d1b3e22 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1080,7 +1080,6 @@ struct super_block {
struct rw_semaphore s_umount;
struct mutex s_lock;
int s_count;
- int s_syncing;
int s_need_sync_fs;
atomic_t s_active;
#ifdef CONFIG_SECURITY
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index d9a3bbe..bd414ec 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -52,8 +52,9 @@
US_FLAG(MAX_SECTORS_MIN,0x00002000) \
/* Sets max_sectors to arch min */ \
US_FLAG(BULK_IGNORE_TAG,0x00004000) \
- /* Ignore tag mismatch in bulk operations */
-
+ /* Ignore tag mismatch in bulk operations */ \
+ US_FLAG(CAPACITY_OK, 0x00010000) \
+ /* READ CAPACITY response is correct */

#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 12b15c5..c2835bb 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -30,7 +30,6 @@ static inline int task_is_pdflush(struct task_struct *task)
enum writeback_sync_modes {
WB_SYNC_NONE, /* Don't wait on anything */
WB_SYNC_ALL, /* Wait on every mapping */
- WB_SYNC_HOLD, /* Hold the inode on sb_dirty for sys_sync() */
};

/*
diff --git a/kernel/signal.c b/kernel/signal.c
index 6f06f43..3d161f0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1141,7 +1141,8 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
struct task_struct * p;

for_each_process(p) {
- if (p->pid > 1 && !same_thread_group(p, current)) {
+ if (task_pid_vnr(p) > 1 &&
+ !same_thread_group(p, current)) {
int err = group_send_sig_info(sig, info, p);
++count;
if (err != -EPERM)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 4220a2e..521960b 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -61,27 +61,23 @@ struct clocksource *clock;

#ifdef CONFIG_GENERIC_TIME
/**
- * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
+ * clocksource_forward_now - update clock to the current time
*
- * private function, must hold xtime_lock lock when being
- * called. Returns the number of nanoseconds since the
- * last call to update_wall_time() (adjusted by NTP scaling)
+ * Forward the current clock to update its state since the last call to
+ * update_wall_time(). This is useful before significant clock changes,
+ * as it avoids having to deal with this time offset explicitly.
*/
-static inline s64 __get_nsec_offset(void)
+static void clocksource_forward_now(void)
{
cycle_t cycle_now, cycle_delta;
- s64 ns_offset;
+ s64 nsec;

- /* read clocksource: */
cycle_now = clocksource_read(clock);
-
- /* calculate the delta since the last update_wall_time: */
cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+ clock->cycle_last = cycle_now;

- /* convert to nanoseconds: */
- ns_offset = cyc2ns(clock, cycle_delta);
-
- return ns_offset;
+ nsec = cyc2ns(clock, cycle_delta);
+ timespec_add_ns(&xtime, nsec);
}

/**
@@ -92,6 +88,7 @@ static inline s64 __get_nsec_offset(void)
*/
void getnstimeofday(struct timespec *ts)
{
+ cycle_t cycle_now, cycle_delta;
unsigned long seq;
s64 nsecs;

@@ -101,7 +98,15 @@ void getnstimeofday(struct timespec *ts)
seq = read_seqbegin(&xtime_lock);

*ts = xtime;
- nsecs = __get_nsec_offset();
+
+ /* read clocksource: */
+ cycle_now = clocksource_read(clock);
+
+ /* calculate the delta since the last update_wall_time: */
+ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+ /* convert to nanoseconds: */
+ nsecs = cyc2ns(clock, cycle_delta);

} while (read_seqretry(&xtime_lock, seq));

@@ -134,22 +139,22 @@ EXPORT_SYMBOL(do_gettimeofday);
*/
int do_settimeofday(struct timespec *tv)
{
+ struct timespec ts_delta;
unsigned long flags;
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;

if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;

write_seqlock_irqsave(&xtime_lock, flags);

- nsec -= __get_nsec_offset();
+ clocksource_forward_now();
+
+ ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
+ ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
+ wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+ xtime = *tv;

- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
update_xtime_cache(0);

clock->error = 0;
@@ -175,22 +180,17 @@ EXPORT_SYMBOL(do_settimeofday);
static void change_clocksource(void)
{
struct clocksource *new;
- cycle_t now;
- u64 nsec;

new = clocksource_get_next();

if (clock == new)
return;

- new->cycle_last = 0;
- now = clocksource_read(new);
- nsec = __get_nsec_offset();
- timespec_add_ns(&xtime, nsec);
+ clocksource_forward_now();

clock = new;
- clock->cycle_last = now;
-
+ clock->cycle_last = 0;
+ clock->cycle_last = clocksource_read(new);
clock->error = 0;
clock->xtime_nsec = 0;
clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -205,8 +205,8 @@ static void change_clocksource(void)
*/
}
#else
+static inline void clocksource_forward_now(void) { }
static inline void change_clocksource(void) { }
-static inline s64 __get_nsec_offset(void) { return 0; }
#endif

/**
@@ -268,8 +268,6 @@ void __init timekeeping_init(void)

/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;
-/* xtime offset when we went into suspend */
-static s64 timekeeping_suspend_nsecs;

/**
* timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -295,8 +293,6 @@ static int timekeeping_resume(struct sys_device *dev)
wall_to_monotonic.tv_sec -= sleep_length;
total_sleep_time += sleep_length;
}
- /* Make sure that we have the correct xtime reference */
- timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
update_xtime_cache(0);
/* re-base the last cycle value */
clock->cycle_last = 0;
@@ -322,8 +318,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
timekeeping_suspend_time = read_persistent_clock();

write_seqlock_irqsave(&xtime_lock, flags);
- /* Get the current xtime offset */
- timekeeping_suspend_nsecs = __get_nsec_offset();
+ clocksource_forward_now();
timekeeping_suspended = 1;
write_sequnlock_irqrestore(&xtime_lock, flags);

@@ -464,10 +459,10 @@ void update_wall_time(void)
*/
while (offset >= clock->cycle_interval) {
/* accumulate one interval */
- clock->xtime_nsec += clock->xtime_interval;
- clock->cycle_last += clock->cycle_interval;
offset -= clock->cycle_interval;
+ clock->cycle_last += clock->cycle_interval;

+ clock->xtime_nsec += clock->xtime_interval;
if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
xtime.tv_sec++;
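
The clocksource_forward_now() helper introduced above relies on two idioms
that are easy to miss in the diff: the delta of a free-running counter is
taken modulo the counter width, (now - last) & mask, which stays correct
across wraparound, and cycles become nanoseconds through the clocksource's
mult/shift pair (cyc2ns).  A minimal sketch with invented mask, mult and
shift values, roughly modelling a 32-bit counter ticking at 1 MHz:

#include <stdint.h>
#include <stdio.h>

#define MASK  0xffffffffull		/* pretend 32-bit counter */
#define MULT  4194304000ull		/* ns per cycle, scaled by 2^SHIFT */
#define SHIFT 22

static uint64_t cyc2ns(uint64_t cycles)
{
	return (cycles * MULT) >> SHIFT;
}

int main(void)
{
	uint64_t last  = 0xfffffff0ull;	/* just before the counter wraps */
	uint64_t now   = 0x00000010ull;	/* counter has wrapped around */
	uint64_t delta = (now - last) & MASK;

	printf("delta = %llu cycles = %llu ns\n",
	       (unsigned long long)delta, (unsigned long long)cyc2ns(delta));
	return 0;
}
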
diff --git a/lib/idr.c b/lib/idr.c
index 1c4f928..21154ae 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -121,7 +121,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
while (idp->id_free_cnt < IDR_FREE_MAX) {
struct idr_layer *new;
- new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
+ new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
if (new == NULL)
return (0);
move_to_free_list(idp, new);
@@ -623,16 +623,10 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
}
EXPORT_SYMBOL(idr_replace);

-static void idr_cache_ctor(void *idr_layer)
-{
- memset(idr_layer, 0, sizeof(struct idr_layer));
-}
-
void __init idr_init_cache(void)
{
idr_layer_cache = kmem_cache_create("idr_layer_cache",
- sizeof(struct idr_layer), 0, SLAB_PANIC,
- idr_cache_ctor);
+ sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
diff --git a/mm/filemap.c b/mm/filemap.c
index f3033d0..8a477d3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -209,7 +209,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
int ret;
struct writeback_control wbc = {
.sync_mode = sync_mode,
- .nr_to_write = mapping->nrpages * 2,
+ .nr_to_write = LONG_MAX,
.range_start = start,
.range_end = end,
};
@@ -1304,7 +1304,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
goto out; /* skip atime */
size = i_size_read(inode);
if (pos < size) {
- retval = filemap_write_and_wait(mapping);
+ retval = filemap_write_and_wait_range(mapping, pos,
+ pos + iov_length(iov, nr_segs) - 1);
if (!retval) {
retval = mapping->a_ops->direct_IO(READ, iocb,
iov, pos, nr_segs);
@@ -2117,18 +2118,10 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
if (count != ocount)
*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);

- /*
- * Unmap all mmappings of the file up-front.
- *
- * This will cause any pte dirty bits to be propagated into the
- * pageframes for the subsequent filemap_write_and_wait().
- */
write_len = iov_length(iov, *nr_segs);
end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
- if (mapping_mapped(mapping))
- unmap_mapping_range(mapping, pos, write_len, 0);

- written = filemap_write_and_wait(mapping);
+ written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
if (written)
goto out;

@@ -2519,7 +2512,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
* the file data here, to try to honour O_DIRECT expectations.
*/
if (unlikely(file->f_flags & O_DIRECT) && written)
- status = filemap_write_and_wait(mapping);
+ status = filemap_write_and_wait_range(mapping,
+ pos, pos + written - 1);

return written ? written : status;
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 24de8b6..8875822 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -872,9 +872,11 @@ int write_cache_pages(struct address_space *mapping,
int done = 0;
struct pagevec pvec;
int nr_pages;
+ pgoff_t uninitialized_var(writeback_index);
pgoff_t index;
pgoff_t end; /* Inclusive */
- int scanned = 0;
+ pgoff_t done_index;
+ int cycled;
int range_whole = 0;

if (wbc->nonblocking && bdi_write_congested(bdi)) {
@@ -884,82 +886,134 @@ int write_cache_pages(struct address_space *mapping,

pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
- index = mapping->writeback_index; /* Start from prev offset */
+ writeback_index = mapping->writeback_index; /* prev offset */
+ index = writeback_index;
+ if (index == 0)
+ cycled = 1;
+ else
+ cycled = 0;
end = -1;
} else {
index = wbc->range_start >> PAGE_CACHE_SHIFT;
end = wbc->range_end >> PAGE_CACHE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
- scanned = 1;
+ cycled = 1; /* ignore range_cyclic tests */
}
retry:
- while (!done && (index <= end) &&
- (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_DIRTY,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
- unsigned i;
+ done_index = index;
+ while (!done && (index <= end)) {
+ int i;
+
+ nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+ PAGECACHE_TAG_DIRTY,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ if (nr_pages == 0)
+ break;

- scanned = 1;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];

/*
- * At this point we hold neither mapping->tree_lock nor
- * lock on the page itself: the page may be truncated or
- * invalidated (changing page->mapping to NULL), or even
- * swizzled back from swapper_space to tmpfs file
- * mapping
+ * At this point, the page may be truncated or
+ * invalidated (changing page->mapping to NULL), or
+ * even swizzled back from swapper_space to tmpfs file
+ * mapping. However, page->index will not change
+ * because we have a reference on the page.
*/
+ if (page->index > end) {
+ /*
+ * can't be range_cyclic (1st pass) because
+ * end == -1 in that case.
+ */
+ done = 1;
+ break;
+ }
+
+ done_index = page->index + 1;
+
lock_page(page);

+ /*
+ * Page truncated or invalidated. We can freely skip it
+ * then, even for data integrity operations: the page
+ * has disappeared concurrently, so there could be no
+ * real expectation of this data integrity operation
+ * even if there is now a new, dirty page at the same
+ * pagecache address.
+ */
if (unlikely(page->mapping != mapping)) {
+continue_unlock:
unlock_page(page);
continue;
}

- if (!wbc->range_cyclic && page->index > end) {
- done = 1;
- unlock_page(page);
- continue;
+ if (!PageDirty(page)) {
+ /* someone wrote it for us */
+ goto continue_unlock;
}

- if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
-
- if (PageWriteback(page) ||
- !clear_page_dirty_for_io(page)) {
- unlock_page(page);
- continue;
+ if (PageWriteback(page)) {
+ if (wbc->sync_mode != WB_SYNC_NONE)
+ wait_on_page_writeback(page);
+ else
+ goto continue_unlock;
}

+ BUG_ON(PageWriteback(page));
+ if (!clear_page_dirty_for_io(page))
+ goto continue_unlock;
+
ret = (*writepage)(page, wbc, data);

- if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
- unlock_page(page);
- ret = 0;
+ if (unlikely(ret)) {
+ if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ unlock_page(page);
+ ret = 0;
+ } else {
+ /*
+ * done_index is set past this page,
+ * so media errors will not choke
+ * background writeout for the entire
+ * file. This has consequences for
+ * range_cyclic semantics (ie. it may
+ * not be suitable for data integrity
+ * writeout).
+ */
+ done = 1;
+ break;
+ }
+ }
+
+ if (wbc->sync_mode == WB_SYNC_NONE) {
+ wbc->nr_to_write--;
+ if (wbc->nr_to_write <= 0) {
+ done = 1;
+ break;
+ }
}
- if (ret || (--(wbc->nr_to_write) <= 0))
- done = 1;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
done = 1;
+ break;
}
}
pagevec_release(&pvec);
cond_resched();
}
- if (!scanned && !done) {
+ if (!cycled) {
/*
+ * range_cyclic:
* We hit the last page and there is more work to be done: wrap
* back to the start of the file
*/
- scanned = 1;
+ cycled = 1;
index = 0;
+ end = writeback_index - 1;
goto retry;
}
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = index;
+ mapping->writeback_index = done_index;

if (wbc->range_cont)
wbc->range_start = index << PAGE_CACHE_SHIFT;
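
The write_cache_pages() rework above makes the range_cyclic behaviour
explicit: scan from the saved writeback_index to the end of the file, then
wrap around exactly once and cover 0 .. writeback_index - 1, remembering in
done_index where to resume next time.  A toy sketch of that two-pass cyclic
walk over a plain array; the array contents and indices are invented:

#include <stdio.h>

#define NR 8

int main(void)
{
	int dirty[NR] = { 0, 1, 0, 1, 1, 0, 0, 1 };
	int writeback_index = 5;	/* where the previous scan stopped */
	int index = writeback_index;
	int end = NR - 1;
	int cycled = (index == 0);

retry:
	for (; index <= end; index++) {
		if (dirty[index]) {
			printf("writing page %d\n", index);
			dirty[index] = 0;
		}
	}
	if (!cycled) {			/* wrap back to the start, once */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	return 0;
}
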
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1ab341e..f57d576 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -576,10 +576,6 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
else if (!ret) {
if (spliced)
break;
- if (flags & SPLICE_F_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
if (sock_flag(sk, SOCK_DONE))
break;
if (sk->sk_err) {
@@ -597,6 +593,10 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
ret = -ENOTCONN;
break;
}
+ if (flags & SPLICE_F_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
if (!timeo) {
ret = -EAGAIN;
break;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 29c7c99..52ee1dc 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -298,6 +298,10 @@ static void fib6_dump_end(struct netlink_callback *cb)
struct fib6_walker_t *w = (void*)cb->args[2];

if (w) {
+ if (cb->args[4]) {
+ cb->args[4] = 0;
+ fib6_walker_unlink(w);
+ }
cb->args[2] = 0;
kfree(w);
}
@@ -330,15 +334,12 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
read_lock_bh(&table->tb6_lock);
res = fib6_walk_continue(w);
read_unlock_bh(&table->tb6_lock);
- if (res != 0) {
- if (res < 0)
- fib6_walker_unlink(w);
- goto end;
+ if (res <= 0) {
+ fib6_walker_unlink(w);
+ cb->args[4] = 0;
}
- fib6_walker_unlink(w);
- cb->args[4] = 0;
}
-end:
+
return res;
}

diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 246f906..ea51fcd 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -637,8 +637,9 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
break;

n->next = *ins;
- wmb();
+ tcf_tree_lock(tp);
*ins = n;
+ tcf_tree_unlock(tp);

*arg = (unsigned long)n;
return 0;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index d14f020..d2943a4 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -924,6 +924,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
}
}
sch->qstats.overlimits++;
+ qdisc_watchdog_cancel(&q->watchdog);
qdisc_watchdog_schedule(&q->watchdog, next_event);
fin:
return skb;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 7c622af..649d174 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3635,6 +3635,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
+ struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;

@@ -3664,6 +3665,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto discard_noforce;

+ /* Silently discard the chunk if stream-id is not valid */
+ sctp_walk_fwdtsn(skip, chunk) {
+ if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+ goto discard_noforce;
+ }
+
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
@@ -3695,6 +3702,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
+ struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;

@@ -3724,6 +3732,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto gen_shutdown;

+ /* Silently discard the chunk if stream-id is not valid */
+ sctp_walk_fwdtsn(skip, chunk) {
+ if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+ goto gen_shutdown;
+ }
+
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 9b4e0e9..3c0f421 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -270,6 +270,7 @@ long keyctl_join_session_keyring(const char __user *_name)

/* join the session */
ret = join_session_keyring(name);
+ kfree(name);

error:
return ret;
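
The keyctl fix above plugs a memory leak: the kernel duplicates the
user-supplied keyring name, and that copy has to be freed once
join_session_keyring() has consumed it.  A userspace sketch of the same
ownership rule, with strdup() standing in for the kernel-side duplication
and a stub join_session() in place of the real call:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long join_session(const char *name)
{
	printf("joining session keyring \"%s\"\n", name);
	return 0;
}

int main(void)
{
	char *name = strdup("_ses.example");	/* our copy, we own it */
	long ret;

	if (!name)
		return 1;
	ret = join_session(name);
	free(name);				/* the fix: drop our copy */
	return (int)ret;
}
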
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 591f62f..8c857d5 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -629,6 +629,36 @@ static struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = {
HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Capture Source",
+ .info = ad198x_mux_enum_info,
+ .get = ad198x_mux_enum_get,
+ .put = ad198x_mux_enum_put,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "External Amplifier",
+ .info = ad198x_eapd_info,
+ .get = ad198x_eapd_get,
+ .put = ad198x_eapd_put,
+ .private_value = 0x1b | (1 << 8), /* port-D, inversed */
+ },
+ { } /* end */
+};
+
+static struct snd_kcontrol_new ad1986a_samsung_mixers[] = {
+ HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
+ HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
+ HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
@@ -917,6 +947,7 @@ enum {
AD1986A_LAPTOP_EAPD,
AD1986A_LAPTOP_AUTOMUTE,
AD1986A_ULTRA,
+ AD1986A_SAMSUNG,
AD1986A_MODELS
};

@@ -927,6 +958,7 @@ static const char *ad1986a_models[AD1986A_MODELS] = {
[AD1986A_LAPTOP_EAPD] = "laptop-eapd",
[AD1986A_LAPTOP_AUTOMUTE] = "laptop-automute",
[AD1986A_ULTRA] = "ultra",
+ [AD1986A_SAMSUNG] = "samsung",
};

static struct snd_pci_quirk ad1986a_cfg_tbl[] = {
@@ -949,9 +981,9 @@ static struct snd_pci_quirk ad1986a_cfg_tbl[] = {
SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba", AD1986A_LAPTOP_EAPD),
SND_PCI_QUIRK(0x144d, 0xb03c, "Samsung R55", AD1986A_3STACK),
SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_LAPTOP),
- SND_PCI_QUIRK(0x144d, 0xc023, "Samsung X60", AD1986A_LAPTOP_EAPD),
- SND_PCI_QUIRK(0x144d, 0xc024, "Samsung R65", AD1986A_LAPTOP_EAPD),
- SND_PCI_QUIRK(0x144d, 0xc026, "Samsung X11", AD1986A_LAPTOP_EAPD),
+ SND_PCI_QUIRK(0x144d, 0xc023, "Samsung X60", AD1986A_SAMSUNG),
+ SND_PCI_QUIRK(0x144d, 0xc024, "Samsung R65", AD1986A_SAMSUNG),
+ SND_PCI_QUIRK(0x144d, 0xc026, "Samsung X11", AD1986A_SAMSUNG),
SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_ULTRA),
SND_PCI_QUIRK(0x144d, 0xc504, "Samsung Q35", AD1986A_3STACK),
SND_PCI_QUIRK(0x17aa, 0x1011, "Lenovo M55", AD1986A_LAPTOP),
@@ -1033,6 +1065,17 @@ static int patch_ad1986a(struct hda_codec *codec)
break;
case AD1986A_LAPTOP_EAPD:
spec->mixers[0] = ad1986a_laptop_eapd_mixers;
+ spec->num_init_verbs = 2;
+ spec->init_verbs[1] = ad1986a_eapd_init_verbs;
+ spec->multiout.max_channels = 2;
+ spec->multiout.num_dacs = 1;
+ spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
+ if (!is_jack_available(codec, 0x25))
+ spec->multiout.dig_out_nid = 0;
+ spec->input_mux = &ad1986a_laptop_eapd_capture_source;
+ break;
+ case AD1986A_SAMSUNG:
+ spec->mixers[0] = ad1986a_samsung_mixers;
spec->num_init_verbs = 3;
spec->init_verbs[1] = ad1986a_eapd_init_verbs;
spec->init_verbs[2] = ad1986a_automic_verbs;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a1a3a34..7225f0f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9882,6 +9882,7 @@ static struct snd_pci_quirk alc262_cfg_tbl[] = {
SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FUJITSU),
SND_PCI_QUIRK(0x144d, 0xc032, "Samsung Q1 Ultra", ALC262_ULTRA),
SND_PCI_QUIRK(0x144d, 0xc039, "Samsung Q1U EL", ALC262_ULTRA),
+ SND_PCI_QUIRK(0x144d, 0xc510, "Samsung Q45", ALC262_HIPPO),
SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000 y410", ALC262_LENOVO_3000),
SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_BENQ_ED8),
SND_PCI_QUIRK(0x17ff, 0x058d, "Benq T31-16", ALC262_BENQ_T31),