Merge "ASoC: dapm: lock mixer & mux update power with DAPM mutex"
diff --git a/Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt b/Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt
index fdba7c2..ac49b02 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt
@@ -30,6 +30,7 @@
min y, max x and max y values.
- goodix,i2c-pull-up : To specify pull up is required.
- goodix,no-force-update : To specify force update is allowed.
+ - goodix,enable-power-off : Power off touchscreen during suspend.
- goodix,button-map : Button map of key codes. The number of key codes
depend on panel.
- goodix,cfg-data0 : Touch screen controller config data group 0. Ask vendor
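For illustration, a consumer node using the new property might look like the
fragment below. This is only a sketch: the compatible string, node name, I2C
address, GPIO numbers and button codes are placeholders, while the property
names are taken from the binding above ("reset-gpios" matches the name the
driver parses in gt9xx.c later in this series).

	ts@5d {
		compatible = "goodix,gt9xx";
		reg = <0x5d>;
		reset-gpios = <&msmgpio 16 0x00>;
		goodix,i2c-pull-up;
		goodix,enable-power-off;
		goodix,button-map = <158 172 139>;
	};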
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 5e686a4f..56bdc59 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -407,6 +407,17 @@
Required properties:
- compatible : "qcom,msm8974-audio-taiko"
- qcom,model : The user-visible name of this sound card.
+- reg : Offset and length of the register region(s) for MI2S/PCM MUX
+- reg-names : Register region name(s) referenced in reg above
+ Required register resource entries are:
+ "lpaif_pri_mode_muxsel": Physical address of MUX to select between
+ Primary PCM and Primary MI2S
+ "lpaif_sec_mode_muxsel": Physical address of MUX to select between
+ Secondary PCM and Secondary MI2S
+ "lpaif_tert_mode_muxsel": Physical address of MUX to select between
+ Primary PCM and Tertiary MI2S
+ "lpaif_quat_mode_muxsel": Physical address of MUX to select between
+ Secondary PCM and Quaternary MI2S
- qcom,audio-routing : A list of the connections between audio components.
Each entry is a pair of strings, the first being the connection's sink,
the second being the connection's source.
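For quick reference, the sound node added to msm8974.dtsi later in this series
pairs the register regions with these names as follows (a sketch assembled from
that hunk; the addresses are copied from it):

	sound {
		compatible = "qcom,msm8974-audio-taiko";
		qcom,model = "msm8974-taiko-snd-card";
		reg = <0xfe02b000 0x4>,
		      <0xfe02c000 0x4>,
		      <0xfe02d000 0x4>,
		      <0xfe02e000 0x4>;
		reg-names = "lpaif_pri_mode_muxsel",
			    "lpaif_sec_mode_muxsel",
			    "lpaif_tert_mode_muxsel",
			    "lpaif_quat_mode_muxsel";
	};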
diff --git a/arch/arm/boot/dts/dsi-panel-hx8379a-wvga-video.dtsi b/arch/arm/boot/dts/dsi-panel-hx8379a-wvga-video.dtsi
index 23b65f3..92e6fc1 100644
--- a/arch/arm/boot/dts/dsi-panel-hx8379a-wvga-video.dtsi
+++ b/arch/arm/boot/dts/dsi-panel-hx8379a-wvga-video.dtsi
@@ -81,14 +81,47 @@
00 00 00 00
39 01 00 00 00 00 24
E0 79 05 0F
- 14 26 20 3F
- 2A 43 04 0C
- 11 15 17 15
- 15 10 13 05
- 0F 14 26 20
- 3F 2A 43 04
- 0C 11 15 17
- 15 15 10 13
+ 14 23 24 3F
+ 30 46 06 10
+ 13 16 17 16
+ 16 13 18 05
+ 0F 14 23 24
+ 3F 30 46 06
+ 10 13 16 17
+ 16 16 13 18
+ 39 01 00 00 00 00 80
+ C1 01 00 07
+ 10 17 1D 2A
+ 33 3A 43 4A
+ 52 5B 64 6D
+ 78 7F 88 90
+ 98 A0 A9 B2
+ B9 C1 C9 D1
+ D7 DF E6 ED
+ F4 FA FD 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 08 10 18
+ 20 28 30 38
+ 40 47 4F 58
+ 60 68 70 78
+ 80 88 90 98
+ A0 A9 B1 B9
+ C1 C9 D1 D8
+ E0 E8 F0 F9
+ FE 00 00 00
+ 00 00 00 00
+ 00 00 00 08
+ 10 18 1E 26
+ 2E 34 3A 41
+ 49 4F 58 5E
+ 67 6F 77 80
+ 88 8F 97 9F
+ A7 AF B8 BF
+ C7 D1 D8 E3
+ EA F6 FF 00
+ 00 00 00 00
+ 00 00 00 00
23 01 00 00 00 00 02
cc 02
39 01 00 00 00 00 05
diff --git a/arch/arm/boot/dts/msm8226-bus.dtsi b/arch/arm/boot/dts/msm8226-bus.dtsi
index d87aa3e..74b4a30 100644
--- a/arch/arm/boot/dts/msm8226-bus.dtsi
+++ b/arch/arm/boot/dts/msm8226-bus.dtsi
@@ -78,7 +78,7 @@
mas-vfe {
cell-id = <29>;
label = "mas-vfe";
- qcom,masterp = <16>;
+ qcom,masterp = <7>;
qcom,tier = <2>;
qcom,hw-sel = "NoC";
qcom,perm-mode = "Bypass";
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 311944c..e612df7 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -741,6 +741,14 @@
sound {
compatible = "qcom,msm8974-audio-taiko";
qcom,model = "msm8974-taiko-snd-card";
+ reg = <0xfe02b000 0x4>,
+ <0xfe02c000 0x4>,
+ <0xfe02d000 0x4>,
+ <0xfe02e000 0x4>;
+ reg-names = "lpaif_pri_mode_muxsel",
+ "lpaif_sec_mode_muxsel",
+ "lpaif_tert_mode_muxsel",
+ "lpaif_quat_mode_muxsel";
qcom,audio-routing =
"RX_BIAS", "MCLK",
diff --git a/arch/arm/boot/dts/msm8974pro.dtsi b/arch/arm/boot/dts/msm8974pro.dtsi
index 6d5c862..174cee6 100644
--- a/arch/arm/boot/dts/msm8974pro.dtsi
+++ b/arch/arm/boot/dts/msm8974pro.dtsi
@@ -39,6 +39,17 @@
/delete-property/ qcom,pmic-sw-mode-temp-hysteresis;
/delete-property/ qcom,pmic-sw-mode-regs;
};
+
+ sound {
+ reg = <0xfe02c000 0x4>,
+ <0xfe02d000 0x4>,
+ <0xfe02e000 0x4>,
+ <0xfe02f000 0x4>;
+ reg-names = "lpaif_pri_mode_muxsel",
+ "lpaif_sec_mode_muxsel",
+ "lpaif_tert_mode_muxsel",
+ "lpaif_quat_mode_muxsel";
+ };
};
/* GPU overrides */
@@ -48,20 +59,29 @@
qcom,initial-pwrlevel = <6>;
+ qcom,msm-bus,num-cases = <10>;
/* Updated bus bandwidth requirements */
qcom,msm-bus,vectors-KBps =
/* Off */
<26 512 0 0>, <89 604 0 0>,
/* SVS */
- <26 512 0 2400000>, <89 604 0 3000000>,
+ <26 512 0 2400000>, <89 604 0 3200000>,
/* Nominal / SVS */
- <26 512 0 4656000>, <89 604 0 3000000>,
- /* Nominal */
- <26 512 0 4656000>, <89 604 0 5120000>,
- /* Turbo / Nominal */
- <26 512 0 7464000>, <89 604 0 5120000>,
+ <26 512 0 3680000>, <89 604 0 3200000>,
+ /* Nominal / Nominal */
+ <26 512 0 3680000>, <89 604 0 5280000>,
+ /* Nominal / Nominal */
+ <26 512 0 4912000>, <89 604 0 5280000>,
+ /* Nominal / Turbo */
+ <26 512 0 4912000>, <89 604 0 6224000>,
+ /* Turbo / Turbo */
+ <26 512 0 7464000>, <89 604 0 6224000>,
+ /* Nominal / Turbo */
+ <26 512 0 4912000>, <89 604 0 7400000>,
/* Turbo */
- <26 512 0 7464000>, <89 604 0 6400000>;
+ <26 512 0 7464000>, <89 604 0 7400000>,
+ /* Turbo */
+ <26 512 0 7464000>, <89 604 0 9248000>;
qcom,gpu-pwrlevels {
#address-cells = <1>;
@@ -72,35 +92,35 @@
qcom,gpu-pwrlevel@0 {
reg = <0>;
qcom,gpu-freq = <578000000>;
- qcom,bus-freq = <5>;
+ qcom,bus-freq = <9>;
qcom,io-fraction = <33>;
};
qcom,gpu-pwrlevel@1 {
reg = <1>;
qcom,gpu-freq = <462400000>;
- qcom,bus-freq = <4>;
+ qcom,bus-freq = <8>;
qcom,io-fraction = <33>;
};
qcom,gpu-pwrlevel@2 {
reg = <2>;
qcom,gpu-freq = <462400000>;
- qcom,bus-freq = <3>;
+ qcom,bus-freq = <7>;
qcom,io-fraction = <66>;
};
qcom,gpu-pwrlevel@3 {
reg = <3>;
qcom,gpu-freq = <389000000>;
- qcom,bus-freq = <4>;
+ qcom,bus-freq = <6>;
qcom,io-fraction = <66>;
};
qcom,gpu-pwrlevel@4 {
reg = <4>;
qcom,gpu-freq = <389000000>;
- qcom,bus-freq = <3>;
+ qcom,bus-freq = <5>;
qcom,io-fraction = <66>;
};
diff --git a/arch/arm/mach-msm/board-8226-gpiomux.c b/arch/arm/mach-msm/board-8226-gpiomux.c
index 4dcbc3a..34e23d1 100644
--- a/arch/arm/mach-msm/board-8226-gpiomux.c
+++ b/arch/arm/mach-msm/board-8226-gpiomux.c
@@ -303,13 +303,13 @@
static struct gpiomux_setting goodix_ldo_en_sus_cfg = {
.func = GPIOMUX_FUNC_GPIO,
- .drv = GPIOMUX_DRV_6MA,
- .pull = GPIOMUX_PULL_UP,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
};
static struct gpiomux_setting goodix_int_act_cfg = {
.func = GPIOMUX_FUNC_GPIO,
- .drv = GPIOMUX_DRV_8MA,
+ .drv = GPIOMUX_DRV_6MA,
.pull = GPIOMUX_PULL_UP,
};
@@ -321,14 +321,14 @@
static struct gpiomux_setting goodix_reset_act_cfg = {
.func = GPIOMUX_FUNC_GPIO,
- .drv = GPIOMUX_DRV_8MA,
+ .drv = GPIOMUX_DRV_6MA,
.pull = GPIOMUX_PULL_UP,
};
static struct gpiomux_setting goodix_reset_sus_cfg = {
.func = GPIOMUX_FUNC_GPIO,
- .drv = GPIOMUX_DRV_8MA,
- .pull = GPIOMUX_PULL_UP,
+ .drv = GPIOMUX_DRV_2MA,
+ .pull = GPIOMUX_PULL_DOWN,
};
static struct msm_gpiomux_config msm_skuf_blsp_configs[] __initdata = {
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 97dc26d..91b90a8 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -419,6 +419,8 @@
/* Notify the DCI process that the peripheral DCI Channel is up */
for (i = 0; i < MAX_DCI_CLIENTS; i++) {
+ if (!driver->dci_client_tbl[i].client)
+ continue;
if (driver->dci_client_tbl[i].list & peripheral_mask) {
info.si_signo = driver->dci_client_tbl[i].signal_type;
stat = send_sig_info(
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 50a0d12..becb611 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2304,12 +2304,18 @@
_adreno_ft_restart_device(struct kgsl_device *device,
struct kgsl_context *context)
{
- /* If device soft reset fails try hard reset */
- if (adreno_soft_reset(device))
- KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
- else
- /* Soft reset is successful */
- goto reset_done;
+ /*
+ * If device soft reset fails, try a hard reset. Don't attempt a
+ * soft reset on page faults; in that case go straight to a hard
+ * reset.
+ */
+ if (!(device->mmu.fault)) {
+ if (adreno_soft_reset(device))
+ KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
+ else
+ /* Soft reset is successful */
+ goto reset_done;
+ }
/* restart device */
if (adreno_stop(device)) {
@@ -2432,7 +2438,6 @@
struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
unsigned int long_ib = 0;
static int no_context_ft;
- struct kgsl_mmu *mmu = &device->mmu;
context = kgsl_context_get(device, ft_data->context_id);
@@ -2507,8 +2512,6 @@
/* Do not try to replay if hang is due to a pagefault */
if (context && test_bit(KGSL_CONTEXT_PAGEFAULT, &context->priv)) {
- /* Resume MMU */
- mmu->mmu_ops->mmu_pagefault_resume(mmu);
if ((ft_data->context_id == context->id) &&
(ft_data->global_eop == context->pagefault_ts)) {
ft_data->ft_policy &= ~KGSL_FT_REPLAY;
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index acb3b17..6275a72 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -2052,7 +2052,6 @@
.mmu_setstate = kgsl_iommu_setstate,
.mmu_device_setstate = kgsl_iommu_default_setstate,
.mmu_pagefault = NULL,
- .mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
.mmu_enable_clk = kgsl_iommu_enable_clk,
.mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index faba81e..64705f8 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -141,8 +141,6 @@
void (*mmu_pagefault) (struct kgsl_mmu *mmu);
phys_addr_t (*mmu_get_current_ptbase)
(struct kgsl_mmu *mmu);
- void (*mmu_pagefault_resume)
- (struct kgsl_mmu *mmu);
void (*mmu_disable_clk_on_ts)
(struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
int (*mmu_enable_clk)
diff --git a/drivers/input/touchscreen/ft5x06_ts.c b/drivers/input/touchscreen/ft5x06_ts.c
index ef76e69..f366364 100644
--- a/drivers/input/touchscreen/ft5x06_ts.c
+++ b/drivers/input/touchscreen/ft5x06_ts.c
@@ -131,8 +131,9 @@
#define FT_FW_PKT_DLY_MS 20
#define FT_FW_LAST_PKT 0x6ffa
#define FT_EARSE_DLY_MS 100
+#define FT_55_AA_DLY_NS 5000
-#define FT_UPGRADE_LOOP 10
+#define FT_UPGRADE_LOOP 30
#define FT_CAL_START 0x04
#define FT_CAL_FIN 0x00
#define FT_CAL_STORE 0x05
@@ -142,6 +143,30 @@
#define FT_INFO_MAX_LEN 512
+#define FT_BLOADER_SIZE_OFF 12
+#define FT_BLOADER_NEW_SIZE 30
+#define FT_DATA_LEN_OFF_OLD_FW 8
+#define FT_DATA_LEN_OFF_NEW_FW 14
+#define FT_FINISHING_PKT_LEN_OLD_FW 6
+#define FT_FINISHING_PKT_LEN_NEW_FW 12
+#define FT_MAGIC_BLOADER_Z7 0x7bfa
+#define FT_MAGIC_BLOADER_LZ4 0x6ffa
+#define FT_MAGIC_BLOADER_GZF_30 0x7ff4
+#define FT_MAGIC_BLOADER_GZF 0x7bf4
+
+enum {
+ FT_BLOADER_VERSION_LZ4 = 0,
+ FT_BLOADER_VERSION_Z7 = 1,
+ FT_BLOADER_VERSION_GZF = 2,
+};
+
+enum {
+ FT_FT5336_FAMILY_ID_0x11 = 0x11,
+ FT_FT5336_FAMILY_ID_0x12 = 0x12,
+ FT_FT5336_FAMILY_ID_0x13 = 0x13,
+ FT_FT5336_FAMILY_ID_0x14 = 0x14,
+};
+
#define FT_STORE_TS_INFO(buf, id, name, max_tch, group_id, fw_vkey_support, \
fw_name, fw_maj, fw_min, fw_sub_min) \
snprintf(buf, FT_INFO_MAX_LEN, \
@@ -645,11 +670,20 @@
u8 reset_reg;
u8 w_buf[FT_MAX_WR_BUF] = {0}, r_buf[FT_MAX_RD_BUF] = {0};
u8 pkt_buf[FT_FW_PKT_LEN + FT_FW_PKT_META_LEN];
- int rc, i, j, temp;
+ int i, j, temp;
u32 pkt_num, pkt_len;
+ u8 is_5336_new_bootloader = false;
+ u8 is_5336_fwsize_30 = false;
u8 fw_ecc;
+ /* determine firmware size */
+ if (*(data + data_len - FT_BLOADER_SIZE_OFF) == FT_BLOADER_NEW_SIZE)
+ is_5336_fwsize_30 = true;
+ else
+ is_5336_fwsize_30 = false;
+
for (i = 0, j = 0; i < FT_UPGRADE_LOOP; i++) {
+ msleep(FT_EARSE_DLY_MS);
/* reset - write 0xaa and 0x55 to reset register */
if (ts_data->family_id == FT6X06_ID)
reset_reg = FT_RST_CMD_REG2;
@@ -660,16 +694,17 @@
msleep(info.delay_aa);
ft5x0x_write_reg(client, reset_reg, FT_UPGRADE_55);
- msleep(info.delay_55);
+ if (i <= (FT_UPGRADE_LOOP / 2))
+ msleep(info.delay_55 + i * 3);
+ else
+ msleep(info.delay_55 - (i - (FT_UPGRADE_LOOP / 2)) * 2);
/* Enter upgrade mode */
w_buf[0] = FT_UPGRADE_55;
- w_buf[1] = FT_UPGRADE_AA;
- do {
- j++;
- rc = ft5x06_i2c_write(client, w_buf, 2);
- msleep(FT_RETRY_DLY);
- } while (rc <= 0 && j < FT_MAX_TRIES);
+ ft5x06_i2c_write(client, w_buf, 1);
+ usleep(FT_55_AA_DLY_NS);
+ w_buf[0] = FT_UPGRADE_AA;
+ ft5x06_i2c_write(client, w_buf, 1);
/* check READ_ID */
msleep(info.delay_readid);
@@ -692,17 +727,40 @@
return -EIO;
}
+ w_buf[0] = 0xcd;
+ ft5x06_i2c_read(client, w_buf, 1, r_buf, 1);
+
+ if (r_buf[0] <= 4)
+ is_5336_new_bootloader = FT_BLOADER_VERSION_LZ4;
+ else if (r_buf[0] == 7)
+ is_5336_new_bootloader = FT_BLOADER_VERSION_Z7;
+ else if (r_buf[0] >= 0x0f &&
+ ((ts_data->family_id == FT_FT5336_FAMILY_ID_0x11) ||
+ (ts_data->family_id == FT_FT5336_FAMILY_ID_0x12) ||
+ (ts_data->family_id == FT_FT5336_FAMILY_ID_0x13) ||
+ (ts_data->family_id == FT_FT5336_FAMILY_ID_0x14)))
+ is_5336_new_bootloader = FT_BLOADER_VERSION_GZF;
+ else
+ is_5336_new_bootloader = FT_BLOADER_VERSION_LZ4;
+
/* erase app and panel parameter area */
w_buf[0] = FT_ERASE_APP_REG;
ft5x06_i2c_write(client, w_buf, 1);
msleep(info.delay_erase_flash);
- w_buf[0] = FT_ERASE_PANEL_REG;
- ft5x06_i2c_write(client, w_buf, 1);
+ if (is_5336_fwsize_30) {
+ w_buf[0] = FT_ERASE_PANEL_REG;
+ ft5x06_i2c_write(client, w_buf, 1);
+ }
msleep(FT_EARSE_DLY_MS);
/* program firmware */
- data_len = data_len - 8;
+ if (is_5336_new_bootloader == FT_BLOADER_VERSION_LZ4
+ || is_5336_new_bootloader == FT_BLOADER_VERSION_Z7)
+ data_len = data_len - FT_DATA_LEN_OFF_OLD_FW;
+ else
+ data_len = data_len - FT_DATA_LEN_OFF_NEW_FW;
+
pkt_num = (data_len) / FT_FW_PKT_LEN;
pkt_len = FT_FW_PKT_LEN;
pkt_buf[0] = FT_FW_START_REG;
@@ -745,17 +803,45 @@
}
/* send the finishing packet */
- for (i = 0; i < 6; i++) {
- temp = FT_FW_LAST_PKT + i;
- pkt_buf[2] = (u8) (temp >> 8);
- pkt_buf[3] = (u8) temp;
- temp = 1;
- pkt_buf[4] = (u8) (temp >> 8);
- pkt_buf[5] = (u8) temp;
- pkt_buf[6] = data[data_len + i];
- fw_ecc ^= pkt_buf[6];
- ft5x06_i2c_write(client, pkt_buf, temp + FT_FW_PKT_META_LEN);
- msleep(FT_FW_PKT_DLY_MS);
+ if (is_5336_new_bootloader == FT_BLOADER_VERSION_LZ4 ||
+ is_5336_new_bootloader == FT_BLOADER_VERSION_Z7) {
+ for (i = 0; i < FT_FINISHING_PKT_LEN_OLD_FW; i++) {
+ if (is_5336_new_bootloader == FT_BLOADER_VERSION_Z7)
+ temp = FT_MAGIC_BLOADER_Z7 + i;
+ else if (is_5336_new_bootloader ==
+ FT_BLOADER_VERSION_LZ4)
+ temp = FT_MAGIC_BLOADER_LZ4 + i;
+ pkt_buf[2] = (u8)(temp >> 8);
+ pkt_buf[3] = (u8)temp;
+ temp = 1;
+ pkt_buf[4] = (u8)(temp >> 8);
+ pkt_buf[5] = (u8)temp;
+ pkt_buf[6] = data[data_len + i];
+ fw_ecc ^= pkt_buf[6];
+
+ ft5x06_i2c_write(client,
+ pkt_buf, temp + FT_FW_PKT_META_LEN);
+ msleep(FT_FW_PKT_DLY_MS);
+ }
+ } else if (is_5336_new_bootloader == FT_BLOADER_VERSION_GZF) {
+ for (i = 0; i < FT_FINISHING_PKT_LEN_NEW_FW; i++) {
+ if (is_5336_fwsize_30)
+ temp = FT_MAGIC_BLOADER_GZF_30 + i;
+ else
+ temp = FT_MAGIC_BLOADER_GZF + i;
+ pkt_buf[2] = (u8)(temp >> 8);
+ pkt_buf[3] = (u8)temp;
+ temp = 1;
+ pkt_buf[4] = (u8)(temp >> 8);
+ pkt_buf[5] = (u8)temp;
+ pkt_buf[6] = data[data_len + i];
+ fw_ecc ^= pkt_buf[6];
+
+ ft5x06_i2c_write(client,
+ pkt_buf, temp + FT_FW_PKT_META_LEN);
+ msleep(FT_FW_PKT_DLY_MS);
+
+ }
}
/* verify checksum */
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.c b/drivers/input/touchscreen/gt9xx/gt9xx.c
index 8b08ac9..c3954a1 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.c
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.c
@@ -95,6 +95,8 @@
static void gtp_reset_guitar(struct goodix_ts_data *ts, int ms);
static void gtp_int_sync(struct goodix_ts_data *ts, int ms);
static int gtp_i2c_test(struct i2c_client *client);
+static int goodix_power_off(struct goodix_ts_data *ts);
+static int goodix_power_on(struct goodix_ts_data *ts);
#if defined(CONFIG_FB)
static int fb_notifier_callback(struct notifier_block *self,
@@ -758,7 +760,7 @@
ts: private data.
Output:
Executive outcomes.
- 1: succeed, otherwise failed.
+ >0: succeed, otherwise failed.
*******************************************************/
static s8 gtp_enter_sleep(struct goodix_ts_data *ts)
{
@@ -769,20 +771,37 @@
(u8)GTP_REG_SLEEP, 5};
ret = gpio_direction_output(ts->pdata->irq_gpio, 0);
- usleep(5000);
- while (retry++ < 5) {
- ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
- if (ret > 0) {
- dev_dbg(&ts->client->dev,
- "GTP enter sleep!");
- return ret;
+ if (ret)
+ dev_err(&ts->client->dev,
+ "GTP sleep: Cannot reconfig gpio %d.\n",
+ ts->pdata->irq_gpio);
+ if (ts->pdata->enable_power_off) {
+ ret = gpio_direction_output(ts->pdata->reset_gpio, 0);
+ if (ret)
+ dev_err(&ts->client->dev,
+ "GTP sleep: Cannot reconfig gpio %d.\n",
+ ts->pdata->reset_gpio);
+ ret = goodix_power_off(ts);
+ if (ret) {
+ dev_err(&ts->client->dev, "GTP power off failed.\n");
+ return 0;
}
- msleep(20);
+ return 1;
+ } else {
+ usleep(5000);
+ while (retry++ < 5) {
+ ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
+ if (ret == 1) {
+ dev_dbg(&ts->client->dev, "GTP enter sleep!");
+ return ret;
+ }
+ msleep(20);
+ }
+ dev_err(&ts->client->dev, "GTP send sleep cmd failed.\n");
+ return ret;
}
- dev_err(&ts->client->dev, "GTP send sleep cmd failed.\n");
- return ret;
}
-#endif
+#endif /* !GTP_SLIDE_WAKEUP */
/*******************************************************
Function:
@@ -798,17 +817,36 @@
u8 retry = 0;
s8 ret = -1;
-#if GTP_POWER_CTRL_SLEEP
- gtp_reset_guitar(ts, 20);
+ if (ts->pdata->enable_power_off) {
+ ret = gpio_direction_output(ts->pdata->irq_gpio, 0);
+ if (ret)
+ dev_err(&ts->client->dev,
+ "GTP wakeup: Cannot reconfig gpio %d.\n",
+ ts->pdata->irq_gpio);
+ ret = gpio_direction_output(ts->pdata->reset_gpio, 0);
+ if (ret)
+ dev_err(&ts->client->dev,
+ "GTP wakeup: Cannot reconfig gpio %d.\n",
+ ts->pdata->reset_gpio);
+ ret = goodix_power_on(ts);
+ if (ret) {
+ dev_err(&ts->client->dev, "GTP power on failed.\n");
+ return 0;
+ }
- ret = gtp_send_cfg(ts);
- if (ret > 0) {
+ gtp_reset_guitar(ts, 20);
+
+ ret = gtp_send_cfg(ts);
+ if (ret <= 0) {
+ dev_err(&ts->client->dev,
+ "GTP wakeup sleep failed.\n");
+ return ret;
+ }
+
dev_dbg(&ts->client->dev,
- "Wakeup sleep send config success.");
- return 1;
- }
-#else
- while (retry++ < 10) {
+ "Wakeup sleep send config success.");
+ } else {
+err_retry:
#if GTP_SLIDE_WAKEUP
/* wakeup not by slide */
if (DOZE_WAKEUP != doze_status)
@@ -825,7 +863,7 @@
}
#endif
ret = gtp_i2c_test(ts->client);
- if (ret > 0) {
+ if (ret == 2) {
dev_dbg(&ts->client->dev, "GTP wakeup sleep.");
#if (!GTP_SLIDE_WAKEUP)
if (chip_gt9xxs == 0) {
@@ -839,10 +877,10 @@
return ret;
}
gtp_reset_guitar(ts, 20);
+ if (retry++ < 10)
+ goto err_retry;
+ dev_err(&ts->client->dev, "GTP wakeup sleep failed.\n");
}
-#endif
-
- dev_err(&ts->client->dev, "GTP wakeup sleep failed.\n");
return ret;
}
#endif /* !CONFIG_HAS_EARLYSUSPEND && !CONFIG_FB*/
@@ -1292,6 +1330,12 @@
{
int ret;
+ if (ts->power_on) {
+ dev_info(&ts->client->dev,
+ "Device already power on\n");
+ return 0;
+ }
+
if (!IS_ERR(ts->avdd)) {
ret = reg_set_optimum_mode_check(ts->avdd,
GOODIX_VDD_LOAD_MAX_UA);
@@ -1358,6 +1402,7 @@
}
}
+ ts->power_on = true;
return 0;
err_enable_vcc_i2c:
@@ -1376,6 +1421,7 @@
regulator_disable(ts->avdd);
err_enable_avdd:
err_set_opt_avdd:
+ ts->power_on = false;
return ret;
}
@@ -1389,6 +1435,12 @@
{
int ret;
+ if (!ts->power_on) {
+ dev_info(&ts->client->dev,
+ "Device already power off\n");
+ return 0;
+ }
+
if (!IS_ERR(ts->vcc_i2c)) {
ret = regulator_set_voltage(ts->vcc_i2c, 0,
GOODIX_I2C_VTG_MAX_UV);
@@ -1421,6 +1473,7 @@
"Regulator avdd disable failed ret=%d\n", ret);
}
+ ts->power_on = false;
return 0;
}
@@ -1536,6 +1589,9 @@
pdata->no_force_update = of_property_read_bool(np,
"goodix,no-force-update");
+
+ pdata->enable_power_off = of_property_read_bool(np,
+ "goodix,enable-power-off");
/* reset, irq gpio info */
pdata->reset_gpio = of_get_named_gpio_flags(np, "reset-gpios",
0, &pdata->reset_gpio_flags);
@@ -1662,6 +1718,7 @@
spin_lock_init(&ts->irq_lock);
i2c_set_clientdata(client, ts);
ts->gtp_rawdiff_mode = 0;
+ ts->power_on = false;
ret = goodix_power_init(ts);
if (ret) {
@@ -1712,6 +1769,7 @@
goto exit_free_inputdev;
}
+ mutex_init(&ts->lock);
#if defined(CONFIG_FB)
ts->fb_notif.notifier_call = fb_notifier_callback;
ret = fb_register_client(&ts->fb_notif);
@@ -1757,6 +1815,7 @@
init_done = true;
return 0;
exit_free_irq:
+ mutex_destroy(&ts->lock);
#if defined(CONFIG_FB)
if (fb_unregister_client(&ts->fb_notif))
dev_err(&client->dev,
@@ -1813,6 +1872,7 @@
#elif defined(CONFIG_HAS_EARLYSUSPEND)
unregister_early_suspend(&ts->early_suspend);
#endif
+ mutex_destroy(&ts->lock);
#if GTP_CREATE_WR_NODE
uninit_wr_node();
@@ -1868,6 +1928,7 @@
{
int ret = -1, i;
+ mutex_lock(&ts->lock);
#if GTP_ESD_PROTECT
ts->gtp_is_suspend = 1;
gtp_esd_switch(ts->client, SWITCH_OFF);
@@ -1888,12 +1949,13 @@
ret = gtp_enter_sleep(ts);
#endif
- if (ret < 0)
+ if (ret <= 0)
dev_err(&ts->client->dev, "GTP early suspend failed.\n");
/* to avoid waking up while not sleeping,
* delay 48 + 10ms to ensure reliability
*/
msleep(58);
+ mutex_unlock(&ts->lock);
}
/*******************************************************
@@ -1908,13 +1970,14 @@
{
int ret = -1;
+ mutex_lock(&ts->lock);
ret = gtp_wakeup_sleep(ts);
#if GTP_SLIDE_WAKEUP
doze_status = DOZE_DISABLED;
#endif
- if (ret < 0)
+ if (ret <= 0)
dev_err(&ts->client->dev, "GTP resume failed.\n");
if (ts->use_irq)
@@ -1927,6 +1990,7 @@
ts->gtp_is_suspend = 0;
gtp_esd_switch(ts->client, SWITCH_ON);
#endif
+ mutex_unlock(&ts->lock);
}
#if defined(CONFIG_FB)
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.h b/drivers/input/touchscreen/gt9xx/gt9xx.h
index 1fdbfa3..125f6e6 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.h
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.h
@@ -36,6 +36,7 @@
#include <linux/regulator/consumer.h>
#include <linux/firmware.h>
#include <linux/debugfs.h>
+#include <linux/mutex.h>
#if defined(CONFIG_FB)
#include <linux/notifier.h>
@@ -62,6 +63,7 @@
u32 panel_maxy;
bool no_force_update;
bool i2c_pull_up;
+ bool enable_power_off;
size_t config_data_len[GOODIX_MAX_CFG_GROUP];
u8 *config_data[GOODIX_MAX_CFG_GROUP];
};
@@ -89,6 +91,8 @@
u8 fixed_cfg;
u8 esd_running;
u8 fw_error;
+ bool power_on;
+ struct mutex lock;
struct regulator *avdd;
struct regulator *vdd;
struct regulator *vcc_i2c;
@@ -107,7 +111,6 @@
#define GTP_CHANGE_X2Y 0
#define GTP_DRIVER_SEND_CFG 1
#define GTP_HAVE_TOUCH_KEY 1
-#define GTP_POWER_CTRL_SLEEP 0
/* auto updated by .bin file as default */
#define GTP_AUTO_UPDATE 0
@@ -119,6 +122,7 @@
#define GTP_ESD_PROTECT 0
#define GTP_WITH_PEN 0
+/* This cannot work when enable-power-off is on */
#define GTP_SLIDE_WAKEUP 0
/* double-click wakeup, function together with GTP_SLIDE_WAKEUP */
#define GTP_DBL_CLK_WAKEUP 0
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 5f36a4a..fb1caea 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -513,7 +513,9 @@
}
lo_tbl_ptr = cfg_data +
reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
-
+ if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
+ reg_cfg_cmd->u.dmi_info.len =
+ reg_cfg_cmd->u.dmi_info.len / 2;
for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
lo_val = *lo_tbl_ptr++;
if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index 0c9959c..67cf049 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -1065,7 +1065,7 @@
}
dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
- if (IS_ERR(dev)) {
+ if (IS_ERR_OR_NULL(dev)) {
dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
return PTR_ERR(dev);
}
diff --git a/drivers/staging/zram/Kconfig b/drivers/staging/zram/Kconfig
index 9d11a4c..983314c 100644
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/staging/zram/Kconfig
@@ -1,9 +1,6 @@
config ZRAM
tristate "Compressed RAM block device support"
- # X86 dependency is because zsmalloc uses non-portable pte/tlb
- # functions
- depends on BLOCK && SYSFS && X86
- select ZSMALLOC
+ depends on BLOCK && SYSFS && ZSMALLOC
select LZO_COMPRESS
select LZO_DECOMPRESS
default n
@@ -17,7 +14,7 @@
disks and maybe many more.
See zram.txt for more information.
- Project home: http://compcache.googlecode.com/
+ Project home: <https://compcache.googlecode.com/>
config ZRAM_DEBUG
bool "Compressed RAM block device debug support"
diff --git a/drivers/staging/zram/Makefile b/drivers/staging/zram/Makefile
index 7f4a301..cb0f9ce 100644
--- a/drivers/staging/zram/Makefile
+++ b/drivers/staging/zram/Makefile
@@ -1,3 +1,3 @@
-zram-y := zram_drv.o zram_sysfs.o
+zram-y := zram_drv.o
obj-$(CONFIG_ZRAM) += zram.o
diff --git a/drivers/staging/zram/zram.txt b/drivers/staging/zram/zram.txt
index 5f75d29..765d790 100644
--- a/drivers/staging/zram/zram.txt
+++ b/drivers/staging/zram/zram.txt
@@ -23,17 +23,17 @@
This creates 4 devices: /dev/zram{0,1,2,3}
(num_devices parameter is optional. Default: 1)
-2) Set Disksize (Optional):
- Set disk size by writing the value to sysfs node 'disksize'
- (in bytes). If disksize is not given, default value of 25%
- of RAM is used.
+2) Set Disksize
+ Set disk size by writing the value to sysfs node 'disksize'.
+ The value can be given either in bytes or with memory suffixes (K, M, G).
+ Examples:
+ # Initialize /dev/zram0 with 50MB disksize
+ echo $((50*1024*1024)) > /sys/block/zram0/disksize
- # Initialize /dev/zram0 with 50MB disksize
- echo $((50*1024*1024)) > /sys/block/zram0/disksize
-
- NOTE: disksize cannot be changed if the disk contains any
- data. So, for such a disk, you need to issue 'reset' (see below)
- before you can change its disksize.
+ # Using mem suffixes
+ echo 256K > /sys/block/zram0/disksize
+ echo 512M > /sys/block/zram0/disksize
+ echo 1G > /sys/block/zram0/disksize
3) Activate:
mkswap /dev/zram0
@@ -65,8 +65,9 @@
echo 1 > /sys/block/zram0/reset
echo 1 > /sys/block/zram1/reset
- (This frees all the memory allocated for the given device).
-
+ This frees all the memory allocated for the given device and
+ resets the disksize to zero. You must set the disksize again
+ before reusing the device.
Please report any problems at:
- Mailing list: linux-mm-cc at laptop dot org
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 685d612..44949d2 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -37,56 +37,216 @@
/* Globals */
static int zram_major;
-struct zram *zram_devices;
+static struct zram *zram_devices;
/* Module params (documentation at end) */
-static unsigned int num_devices;
+static unsigned int num_devices = 1;
-static void zram_stat_inc(u32 *v)
+static inline struct zram *dev_to_zram(struct device *dev)
{
- *v = *v + 1;
+ return (struct zram *)dev_to_disk(dev)->private_data;
}
-static void zram_stat_dec(u32 *v)
+static ssize_t disksize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- *v = *v - 1;
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n", zram->disksize);
}
-static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
+static ssize_t initstate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- spin_lock(&zram->stat64_lock);
- *v = *v + inc;
- spin_unlock(&zram->stat64_lock);
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%u\n", zram->init_done);
}
-static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
+static ssize_t num_reads_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- spin_lock(&zram->stat64_lock);
- *v = *v - dec;
- spin_unlock(&zram->stat64_lock);
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.num_reads));
}
-static void zram_stat64_inc(struct zram *zram, u64 *v)
+static ssize_t num_writes_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- zram_stat64_add(zram, v, 1);
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.num_writes));
}
-static int zram_test_flag(struct zram *zram, u32 index,
+static ssize_t invalid_io_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.invalid_io));
+}
+
+static ssize_t notify_free_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.notify_free));
+}
+
+static ssize_t zero_pages_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%u\n", zram->stats.pages_zero);
+}
+
+static ssize_t orig_data_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
+}
+
+static ssize_t compr_data_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+
+ return sprintf(buf, "%llu\n",
+ (u64)atomic64_read(&zram->stats.compr_size));
+}
+
+static ssize_t mem_used_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 val = 0;
+ struct zram *zram = dev_to_zram(dev);
+ struct zram_meta *meta = zram->meta;
+
+ down_read(&zram->init_lock);
+ if (zram->init_done)
+ val = zs_get_total_size_bytes(meta->mem_pool);
+ up_read(&zram->init_lock);
+
+ return sprintf(buf, "%llu\n", val);
+}
+
+static int zram_test_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- return zram->table[index].flags & BIT(flag);
+ return meta->table[index].flags & BIT(flag);
}
-static void zram_set_flag(struct zram *zram, u32 index,
+static void zram_set_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- zram->table[index].flags |= BIT(flag);
+ meta->table[index].flags |= BIT(flag);
}
-static void zram_clear_flag(struct zram *zram, u32 index,
+static void zram_clear_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- zram->table[index].flags &= ~BIT(flag);
+ meta->table[index].flags &= ~BIT(flag);
+}
+
+static inline int is_partial_io(struct bio_vec *bvec)
+{
+ return bvec->bv_len != PAGE_SIZE;
+}
+
+/*
+ * Check if request is within bounds and aligned on zram logical blocks.
+ */
+static inline int valid_io_request(struct zram *zram, struct bio *bio)
+{
+ u64 start, end, bound;
+
+ /* unaligned request */
+ if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+ return 0;
+ if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+ return 0;
+
+ start = bio->bi_sector;
+ end = start + (bio->bi_size >> SECTOR_SHIFT);
+ bound = zram->disksize >> SECTOR_SHIFT;
+ /* out of range */
+ if (unlikely(start >= bound || end > bound || start > end))
+ return 0;
+
+ /* I/O request is valid */
+ return 1;
+}
+
+static void zram_meta_free(struct zram_meta *meta)
+{
+ zs_destroy_pool(meta->mem_pool);
+ kfree(meta->compress_workmem);
+ free_pages((unsigned long)meta->compress_buffer, 1);
+ vfree(meta->table);
+ kfree(meta);
+}
+
+static struct zram_meta *zram_meta_alloc(u64 disksize)
+{
+ size_t num_pages;
+ struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ goto out;
+
+ meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!meta->compress_workmem)
+ goto free_meta;
+
+ meta->compress_buffer =
+ (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!meta->compress_buffer) {
+ pr_err("Error allocating compressor buffer space\n");
+ goto free_workmem;
+ }
+
+ num_pages = disksize >> PAGE_SHIFT;
+ meta->table = vzalloc(num_pages * sizeof(*meta->table));
+ if (!meta->table) {
+ pr_err("Error allocating zram address table\n");
+ goto free_buffer;
+ }
+
+ meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
+ if (!meta->mem_pool) {
+ pr_err("Error creating memory pool\n");
+ goto free_table;
+ }
+
+ return meta;
+
+free_table:
+ vfree(meta->table);
+free_buffer:
+ free_pages((unsigned long)meta->compress_buffer, 1);
+free_workmem:
+ kfree(meta->compress_workmem);
+free_meta:
+ kfree(meta);
+ meta = NULL;
+out:
+ return meta;
+}
+
+static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
+{
+ if (*offset + bvec->bv_len >= PAGE_SIZE)
+ (*index)++;
+ *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
static int page_zero_filled(void *ptr)
@@ -104,349 +264,244 @@
return 1;
}
-static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
-{
- if (!zram->disksize) {
- pr_info(
- "disk size not provided. You can use disksize_kb module "
- "param to specify size.\nUsing default: (%u%% of RAM).\n",
- default_disksize_perc_ram
- );
- zram->disksize = default_disksize_perc_ram *
- (totalram_bytes / 100);
- }
-
- if (zram->disksize > 2 * (totalram_bytes)) {
- pr_info(
- "There is little point creating a zram of greater than "
- "twice the size of memory since we expect a 2:1 compression "
- "ratio. Note that zram uses about 0.1%% of the size of "
- "the disk when not in use so a huge zram is "
- "wasteful.\n"
- "\tMemory Size: %zu kB\n"
- "\tSize you selected: %llu kB\n"
- "Continuing anyway ...\n",
- totalram_bytes >> 10, zram->disksize
- );
- }
-
- zram->disksize &= PAGE_MASK;
-}
-
-static void zram_free_page(struct zram *zram, size_t index)
-{
- void *handle = zram->table[index].handle;
-
- if (unlikely(!handle)) {
- /*
- * No memory is allocated for zero filled pages.
- * Simply clear zero page flag.
- */
- if (zram_test_flag(zram, index, ZRAM_ZERO)) {
- zram_clear_flag(zram, index, ZRAM_ZERO);
- zram_stat_dec(&zram->stats.pages_zero);
- }
- return;
- }
-
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- __free_page(handle);
- zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
- zram_stat_dec(&zram->stats.pages_expand);
- goto out;
- }
-
- zs_free(zram->mem_pool, handle);
-
- if (zram->table[index].size <= PAGE_SIZE / 2)
- zram_stat_dec(&zram->stats.good_compress);
-
-out:
- zram_stat64_sub(zram, &zram->stats.compr_size,
- zram->table[index].size);
- zram_stat_dec(&zram->stats.pages_stored);
-
- zram->table[index].handle = NULL;
- zram->table[index].size = 0;
-}
-
static void handle_zero_page(struct bio_vec *bvec)
{
struct page *page = bvec->bv_page;
void *user_mem;
user_mem = kmap_atomic(page);
- memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+ if (is_partial_io(bvec))
+ memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
+ else
+ clear_page(user_mem);
kunmap_atomic(user_mem);
flush_dcache_page(page);
}
-static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset)
+static void zram_free_page(struct zram *zram, size_t index)
{
- struct page *page = bvec->bv_page;
- unsigned char *user_mem, *cmem;
+ struct zram_meta *meta = zram->meta;
+ unsigned long handle = meta->table[index].handle;
+ u16 size = meta->table[index].size;
- user_mem = kmap_atomic(page);
- cmem = kmap_atomic(zram->table[index].handle);
+ if (unlikely(!handle)) {
+ /*
+ * No memory is allocated for zero filled pages.
+ * Simply clear zero page flag.
+ */
+ if (zram_test_flag(meta, index, ZRAM_ZERO)) {
+ zram_clear_flag(meta, index, ZRAM_ZERO);
+ zram->stats.pages_zero--;
+ }
+ return;
+ }
- memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
- kunmap_atomic(cmem);
- kunmap_atomic(user_mem);
+ if (unlikely(size > max_zpage_size))
+ zram->stats.bad_compress--;
- flush_dcache_page(page);
+ zs_free(meta->mem_pool, handle);
+
+ if (size <= PAGE_SIZE / 2)
+ zram->stats.good_compress--;
+
+ atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
+ zram->stats.pages_stored--;
+
+ meta->table[index].handle = 0;
+ meta->table[index].size = 0;
}
-static inline int is_partial_io(struct bio_vec *bvec)
+static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
- return bvec->bv_len != PAGE_SIZE;
+ int ret = LZO_E_OK;
+ size_t clen = PAGE_SIZE;
+ unsigned char *cmem;
+ struct zram_meta *meta = zram->meta;
+ unsigned long handle = meta->table[index].handle;
+
+ if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+ clear_page(mem);
+ return 0;
+ }
+
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+ if (meta->table[index].size == PAGE_SIZE)
+ copy_page(mem, cmem);
+ else
+ ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
+ mem, &clen);
+ zs_unmap_object(meta->mem_pool, handle);
+
+ /* Should NEVER happen. Return bio error if it does. */
+ if (unlikely(ret != LZO_E_OK)) {
+ pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
+ atomic64_inc(&zram->stats.failed_reads);
+ return ret;
+ }
+
+ return 0;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
u32 index, int offset, struct bio *bio)
{
int ret;
- size_t clen;
struct page *page;
- struct zobj_header *zheader;
- unsigned char *user_mem, *cmem, *uncmem = NULL;
-
+ unsigned char *user_mem, *uncmem = NULL;
+ struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
- if (zram_test_flag(zram, index, ZRAM_ZERO)) {
+ if (unlikely(!meta->table[index].handle) ||
+ zram_test_flag(meta, index, ZRAM_ZERO)) {
handle_zero_page(bvec);
return 0;
}
- /* Requested page is not present in compressed area */
- if (unlikely(!zram->table[index].handle)) {
- pr_debug("Read before write: sector=%lu, size=%u",
- (ulong)(bio->bi_sector), bio->bi_size);
- handle_zero_page(bvec);
- return 0;
- }
-
- /* Page is stored uncompressed since it's incompressible */
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- handle_uncompressed_page(zram, bvec, index, offset);
- return 0;
- }
-
- if (is_partial_io(bvec)) {
+ if (is_partial_io(bvec))
/* Use a temporary buffer to decompress the page */
- uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!uncmem) {
- pr_info("Error allocating temp memory!\n");
- return -ENOMEM;
- }
- }
+ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
user_mem = kmap_atomic(page);
if (!is_partial_io(bvec))
uncmem = user_mem;
- clen = PAGE_SIZE;
- cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
-
- ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
- zram->table[index].size,
- uncmem, &clen);
-
- if (is_partial_io(bvec)) {
- memcpy(user_mem + bvec->bv_offset, uncmem + offset,
- bvec->bv_len);
- kfree(uncmem);
+ if (!uncmem) {
+ pr_info("Unable to allocate temp memory\n");
+ ret = -ENOMEM;
+ goto out_cleanup;
}
- zs_unmap_object(zram->mem_pool, zram->table[index].handle);
- kunmap_atomic(user_mem);
-
+ ret = zram_decompress_page(zram, uncmem, index);
/* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret != LZO_E_OK)) {
- pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
- zram_stat64_inc(zram, &zram->stats.failed_reads);
- return ret;
- }
+ if (unlikely(ret != LZO_E_OK))
+ goto out_cleanup;
+
+ if (is_partial_io(bvec))
+ memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+ bvec->bv_len);
flush_dcache_page(page);
-
- return 0;
-}
-
-static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
-{
- int ret;
- size_t clen = PAGE_SIZE;
- struct zobj_header *zheader;
- unsigned char *cmem;
-
- if (zram_test_flag(zram, index, ZRAM_ZERO) ||
- !zram->table[index].handle) {
- memset(mem, 0, PAGE_SIZE);
- return 0;
- }
-
- cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
-
- /* Page is stored uncompressed since it's incompressible */
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- memcpy(mem, cmem, PAGE_SIZE);
- kunmap_atomic(cmem);
- return 0;
- }
-
- ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
- zram->table[index].size,
- mem, &clen);
- zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-
- /* Should NEVER happen. Return bio error if it does. */
- if (unlikely(ret != LZO_E_OK)) {
- pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
- zram_stat64_inc(zram, &zram->stats.failed_reads);
- return ret;
- }
-
- return 0;
+ ret = 0;
+out_cleanup:
+ kunmap_atomic(user_mem);
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+ return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset)
{
- int ret;
- u32 store_offset;
+ int ret = 0;
size_t clen;
- void *handle;
- struct zobj_header *zheader;
- struct page *page, *page_store;
+ unsigned long handle;
+ struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
+ struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
- src = zram->compress_buffer;
+ src = meta->compress_buffer;
if (is_partial_io(bvec)) {
/*
* This is a partial IO. We need to read the full page
* before to write the changes.
*/
- uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
if (!uncmem) {
- pr_info("Error allocating temp memory!\n");
ret = -ENOMEM;
goto out;
}
- ret = zram_read_before_write(zram, uncmem, index);
- if (ret) {
- kfree(uncmem);
+ ret = zram_decompress_page(zram, uncmem, index);
+ if (ret)
goto out;
- }
}
- /*
- * System overwrites unused sectors. Free memory associated
- * with this sector now.
- */
- if (zram->table[index].handle ||
- zram_test_flag(zram, index, ZRAM_ZERO))
- zram_free_page(zram, index);
-
user_mem = kmap_atomic(page);
- if (is_partial_io(bvec))
+ if (is_partial_io(bvec)) {
memcpy(uncmem + offset, user_mem + bvec->bv_offset,
bvec->bv_len);
- else
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ } else {
uncmem = user_mem;
+ }
if (page_zero_filled(uncmem)) {
kunmap_atomic(user_mem);
- if (is_partial_io(bvec))
- kfree(uncmem);
- zram_stat_inc(&zram->stats.pages_zero);
- zram_set_flag(zram, index, ZRAM_ZERO);
+ /* Free memory associated with this sector now. */
+ zram_free_page(zram, index);
+
+ zram->stats.pages_zero++;
+ zram_set_flag(meta, index, ZRAM_ZERO);
ret = 0;
goto out;
}
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
- zram->compress_workmem);
+ meta->compress_workmem);
- kunmap_atomic(user_mem);
- if (is_partial_io(bvec))
- kfree(uncmem);
+ if (!is_partial_io(bvec)) {
+ kunmap_atomic(user_mem);
+ user_mem = NULL;
+ uncmem = NULL;
+ }
if (unlikely(ret != LZO_E_OK)) {
pr_err("Compression failed! err=%d\n", ret);
goto out;
}
- /*
- * Page is incompressible. Store it as-is (uncompressed)
- * since we do not want to return too many disk write
- * errors which has side effect of hanging the system.
- */
if (unlikely(clen > max_zpage_size)) {
+ zram->stats.bad_compress++;
clen = PAGE_SIZE;
- page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
- if (unlikely(!page_store)) {
- pr_info("Error allocating memory for "
- "incompressible page: %u\n", index);
- ret = -ENOMEM;
- goto out;
- }
-
- store_offset = 0;
- zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
- zram_stat_inc(&zram->stats.pages_expand);
- handle = page_store;
- src = kmap_atomic(page);
- cmem = kmap_atomic(page_store);
- goto memstore;
+ src = NULL;
+ if (is_partial_io(bvec))
+ src = uncmem;
}
- handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
+ handle = zs_malloc(meta->mem_pool, clen);
if (!handle) {
- pr_info("Error allocating memory for compressed "
- "page: %u, size=%zu\n", index, clen);
+ pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
+ index, clen);
ret = -ENOMEM;
goto out;
}
- cmem = zs_map_object(zram->mem_pool, handle);
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
-memstore:
-#if 0
- /* Back-reference needed for memory defragmentation */
- if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
- zheader = (struct zobj_header *)cmem;
- zheader->table_idx = index;
- cmem += sizeof(*zheader);
- }
-#endif
-
- memcpy(cmem, src, clen);
-
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
- kunmap_atomic(cmem);
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
+ src = kmap_atomic(page);
+ copy_page(cmem, src);
kunmap_atomic(src);
} else {
- zs_unmap_object(zram->mem_pool, handle);
+ memcpy(cmem, src, clen);
}
- zram->table[index].handle = handle;
- zram->table[index].size = clen;
+ zs_unmap_object(meta->mem_pool, handle);
+
+ /*
+ * Free memory associated with this sector
+ * before overwriting unused sectors.
+ */
+ zram_free_page(zram, index);
+
+ meta->table[index].handle = handle;
+ meta->table[index].size = clen;
/* Update stats */
- zram_stat64_add(zram, &zram->stats.compr_size, clen);
- zram_stat_inc(&zram->stats.pages_stored);
+ atomic64_add(clen, &zram->stats.compr_size);
+ zram->stats.pages_stored++;
if (clen <= PAGE_SIZE / 2)
- zram_stat_inc(&zram->stats.good_compress);
-
- return 0;
+ zram->stats.good_compress++;
out:
+ if (is_partial_io(bvec))
+ kfree(uncmem);
+
if (ret)
- zram_stat64_inc(zram, &zram->stats.failed_writes);
+ atomic64_inc(&zram->stats.failed_writes);
return ret;
}
@@ -468,11 +523,121 @@
return ret;
}
-static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
+static void zram_reset_device(struct zram *zram)
{
- if (*offset + bvec->bv_len >= PAGE_SIZE)
- (*index)++;
- *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
+ size_t index;
+ struct zram_meta *meta;
+
+ down_write(&zram->init_lock);
+ if (!zram->init_done) {
+ up_write(&zram->init_lock);
+ return;
+ }
+
+ meta = zram->meta;
+ zram->init_done = 0;
+
+ /* Free all pages that are still in this zram device */
+ for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
+ unsigned long handle = meta->table[index].handle;
+ if (!handle)
+ continue;
+
+ zs_free(meta->mem_pool, handle);
+ }
+
+ zram_meta_free(zram->meta);
+ zram->meta = NULL;
+ /* Reset stats */
+ memset(&zram->stats, 0, sizeof(zram->stats));
+
+ zram->disksize = 0;
+ set_capacity(zram->disk, 0);
+ up_write(&zram->init_lock);
+}
+
+static void zram_init_device(struct zram *zram, struct zram_meta *meta)
+{
+ if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
+ pr_info(
+ "There is little point creating a zram of greater than "
+ "twice the size of memory since we expect a 2:1 compression "
+ "ratio. Note that zram uses about 0.1%% of the size of "
+ "the disk when not in use so a huge zram is "
+ "wasteful.\n"
+ "\tMemory Size: %lu kB\n"
+ "\tSize you selected: %llu kB\n"
+ "Continuing anyway ...\n",
+ (totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
+ );
+ }
+
+ /* zram devices sort of resembles non-rotational disks */
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+
+ zram->meta = meta;
+ zram->init_done = 1;
+
+ pr_debug("Initialization done!\n");
+}
+
+static ssize_t disksize_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ u64 disksize;
+ struct zram_meta *meta;
+ struct zram *zram = dev_to_zram(dev);
+
+ disksize = memparse(buf, NULL);
+ if (!disksize)
+ return -EINVAL;
+
+ disksize = PAGE_ALIGN(disksize);
+ meta = zram_meta_alloc(disksize);
+ down_write(&zram->init_lock);
+ if (zram->init_done) {
+ up_write(&zram->init_lock);
+ zram_meta_free(meta);
+ pr_info("Cannot change disksize for initialized device\n");
+ return -EBUSY;
+ }
+
+ zram->disksize = disksize;
+ set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+ zram_init_device(zram, meta);
+ up_write(&zram->init_lock);
+
+ return len;
+}
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ int ret;
+ unsigned short do_reset;
+ struct zram *zram;
+ struct block_device *bdev;
+
+ zram = dev_to_zram(dev);
+ bdev = bdget_disk(zram->disk, 0);
+
+ /* Do not reset an active device! */
+ if (bdev->bd_holders)
+ return -EBUSY;
+
+ ret = kstrtou16(buf, 10, &do_reset);
+ if (ret)
+ return ret;
+
+ if (!do_reset)
+ return -EINVAL;
+
+ /* Make sure all pending I/O is finished */
+ if (bdev)
+ fsync_bdev(bdev);
+
+ zram_reset_device(zram);
+ return len;
}
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
@@ -483,10 +648,10 @@
switch (rw) {
case READ:
- zram_stat64_inc(zram, &zram->stats.num_reads);
+ atomic64_inc(&zram->stats.num_reads);
break;
case WRITE:
- zram_stat64_inc(zram, &zram->stats.num_writes);
+ atomic64_inc(&zram->stats.num_writes);
break;
}
@@ -531,39 +696,19 @@
}
/*
- * Check if request is within bounds and aligned on zram logical blocks.
- */
-static inline int valid_io_request(struct zram *zram, struct bio *bio)
-{
- if (unlikely(
- (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
- (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
- (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {
-
- return 0;
- }
-
- /* I/O request is valid */
- return 1;
-}
-
-/*
* Handler function for all zram I/O requests.
*/
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
struct zram *zram = queue->queuedata;
- if (unlikely(!zram->init_done) && zram_init_device(zram))
- goto error;
-
down_read(&zram->init_lock);
if (unlikely(!zram->init_done))
- goto error_unlock;
+ goto error;
if (!valid_io_request(zram, bio)) {
- zram_stat64_inc(zram, &zram->stats.invalid_io);
- goto error_unlock;
+ atomic64_inc(&zram->stats.invalid_io);
+ goto error;
}
__zram_make_request(zram, bio, bio_data_dir(bio));
@@ -571,129 +716,21 @@
return;
-error_unlock:
- up_read(&zram->init_lock);
error:
+ up_read(&zram->init_lock);
bio_io_error(bio);
}
-void __zram_reset_device(struct zram *zram)
-{
- size_t index;
-
- zram->init_done = 0;
-
- /* Free various per-device buffers */
- kfree(zram->compress_workmem);
- free_pages((unsigned long)zram->compress_buffer, 1);
-
- zram->compress_workmem = NULL;
- zram->compress_buffer = NULL;
-
- /* Free all pages that are still in this zram device */
- for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
- void *handle = zram->table[index].handle;
- if (!handle)
- continue;
-
- if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
- __free_page(handle);
- else
- zs_free(zram->mem_pool, handle);
- }
-
- vfree(zram->table);
- zram->table = NULL;
-
- zs_destroy_pool(zram->mem_pool);
- zram->mem_pool = NULL;
-
- /* Reset stats */
- memset(&zram->stats, 0, sizeof(zram->stats));
-
- zram->disksize = 0;
-}
-
-void zram_reset_device(struct zram *zram)
-{
- down_write(&zram->init_lock);
- __zram_reset_device(zram);
- up_write(&zram->init_lock);
-}
-
-int zram_init_device(struct zram *zram)
-{
- int ret;
- size_t num_pages;
-
- down_write(&zram->init_lock);
-
- if (zram->init_done) {
- up_write(&zram->init_lock);
- return 0;
- }
-
- zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);
-
- zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
- if (!zram->compress_workmem) {
- pr_err("Error allocating compressor working memory!\n");
- ret = -ENOMEM;
- goto fail_no_table;
- }
-
- zram->compress_buffer =
- (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
- if (!zram->compress_buffer) {
- pr_err("Error allocating compressor buffer space\n");
- ret = -ENOMEM;
- goto fail_no_table;
- }
-
- num_pages = zram->disksize >> PAGE_SHIFT;
- zram->table = vzalloc(num_pages * sizeof(*zram->table));
- if (!zram->table) {
- pr_err("Error allocating zram address table\n");
- ret = -ENOMEM;
- goto fail_no_table;
- }
-
- set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
-
- /* zram devices sort of resembles non-rotational disks */
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
-
- zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
- if (!zram->mem_pool) {
- pr_err("Error creating memory pool\n");
- ret = -ENOMEM;
- goto fail;
- }
-
- zram->init_done = 1;
- up_write(&zram->init_lock);
-
- pr_debug("Initialization done!\n");
- return 0;
-
-fail_no_table:
- /* To prevent accessing table entries during cleanup */
- zram->disksize = 0;
-fail:
- __zram_reset_device(zram);
- up_write(&zram->init_lock);
- pr_err("Initialization failed: err=%d\n", ret);
- return ret;
-}
-
static void zram_slot_free_notify(struct block_device *bdev,
unsigned long index)
{
struct zram *zram;
zram = bdev->bd_disk->private_data;
+ down_write(&zram->lock);
zram_free_page(zram, index);
- zram_stat64_inc(zram, &zram->stats.notify_free);
+ up_write(&zram->lock);
+ atomic64_inc(&zram->stats.notify_free);
}
static const struct block_device_operations zram_devops = {
@@ -701,19 +738,49 @@
.owner = THIS_MODULE
};
+static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
+ disksize_show, disksize_store);
+static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
+static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
+static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
+static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
+static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
+static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
+static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
+static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
+static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
+static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
+
+static struct attribute *zram_disk_attrs[] = {
+ &dev_attr_disksize.attr,
+ &dev_attr_initstate.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_num_reads.attr,
+ &dev_attr_num_writes.attr,
+ &dev_attr_invalid_io.attr,
+ &dev_attr_notify_free.attr,
+ &dev_attr_zero_pages.attr,
+ &dev_attr_orig_data_size.attr,
+ &dev_attr_compr_data_size.attr,
+ &dev_attr_mem_used_total.attr,
+ NULL,
+};
+
+static struct attribute_group zram_disk_attr_group = {
+ .attrs = zram_disk_attrs,
+};
+
static int create_device(struct zram *zram, int device_id)
{
- int ret = 0;
+ int ret = -ENOMEM;
init_rwsem(&zram->lock);
init_rwsem(&zram->init_lock);
- spin_lock_init(&zram->stat64_lock);
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
pr_err("Error allocating disk queue for device %d\n",
device_id);
- ret = -ENOMEM;
goto out;
}
@@ -723,11 +790,9 @@
/* gendisk structure */
zram->disk = alloc_disk(1);
if (!zram->disk) {
- blk_cleanup_queue(zram->queue);
- pr_warning("Error allocating disk structure for device %d\n",
+ pr_warn("Error allocating disk structure for device %d\n",
device_id);
- ret = -ENOMEM;
- goto out;
+ goto out_free_queue;
}
zram->disk->major = zram_major;
@@ -755,12 +820,18 @@
ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
&zram_disk_attr_group);
if (ret < 0) {
- pr_warning("Error creating sysfs group");
- goto out;
+ pr_warn("Error creating sysfs group");
+ goto out_free_disk;
}
zram->init_done = 0;
+ return 0;
+out_free_disk:
+ del_gendisk(zram->disk);
+ put_disk(zram->disk);
+out_free_queue:
+ blk_cleanup_queue(zram->queue);
out:
return ret;
}
@@ -779,17 +850,12 @@
blk_cleanup_queue(zram->queue);
}
-unsigned int zram_get_num_devices(void)
-{
- return num_devices;
-}
-
static int __init zram_init(void)
{
int ret, dev_id;
if (num_devices > max_num_devices) {
- pr_warning("Invalid value for num_devices: %u\n",
+ pr_warn("Invalid value for num_devices: %u\n",
num_devices);
ret = -EINVAL;
goto out;
@@ -797,18 +863,12 @@
zram_major = register_blkdev(0, "zram");
if (zram_major <= 0) {
- pr_warning("Unable to get major number\n");
+ pr_warn("Unable to get major number\n");
ret = -EBUSY;
goto out;
}
- if (!num_devices) {
- pr_info("num_devices not specified. Using default: 1\n");
- num_devices = 1;
- }
-
/* Allocate the device array and initialize each one */
- pr_info("Creating %u devices ...\n", num_devices);
zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
if (!zram_devices) {
ret = -ENOMEM;
@@ -821,6 +881,8 @@
goto free_devices;
}
+ pr_info("Created %u device(s) ...\n", num_devices);
+
return 0;
free_devices:
@@ -841,9 +903,10 @@
for (i = 0; i < num_devices; i++) {
zram = &zram_devices[i];
+ get_disk(zram->disk);
destroy_device(zram);
- if (zram->init_done)
- zram_reset_device(zram);
+ zram_reset_device(zram);
+ put_disk(zram->disk);
}
unregister_blkdev(zram_major, "zram");
@@ -852,12 +915,13 @@
pr_debug("Cleanup done!\n");
}
-module_param(num_devices, uint, 0);
-MODULE_PARM_DESC(num_devices, "Number of zram devices");
-
module_init(zram_init);
module_exit(zram_exit);
+module_param(num_devices, uint, 0);
+MODULE_PARM_DESC(num_devices, "Number of zram devices");
+
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
+MODULE_ALIAS("devname:zram");
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index fbe8ac9..9e57bfb 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -26,23 +26,8 @@
*/
static const unsigned max_num_devices = 32;
-/*
- * Stored at beginning of each compressed object.
- *
- * It stores back-reference to table entry which points to this
- * object. This is required to support memory defragmentation.
- */
-struct zobj_header {
-#if 0
- u32 table_idx;
-#endif
-};
-
/*-- Configurable parameters */
-/* Default zram disk size: 25% of total RAM */
-static const unsigned default_disksize_perc_ram = 25;
-
/*
* Pages that compress to size greater than this are stored
* uncompressed in memory.
@@ -51,8 +36,8 @@
/*
* NOTE: max_zpage_size must be less than or equal to:
- * ZS_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
- * otherwise, xv_malloc() would always return failure.
+ * ZS_MAX_ALLOC_SIZE. Otherwise, zs_malloc() would
+ * always return failure.
*/
/*-- End of configurable params */
@@ -68,9 +53,6 @@
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
- /* Page is stored uncompressed */
- ZRAM_UNCOMPRESSED,
-
/* Page consists entirely of zeros */
ZRAM_ZERO,
@@ -81,34 +63,42 @@
/* Allocated for each disk page */
struct table {
- void *handle;
+ unsigned long handle;
u16 size; /* object size (excluding header) */
u8 count; /* object ref count (not yet used) */
u8 flags;
-} __attribute__((aligned(4)));
+} __aligned(4);
+/*
+ * All 64bit fields should only be manipulated by 64bit atomic accessors.
+ * All modifications to 32bit counters should be protected by zram->lock.
+ */
struct zram_stats {
- u64 compr_size; /* compressed size of pages stored */
- u64 num_reads; /* failed + successful */
- u64 num_writes; /* --do-- */
- u64 failed_reads; /* should NEVER! happen */
- u64 failed_writes; /* can happen when memory is too low */
- u64 invalid_io; /* non-page-aligned I/O requests */
- u64 notify_free; /* no. of swap slot free notifications */
+ atomic64_t compr_size; /* compressed size of pages stored */
+ atomic64_t num_reads; /* failed + successful */
+ atomic64_t num_writes; /* --do-- */
+ atomic64_t failed_reads; /* should NEVER! happen */
+ atomic64_t failed_writes; /* can happen when memory is too low */
+ atomic64_t invalid_io; /* non-page-aligned I/O requests */
+ atomic64_t notify_free; /* no. of swap slot free notifications */
u32 pages_zero; /* no. of zero filled pages */
u32 pages_stored; /* no. of pages currently stored */
u32 good_compress; /* % of pages with compression ratio<=50% */
- u32 pages_expand; /* % of incompressible pages */
+ u32 bad_compress; /* % of pages with compression ratio>=75% */
};
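
In practice the rule above splits the stats in two: the atomic64_t counters are updated and read through the atomic64_* accessors, while the remaining 32bit counters are only touched with zram->lock held for write. A minimal sketch of that usage pattern (zram_stats_example() is a hypothetical helper, shown only to illustrate the locking split):

	static void zram_stats_example(struct zram *zram)
	{
		u64 reads;

		/* 64bit counters: atomic accessors, no extra locking */
		atomic64_inc(&zram->stats.num_reads);
		reads = atomic64_read(&zram->stats.num_reads);
		(void)reads;

		/* 32bit counters: writers must hold zram->lock */
		down_write(&zram->lock);
		zram->stats.pages_zero++;
		up_write(&zram->lock);
	}
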
-struct zram {
- struct zs_pool *mem_pool;
+struct zram_meta {
void *compress_workmem;
void *compress_buffer;
struct table *table;
- spinlock_t stat64_lock; /* protect 64-bit stats */
- struct rw_semaphore lock; /* protect compression buffers and table
- * against concurrent read and writes */
+ struct zs_pool *mem_pool;
+};
+
+struct zram {
+ struct zram_meta *meta;
+ struct rw_semaphore lock; /* protect compression buffers, table,
+ * 32bit stat counters against concurrent
+ * notifications, reads and writes */
struct request_queue *queue;
struct gendisk *disk;
int init_done;
@@ -122,14 +112,4 @@
struct zram_stats stats;
};
-
-extern struct zram *zram_devices;
-unsigned int zram_get_num_devices(void);
-#ifdef CONFIG_SYSFS
-extern struct attribute_group zram_disk_attr_group;
-#endif
-
-extern int zram_init_device(struct zram *zram);
-extern void __zram_reset_device(struct zram *zram);
-
#endif
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
deleted file mode 100644
index a7f3771..0000000
--- a/drivers/staging/zram/zram_sysfs.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Compressed RAM block device
- *
- * Copyright (C) 2008, 2009, 2010 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the licence that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- *
- * Project home: http://compcache.googlecode.com/
- */
-
-#include <linux/device.h>
-#include <linux/genhd.h>
-#include <linux/mm.h>
-
-#include "zram_drv.h"
-
-static u64 zram_stat64_read(struct zram *zram, u64 *v)
-{
- u64 val;
-
- spin_lock(&zram->stat64_lock);
- val = *v;
- spin_unlock(&zram->stat64_lock);
-
- return val;
-}
-
-static struct zram *dev_to_zram(struct device *dev)
-{
- int i;
- struct zram *zram = NULL;
-
- for (i = 0; i < zram_get_num_devices(); i++) {
- zram = &zram_devices[i];
- if (disk_to_dev(zram->disk) == dev)
- break;
- }
-
- return zram;
-}
-
-static ssize_t disksize_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n", zram->disksize);
-}
-
-static ssize_t disksize_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- int ret;
- u64 disksize;
- struct zram *zram = dev_to_zram(dev);
-
- ret = kstrtoull(buf, 10, &disksize);
- if (ret)
- return ret;
-
- down_write(&zram->init_lock);
- if (zram->init_done) {
- up_write(&zram->init_lock);
- pr_info("Cannot change disksize for initialized device\n");
- return -EBUSY;
- }
-
- zram->disksize = PAGE_ALIGN(disksize);
- set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- up_write(&zram->init_lock);
-
- return len;
-}
-
-static ssize_t initstate_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%u\n", zram->init_done);
-}
-
-static ssize_t reset_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
-{
- int ret;
- unsigned short do_reset;
- struct zram *zram;
- struct block_device *bdev;
-
- zram = dev_to_zram(dev);
- bdev = bdget_disk(zram->disk, 0);
-
- /* Do not reset an active device! */
- if (bdev->bd_holders)
- return -EBUSY;
-
- ret = kstrtou16(buf, 10, &do_reset);
- if (ret)
- return ret;
-
- if (!do_reset)
- return -EINVAL;
-
- /* Make sure all pending I/O is finished */
- if (bdev)
- fsync_bdev(bdev);
-
- down_write(&zram->init_lock);
- if (zram->init_done)
- __zram_reset_device(zram);
- up_write(&zram->init_lock);
-
- return len;
-}
-
-static ssize_t num_reads_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.num_reads));
-}
-
-static ssize_t num_writes_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.num_writes));
-}
-
-static ssize_t invalid_io_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.invalid_io));
-}
-
-static ssize_t notify_free_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.notify_free));
-}
-
-static ssize_t zero_pages_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%u\n", zram->stats.pages_zero);
-}
-
-static ssize_t orig_data_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- (u64)(zram->stats.pages_stored) << PAGE_SHIFT);
-}
-
-static ssize_t compr_data_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct zram *zram = dev_to_zram(dev);
-
- return sprintf(buf, "%llu\n",
- zram_stat64_read(zram, &zram->stats.compr_size));
-}
-
-static ssize_t mem_used_total_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- u64 val = 0;
- struct zram *zram = dev_to_zram(dev);
-
- if (zram->init_done) {
- val = zs_get_total_size_bytes(zram->mem_pool) +
- ((u64)(zram->stats.pages_expand) << PAGE_SHIFT);
- }
-
- return sprintf(buf, "%llu\n", val);
-}
-
-static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
- disksize_show, disksize_store);
-static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
-static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
-static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
-static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
-static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
-static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
-static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
-static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
-static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
-static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
-
-static struct attribute *zram_disk_attrs[] = {
- &dev_attr_disksize.attr,
- &dev_attr_initstate.attr,
- &dev_attr_reset.attr,
- &dev_attr_num_reads.attr,
- &dev_attr_num_writes.attr,
- &dev_attr_invalid_io.attr,
- &dev_attr_notify_free.attr,
- &dev_attr_zero_pages.attr,
- &dev_attr_orig_data_size.attr,
- &dev_attr_compr_data_size.attr,
- &dev_attr_mem_used_total.attr,
- NULL,
-};
-
-struct attribute_group zram_disk_attr_group = {
- .attrs = zram_disk_attrs,
-};
diff --git a/drivers/staging/zsmalloc/Kconfig b/drivers/staging/zsmalloc/Kconfig
index a5ab720..7fab032 100644
--- a/drivers/staging/zsmalloc/Kconfig
+++ b/drivers/staging/zsmalloc/Kconfig
@@ -1,9 +1,5 @@
config ZSMALLOC
- tristate "Memory allocator for compressed pages"
- # X86 dependency is because of the use of __flush_tlb_one and set_pte
- # in zsmalloc-main.c.
- # TODO: convert these to portable functions
- depends on X86
+ bool "Memory allocator for compressed pages"
default n
help
zsmalloc is a slab-based memory allocator designed to store
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 917461c..1a67537 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -10,6 +10,54 @@
* Released under the terms of GNU General Public License Version 2.0
*/
+
+/*
+ * This allocator is designed for use with zcache and zram. Thus, the
+ * allocator is supposed to work well under low memory conditions. In
+ * particular, it never attempts higher order page allocation which is
+ * very likely to fail under memory pressure. On the other hand, if we
+ * just use single (0-order) pages, it would suffer from very high
+ * fragmentation -- any object of size PAGE_SIZE/2 or larger would occupy
+ * an entire page. This was one of the major issues with its predecessor
+ * (xvmalloc).
+ *
+ * To overcome these issues, zsmalloc allocates a bunch of 0-order pages
+ * and links them together using various 'struct page' fields. These linked
+ * pages act as a single higher-order page i.e. an object can span 0-order
+ * page boundaries. The code refers to these linked pages as a single entity
+ * called zspage.
+ *
+ * Following is how we use various fields and flags of underlying
+ * struct page(s) to form a zspage.
+ *
+ * Usage of struct page fields:
+ * page->first_page: points to the first component (0-order) page
+ * page->index (union with page->freelist): offset of the first object
+ * starting in this page. For the first page, this is
+ * always 0, so we use this field (aka freelist) to point
+ * to the first free object in zspage.
+ * page->lru: links together all component pages (except the first page)
+ * of a zspage
+ *
+ * For _first_ page only:
+ *
+ * page->private (union with page->first_page): refers to the
+ * component page after the first page
+ * page->freelist: points to the first free object in zspage.
+ * Free objects are linked together using in-place
+ * metadata.
+ * page->objects: maximum number of objects we can store in this
+ * zspage (class->zspage_order * PAGE_SIZE / class->size)
+ * page->lru: links together first pages of various zspages.
+ * Basically forming list of zspages in a fullness group.
+ * page->mapping: class index and fullness group of the zspage
+ *
+ * Usage of struct page flags:
+ * PG_private: identifies the first component page
+ * PG_private2: identifies the last component page
+ *
+ */
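
As a rough illustration of the linkage described above, visiting every component page of a zspage follows the first-page/private/lru scheme; the sketch below mirrors the get_next_page() helper further down in this file (walk_zspage() is a hypothetical name, assuming first_page really is the head page of a zspage):

	static void walk_zspage(struct page *first_page)
	{
		struct page *page = first_page;

		while (page) {
			/* ... inspect this component page ... */
			if (is_last_page(page))		/* PG_private2 set */
				break;
			if (is_first_page(page))	/* PG_private set */
				page = (struct page *)page_private(page);
			else
				page = list_entry(page->lru.next, struct page, lru);
		}
	}
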
+
#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif
@@ -27,9 +75,139 @@
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
+#include <linux/hardirq.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
#include "zsmalloc.h"
-#include "zsmalloc_int.h"
+
+/*
+ * This must be a power of 2 and greater than or equal to sizeof(link_free).
+ * These two conditions ensure that any 'struct link_free' itself doesn't
+ * span more than 1 page which avoids complex case of mapping 2 pages simply
+ * to restore link_free pointer values.
+ */
+#define ZS_ALIGN 8
+
+/*
+ * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
+ * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
+ */
+#define ZS_MAX_ZSPAGE_ORDER 2
+#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
+
+/*
+ * Object location (<PFN>, <obj_idx>) is encoded as
+ * a single (unsigned long) handle value.
+ *
+ * Note that object index <obj_idx> is relative to system
+ * page <PFN> it is stored in, so for each sub-page belonging
+ * to a zspage, obj_idx starts with 0.
+ *
+ * This is made more complicated by various memory models and PAE.
+ */
+
+#ifndef MAX_PHYSMEM_BITS
+#ifdef CONFIG_HIGHMEM64G
+#define MAX_PHYSMEM_BITS 36
+#else /* !CONFIG_HIGHMEM64G */
+/*
+ * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
+ * be PAGE_SHIFT
+ */
+#define MAX_PHYSMEM_BITS BITS_PER_LONG
+#endif
+#endif
+#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
+#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
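
Concretely, a handle packs the page frame number into the upper bits and the object index into the lower OBJ_INDEX_BITS. The sketch below (a hypothetical encode_handle(), shown only to make the bit layout explicit) is the inverse of obj_handle_to_location() later in this file:

	static unsigned long encode_handle(struct page *page, unsigned long obj_idx)
	{
		/* <PFN> in the high bits, <obj_idx> in the low OBJ_INDEX_BITS */
		return ((unsigned long)page_to_pfn(page) << OBJ_INDEX_BITS) |
			(obj_idx & OBJ_INDEX_MASK);
	}
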
+
+#define MAX(a, b) ((a) >= (b) ? (a) : (b))
+/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
+#define ZS_MIN_ALLOC_SIZE \
+ MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
+#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
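
For a sense of scale, on a configuration where MAX_PHYSMEM_BITS falls back to BITS_PER_LONG, OBJ_INDEX_BITS works out to PAGE_SHIFT; with 4K pages that makes (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT) >> OBJ_INDEX_BITS = 4, so ZS_MIN_ALLOC_SIZE = MAX(32, 4) = 32 bytes, while ZS_MAX_ALLOC_SIZE is one full page.
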
+
+/*
+ * On systems with 4K page size, this gives 254 size classes! There is a
+ * trade-off here:
+ * - Large number of size classes is potentially wasteful as free pages are
+ * spread across these classes
+ * - Small number of size classes causes large internal fragmentation
+ * - Probably it's better to use specific size classes (empirically
+ * determined). NOTE: all those class sizes must be set as multiple of
+ * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
+ *
+ * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
+ * (reason above)
+ */
+#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8)
+#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
+ ZS_SIZE_CLASS_DELTA + 1)
+
+/*
+ * We do not maintain any list for completely empty or full pages
+ */
+enum fullness_group {
+ ZS_ALMOST_FULL,
+ ZS_ALMOST_EMPTY,
+ _ZS_NR_FULLNESS_GROUPS,
+
+ ZS_EMPTY,
+ ZS_FULL
+};
+
+/*
+ * We assign a page to ZS_ALMOST_EMPTY fullness group when:
+ * n <= N / f, where
+ * n = number of allocated objects
+ * N = total number of objects zspage can store
+ * f = 1/fullness_threshold_frac
+ *
+ * Similarly, we assign zspage to:
+ * ZS_ALMOST_FULL when n > N / f
+ * ZS_EMPTY when n == 0
+ * ZS_FULL when n == N
+ *
+ * (see: fix_fullness_group())
+ */
+static const int fullness_threshold_frac = 4;
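
Spelled out in code, the rule above amounts to the following classification (an illustrative sketch of what fix_fullness_group() relies on, with inuse and max_objects standing in for n and N):

	static enum fullness_group classify_zspage(int inuse, int max_objects)
	{
		if (inuse == 0)
			return ZS_EMPTY;
		if (inuse == max_objects)
			return ZS_FULL;
		if (inuse <= max_objects / fullness_threshold_frac)
			return ZS_ALMOST_EMPTY;
		return ZS_ALMOST_FULL;
	}
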
+
+struct size_class {
+ /*
+ * Size of objects stored in this class. Must be multiple
+ * of ZS_ALIGN.
+ */
+ int size;
+ unsigned int index;
+
+ /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
+ int pages_per_zspage;
+
+ spinlock_t lock;
+
+ /* stats */
+ u64 pages_allocated;
+
+ struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
+};
+
+/*
+ * Placed within free objects to form a singly linked list.
+ * For every zspage, first_page->freelist gives head of this list.
+ *
+ * This must be power of 2 and less than or equal to ZS_ALIGN
+ */
+struct link_free {
+ /* Handle of next free chunk (encodes <PFN, obj_idx>) */
+ void *next;
+};
+
+struct zs_pool {
+ struct size_class size_class[ZS_SIZE_CLASSES];
+
+ gfp_t flags; /* allocation flags used when growing pool */
+};
/*
* A zspage's class index and fullness group
@@ -40,17 +218,39 @@
#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1)
+/*
+ * By default, zsmalloc uses a copy-based object mapping method to access
+ * allocations that span two pages. However, if a particular architecture
+ * performs VM mapping faster than copying, then it should be added here
+ * so that USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use
+ * page table mapping rather than copying for object mapping.
+ */
+#if defined(CONFIG_ARM) && !defined(MODULE)
+#define USE_PGTABLE_MAPPING
+#endif
+
+struct mapping_area {
+#ifdef USE_PGTABLE_MAPPING
+ struct vm_struct *vm; /* vm area for mapping objects that span pages */
+#else
+ char *vm_buf; /* copy buffer for objects that span pages */
+#endif
+ char *vm_addr; /* address of kmap_atomic()'ed pages */
+ enum zs_mapmode vm_mm; /* mapping mode */
+};
+
+
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
static int is_first_page(struct page *page)
{
- return test_bit(PG_private, &page->flags);
+ return PagePrivate(page);
}
static int is_last_page(struct page *page)
{
- return test_bit(PG_private_2, &page->flags);
+ return PagePrivate2(page);
}
static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
@@ -180,7 +380,7 @@
* link together 3 PAGE_SIZE sized pages to form a zspage
* since then we can perfectly fit in 8 such objects.
*/
-static int get_zspage_order(int class_size)
+static int get_pages_per_zspage(int class_size)
{
int i, max_usedpc = 0;
/* zspage order which gives maximum used size per KB */
@@ -223,7 +423,7 @@
if (is_last_page(page))
next = NULL;
else if (is_first_page(page))
- next = (struct page *)page->private;
+ next = (struct page *)page_private(page);
else
next = list_entry(page->lru.next, struct page, lru);
@@ -247,13 +447,11 @@
}
/* Decode <page, obj_idx> pair from the given object handle */
-static void obj_handle_to_location(void *handle, struct page **page,
+static void obj_handle_to_location(unsigned long handle, struct page **page,
unsigned long *obj_idx)
{
- unsigned long hval = (unsigned long)handle;
-
- *page = pfn_to_page(hval >> OBJ_INDEX_BITS);
- *obj_idx = hval & OBJ_INDEX_MASK;
+ *page = pfn_to_page(handle >> OBJ_INDEX_BITS);
+ *obj_idx = handle & OBJ_INDEX_MASK;
}
static unsigned long obj_idx_to_offset(struct page *page,
@@ -274,7 +472,7 @@
set_page_private(page, 0);
page->mapping = NULL;
page->freelist = NULL;
- reset_page_mapcount(page);
+ page_mapcount_reset(page);
}
static void free_zspage(struct page *first_page)
@@ -354,7 +552,7 @@
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
int i, error;
- struct page *first_page = NULL;
+ struct page *first_page = NULL, *uninitialized_var(prev_page);
/*
* Allocate individual pages and link them together as:
@@ -368,8 +566,8 @@
* identify the last page.
*/
error = -ENOMEM;
- for (i = 0; i < class->zspage_order; i++) {
- struct page *page, *prev_page;
+ for (i = 0; i < class->pages_per_zspage; i++) {
+ struct page *page;
page = alloc_page(flags);
if (!page)
@@ -377,20 +575,19 @@
INIT_LIST_HEAD(&page->lru);
if (i == 0) { /* first page */
- set_bit(PG_private, &page->flags);
+ SetPagePrivate(page);
set_page_private(page, 0);
first_page = page;
first_page->inuse = 0;
}
if (i == 1)
- first_page->private = (unsigned long)page;
+ set_page_private(first_page, (unsigned long)page);
if (i >= 1)
page->first_page = first_page;
if (i >= 2)
list_add(&page->lru, &prev_page->lru);
- if (i == class->zspage_order - 1) /* last page */
- set_bit(PG_private_2, &page->flags);
-
+ if (i == class->pages_per_zspage - 1) /* last page */
+ SetPagePrivate2(page);
prev_page = page;
}
@@ -398,7 +595,7 @@
first_page->freelist = obj_location_to_handle(first_page, 0);
/* Maximum number of objects we can store in this zspage */
- first_page->objects = class->zspage_order * PAGE_SIZE / class->size;
+ first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
error = 0; /* Success */
@@ -425,34 +622,141 @@
return page;
}
+#ifdef USE_PGTABLE_MAPPING
+static inline int __zs_cpu_up(struct mapping_area *area)
+{
+ /*
+ * Make sure we don't leak memory if a cpu UP notification
+ * and zs_init() race and both call zs_cpu_up() on the same cpu
+ */
+ if (area->vm)
+ return 0;
+ area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
+ if (!area->vm)
+ return -ENOMEM;
+ return 0;
+}
-/*
- * If this becomes a separate module, register zs_init() with
- * module_init(), zs_exit with module_exit(), and remove zs_initialized
-*/
-static int zs_initialized;
+static inline void __zs_cpu_down(struct mapping_area *area)
+{
+ if (area->vm)
+ free_vm_area(area->vm);
+ area->vm = NULL;
+}
+
+static inline void *__zs_map_object(struct mapping_area *area,
+ struct page *pages[2], int off, int size)
+{
+ BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
+ area->vm_addr = area->vm->addr;
+ return area->vm_addr + off;
+}
+
+static inline void __zs_unmap_object(struct mapping_area *area,
+ struct page *pages[2], int off, int size)
+{
+ unsigned long addr = (unsigned long)area->vm_addr;
+
+ unmap_kernel_range(addr, PAGE_SIZE * 2);
+}
+
+#else /* USE_PGTABLE_MAPPING */
+
+static inline int __zs_cpu_up(struct mapping_area *area)
+{
+ /*
+ * Make sure we don't leak memory if a cpu UP notification
+ * and zs_init() race and both call zs_cpu_up() on the same cpu
+ */
+ if (area->vm_buf)
+ return 0;
+ area->vm_buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!area->vm_buf)
+ return -ENOMEM;
+ return 0;
+}
+
+static inline void __zs_cpu_down(struct mapping_area *area)
+{
+ if (area->vm_buf)
+ free_page((unsigned long)area->vm_buf);
+ area->vm_buf = NULL;
+}
+
+static void *__zs_map_object(struct mapping_area *area,
+ struct page *pages[2], int off, int size)
+{
+ int sizes[2];
+ void *addr;
+ char *buf = area->vm_buf;
+
+ /* disable page faults to match kmap_atomic() return conditions */
+ pagefault_disable();
+
+ /* no read fastpath */
+ if (area->vm_mm == ZS_MM_WO)
+ goto out;
+
+ sizes[0] = PAGE_SIZE - off;
+ sizes[1] = size - sizes[0];
+
+ /* copy object to per-cpu buffer */
+ addr = kmap_atomic(pages[0]);
+ memcpy(buf, addr + off, sizes[0]);
+ kunmap_atomic(addr);
+ addr = kmap_atomic(pages[1]);
+ memcpy(buf + sizes[0], addr, sizes[1]);
+ kunmap_atomic(addr);
+out:
+ return area->vm_buf;
+}
+
+static void __zs_unmap_object(struct mapping_area *area,
+ struct page *pages[2], int off, int size)
+{
+ int sizes[2];
+ void *addr;
+ char *buf = area->vm_buf;
+
+ /* no write fastpath */
+ if (area->vm_mm == ZS_MM_RO)
+ goto out;
+
+ sizes[0] = PAGE_SIZE - off;
+ sizes[1] = size - sizes[0];
+
+ /* copy per-cpu buffer to object */
+ addr = kmap_atomic(pages[0]);
+ memcpy(addr + off, buf, sizes[0]);
+ kunmap_atomic(addr);
+ addr = kmap_atomic(pages[1]);
+ memcpy(addr, buf + sizes[0], sizes[1]);
+ kunmap_atomic(addr);
+
+out:
+ /* enable page faults to match kunmap_atomic() return conditions */
+ pagefault_enable();
+}
+
+#endif /* USE_PGTABLE_MAPPING */
static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
void *pcpu)
{
- int cpu = (long)pcpu;
+ int ret, cpu = (long)pcpu;
struct mapping_area *area;
switch (action) {
case CPU_UP_PREPARE:
area = &per_cpu(zs_map_area, cpu);
- if (area->vm)
- break;
- area->vm = alloc_vm_area(2 * PAGE_SIZE, area->vm_ptes);
- if (!area->vm)
- return notifier_from_errno(-ENOMEM);
+ ret = __zs_cpu_up(area);
+ if (ret)
+ return notifier_from_errno(ret);
break;
case CPU_DEAD:
case CPU_UP_CANCELED:
area = &per_cpu(zs_map_area, cpu);
- if (area->vm)
- free_vm_area(area->vm);
- area->vm = NULL;
+ __zs_cpu_down(area);
break;
}
@@ -488,14 +792,21 @@
return notifier_to_errno(ret);
}
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
+/**
+ * zs_create_pool - Creates an allocation pool to work from.
+ * @flags: allocation flags used to allocate pool metadata
+ *
+ * This function must be called before anything when using
+ * the zsmalloc allocator.
+ *
+ * On success, a pointer to the newly created pool is returned,
+ * otherwise NULL.
+ */
+struct zs_pool *zs_create_pool(gfp_t flags)
{
- int i, error, ovhd_size;
+ int i, ovhd_size;
struct zs_pool *pool;
- if (!name)
- return NULL;
-
ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
pool = kzalloc(ovhd_size, GFP_KERNEL);
if (!pool)
@@ -513,31 +824,11 @@
class->size = size;
class->index = i;
spin_lock_init(&class->lock);
- class->zspage_order = get_zspage_order(size);
+ class->pages_per_zspage = get_pages_per_zspage(size);
}
- /*
- * If this becomes a separate module, register zs_init with
- * module_init, and remove this block
- */
- if (!zs_initialized) {
- error = zs_init();
- if (error)
- goto cleanup;
- zs_initialized = 1;
- }
-
pool->flags = flags;
- pool->name = name;
-
- error = 0; /* Success */
-
-cleanup:
- if (error) {
- zs_destroy_pool(pool);
- pool = NULL;
- }
return pool;
}
@@ -553,8 +844,7 @@
for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
if (class->fullness_list[fg]) {
- pr_info("Freeing non-empty class with size "
- "%db, fullness group %d\n",
+ pr_info("Freeing non-empty class with size %db, fullness group %d\n",
class->size, fg);
}
}
@@ -567,18 +857,14 @@
* zs_malloc - Allocate block of given size from pool.
* @pool: pool to allocate from
* @size: size of block to allocate
- * @page: page no. that holds the object
- * @offset: location of object within page
*
- * On success, <page, offset> identifies block allocated
- * and 0 is returned. On failure, <page, offset> is set to
- * 0 and -ENOMEM is returned.
- *
+ * On success, handle to the allocated object is returned,
+ * otherwise 0.
* Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
*/
-void *zs_malloc(struct zs_pool *pool, size_t size)
+unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
- void *obj;
+ unsigned long obj;
struct link_free *link;
int class_idx;
struct size_class *class;
@@ -587,7 +873,7 @@
unsigned long m_objidx, m_offset;
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
- return NULL;
+ return 0;
class_idx = get_size_class_index(size);
class = &pool->size_class[class_idx];
@@ -600,14 +886,14 @@
spin_unlock(&class->lock);
first_page = alloc_zspage(class, pool->flags);
if (unlikely(!first_page))
- return NULL;
+ return 0;
set_zspage_mapping(first_page, class->index, ZS_EMPTY);
spin_lock(&class->lock);
- class->pages_allocated += class->zspage_order;
+ class->pages_allocated += class->pages_per_zspage;
}
- obj = first_page->freelist;
+ obj = (unsigned long)first_page->freelist;
obj_handle_to_location(obj, &m_page, &m_objidx);
m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
@@ -626,7 +912,7 @@
}
EXPORT_SYMBOL_GPL(zs_malloc);
-void zs_free(struct zs_pool *pool, void *obj)
+void zs_free(struct zs_pool *pool, unsigned long obj)
{
struct link_free *link;
struct page *first_page, *f_page;
@@ -653,13 +939,13 @@
+ f_offset);
link->next = first_page->freelist;
kunmap_atomic(link);
- first_page->freelist = obj;
+ first_page->freelist = (void *)obj;
first_page->inuse--;
fullness = fix_fullness_group(pool, first_page);
if (fullness == ZS_EMPTY)
- class->pages_allocated -= class->zspage_order;
+ class->pages_allocated -= class->pages_per_zspage;
spin_unlock(&class->lock);
@@ -668,7 +954,22 @@
}
EXPORT_SYMBOL_GPL(zs_free);
-void *zs_map_object(struct zs_pool *pool, void *handle)
+/**
+ * zs_map_object - get address of allocated object from handle.
+ * @pool: pool from which the object was allocated
+ * @handle: handle returned from zs_malloc
+ *
+ * Before using an object allocated from zs_malloc, it must be mapped using
+ * this function. When done with the object, it must be unmapped using
+ * zs_unmap_object.
+ *
+ * Only one object can be mapped per cpu at a time. There is no protection
+ * against nested mappings.
+ *
+ * This function returns with preemption and page faults disabled.
+ */
+void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ enum zs_mapmode mm)
{
struct page *page;
unsigned long obj_idx, off;
@@ -677,38 +978,40 @@
enum fullness_group fg;
struct size_class *class;
struct mapping_area *area;
+ struct page *pages[2];
BUG_ON(!handle);
+ /*
+ * Because we use per-cpu mapping areas shared among the
+ * pools/users, we can't allow mapping in interrupt context
+ * because it can corrupt another user's mappings.
+ */
+ BUG_ON(in_interrupt());
+
obj_handle_to_location(handle, &page, &obj_idx);
get_zspage_mapping(get_first_page(page), &class_idx, &fg);
class = &pool->size_class[class_idx];
off = obj_idx_to_offset(page, obj_idx, class->size);
area = &get_cpu_var(zs_map_area);
+ area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
area->vm_addr = kmap_atomic(page);
- } else {
- /* this object spans two pages */
- struct page *nextp;
-
- nextp = get_next_page(page);
- BUG_ON(!nextp);
-
-
- set_pte(area->vm_ptes[0], mk_pte(page, PAGE_KERNEL));
- set_pte(area->vm_ptes[1], mk_pte(nextp, PAGE_KERNEL));
-
- /* We pre-allocated VM area so mapping can never fail */
- area->vm_addr = area->vm->addr;
+ return area->vm_addr + off;
}
- return area->vm_addr + off;
+ /* this object spans two pages */
+ pages[0] = page;
+ pages[1] = get_next_page(page);
+ BUG_ON(!pages[1]);
+
+ return __zs_map_object(area, pages, off, class->size);
}
EXPORT_SYMBOL_GPL(zs_map_object);
-void zs_unmap_object(struct zs_pool *pool, void *handle)
+void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
struct page *page;
unsigned long obj_idx, off;
@@ -726,13 +1029,16 @@
off = obj_idx_to_offset(page, obj_idx, class->size);
area = &__get_cpu_var(zs_map_area);
- if (off + class->size <= PAGE_SIZE) {
+ if (off + class->size <= PAGE_SIZE)
kunmap_atomic(area->vm_addr);
- } else {
- set_pte(area->vm_ptes[0], __pte(0));
- set_pte(area->vm_ptes[1], __pte(0));
- __flush_tlb_one((unsigned long)area->vm_addr);
- __flush_tlb_one((unsigned long)area->vm_addr + PAGE_SIZE);
+ else {
+ struct page *pages[2];
+
+ pages[0] = page;
+ pages[1] = get_next_page(page);
+ BUG_ON(!pages[1]);
+
+ __zs_unmap_object(area, pages, off, class->size);
}
put_cpu_var(zs_map_area);
}
@@ -749,3 +1055,9 @@
return npages << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
+
+module_init(zs_init);
+module_exit(zs_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
diff --git a/drivers/staging/zsmalloc/zsmalloc.h b/drivers/staging/zsmalloc/zsmalloc.h
index 949384e..fbe6bec 100644
--- a/drivers/staging/zsmalloc/zsmalloc.h
+++ b/drivers/staging/zsmalloc/zsmalloc.h
@@ -15,16 +15,28 @@
#include <linux/types.h>
+/*
+ * zsmalloc mapping modes
+ *
+ * NOTE: These only make a difference when a mapped object spans pages
+ */
+enum zs_mapmode {
+ ZS_MM_RW, /* normal read-write mapping */
+ ZS_MM_RO, /* read-only (no copy-out at unmap time) */
+ ZS_MM_WO /* write-only (no copy-in at map time) */
+};
+
struct zs_pool;
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
+struct zs_pool *zs_create_pool(gfp_t flags);
void zs_destroy_pool(struct zs_pool *pool);
-void *zs_malloc(struct zs_pool *pool, size_t size);
-void zs_free(struct zs_pool *pool, void *obj);
+unsigned long zs_malloc(struct zs_pool *pool, size_t size);
+void zs_free(struct zs_pool *pool, unsigned long obj);
-void *zs_map_object(struct zs_pool *pool, void *handle);
-void zs_unmap_object(struct zs_pool *pool, void *handle);
+void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ enum zs_mapmode mm);
+void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
u64 zs_get_total_size_bytes(struct zs_pool *pool);
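
Putting the API together, a client such as zram would use it roughly as below; this is an illustrative sketch only, not code lifted from the driver, and it assumes the pool was created earlier with zs_create_pool(GFP_NOIO) and is eventually torn down with zs_destroy_pool():

	static int zs_store_example(struct zs_pool *pool, const void *src, size_t len)
	{
		unsigned long handle;
		void *dst;

		handle = zs_malloc(pool, len);	/* returns 0 on failure */
		if (!handle)
			return -ENOMEM;

		/* ZS_MM_WO: write-only mapping, no copy-in of stale data */
		dst = zs_map_object(pool, handle, ZS_MM_WO);
		memcpy(dst, src, len);
		zs_unmap_object(pool, handle);

		/* later, once the object is no longer needed */
		zs_free(pool, handle);
		return 0;
	}
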
diff --git a/drivers/staging/zsmalloc/zsmalloc_int.h b/drivers/staging/zsmalloc/zsmalloc_int.h
deleted file mode 100644
index 92eefc6..0000000
--- a/drivers/staging/zsmalloc/zsmalloc_int.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * zsmalloc memory allocator
- *
- * Copyright (C) 2011 Nitin Gupta
- *
- * This code is released using a dual license strategy: BSD/GPL
- * You can choose the license that better fits your requirements.
- *
- * Released under the terms of 3-clause BSD License
- * Released under the terms of GNU General Public License Version 2.0
- */
-
-#ifndef _ZS_MALLOC_INT_H_
-#define _ZS_MALLOC_INT_H_
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-/*
- * This must be power of 2 and greater than of equal to sizeof(link_free).
- * These two conditions ensure that any 'struct link_free' itself doesn't
- * span more than 1 page which avoids complex case of mapping 2 pages simply
- * to restore link_free pointer values.
- */
-#define ZS_ALIGN 8
-
-/*
- * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
- * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
- */
-#define ZS_MAX_ZSPAGE_ORDER 2
-#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
-
-/*
- * Object location (<PFN>, <obj_idx>) is encoded as
- * as single (void *) handle value.
- *
- * Note that object index <obj_idx> is relative to system
- * page <PFN> it is stored in, so for each sub-page belonging
- * to a zspage, obj_idx starts with 0.
- *
- * This is made more complicated by various memory models and PAE.
- */
-
-#ifndef MAX_PHYSMEM_BITS
-#ifdef CONFIG_HIGHMEM64G
-#define MAX_PHYSMEM_BITS 36
-#else /* !CONFIG_HIGHMEM64G */
-/*
- * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
- * be PAGE_SHIFT
- */
-#define MAX_PHYSMEM_BITS BITS_PER_LONG
-#endif
-#endif
-#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
-#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS)
-#define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
-
-#define MAX(a, b) ((a) >= (b) ? (a) : (b))
-/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
-#define ZS_MIN_ALLOC_SIZE \
- MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
-#define ZS_MAX_ALLOC_SIZE PAGE_SIZE
-
-/*
- * On systems with 4K page size, this gives 254 size classes! There is a
- * trader-off here:
- * - Large number of size classes is potentially wasteful as free page are
- * spread across these classes
- * - Small number of size classes causes large internal fragmentation
- * - Probably its better to use specific size classes (empirically
- * determined). NOTE: all those class sizes must be set as multiple of
- * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
- *
- * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
- * (reason above)
- */
-#define ZS_SIZE_CLASS_DELTA 16
-#define ZS_SIZE_CLASSES ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / \
- ZS_SIZE_CLASS_DELTA + 1)
-
-/*
- * We do not maintain any list for completely empty or full pages
- */
-enum fullness_group {
- ZS_ALMOST_FULL,
- ZS_ALMOST_EMPTY,
- _ZS_NR_FULLNESS_GROUPS,
-
- ZS_EMPTY,
- ZS_FULL
-};
-
-/*
- * We assign a page to ZS_ALMOST_EMPTY fullness group when:
- * n <= N / f, where
- * n = number of allocated objects
- * N = total number of objects zspage can store
- * f = 1/fullness_threshold_frac
- *
- * Similarly, we assign zspage to:
- * ZS_ALMOST_FULL when n > N / f
- * ZS_EMPTY when n == 0
- * ZS_FULL when n == N
- *
- * (see: fix_fullness_group())
- */
-static const int fullness_threshold_frac = 4;
-
-struct mapping_area {
- struct vm_struct *vm;
- pte_t *vm_ptes[2];
- char *vm_addr;
-};
-
-struct size_class {
- /*
- * Size of objects stored in this class. Must be multiple
- * of ZS_ALIGN.
- */
- int size;
- unsigned int index;
-
- /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
- int zspage_order;
-
- spinlock_t lock;
-
- /* stats */
- u64 pages_allocated;
-
- struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
-};
-
-/*
- * Placed within free objects to form a singly linked list.
- * For every zspage, first_page->freelist gives head of this list.
- *
- * This must be power of 2 and less than or equal to ZS_ALIGN
- */
-struct link_free {
- /* Handle of next free chunk (encodes <PFN, obj_idx>) */
- void *next;
-};
-
-struct zs_pool {
- struct size_class size_class[ZS_SIZE_CLASSES];
-
- gfp_t flags; /* allocation flags used when growing pool */
- const char *name;
-};
-
-#endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a5fa304..2218ac4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -806,6 +806,17 @@
return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}
+extern struct address_space *__page_file_mapping(struct page *);
+
+static inline
+struct address_space *page_file_mapping(struct page *page)
+{
+ if (unlikely(PageSwapCache(page)))
+ return __page_file_mapping(page);
+
+ return page->mapping;
+}
+
static inline int PageAnon(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -822,6 +833,20 @@
return page->index;
}
+extern pgoff_t __page_file_index(struct page *page);
+
+/*
+ * Return the file index of the page. Regular pagecache pages use ->index
+ * whereas swapcache pages use swp_offset(->private)
+ */
+static inline pgoff_t page_file_index(struct page *page)
+{
+ if (unlikely(PageSwapCache(page)))
+ return __page_file_index(page);
+
+ return page->index;
+}
+
/*
* Return true if this page is mapped into pagetables.
*/
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0844dc3..696ca39 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -295,6 +295,11 @@
return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}
+static inline loff_t page_file_offset(struct page *page)
+{
+ return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+}
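
These helpers let I/O paths treat pagecache and swapcache pages uniformly: for a swapcache page the mapping and offset resolve through the swap file rather than page->mapping and page->index. A minimal sketch of a caller (hypothetical, not from an in-tree user):

	static void get_backing_target(struct page *page,
				       struct address_space **mapping, loff_t *pos)
	{
		*mapping = page_file_mapping(page);	/* swap file's mapping for swapcache */
		*pos = page_file_offset(page);		/* byte offset within that file */
	}
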
+
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
unsigned long address);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d5bd6ee..036b107 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -351,6 +351,8 @@
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
+extern int page_swapcount(struct page *);
+extern struct swap_info_struct *page_swap_info(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index fafc26d..a6c07fd 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -31,6 +31,7 @@
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
+#include <linux/export.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -601,7 +602,7 @@
* This does not give an exact answer when swap count is continued,
* but does include the high COUNT_CONTINUED flag to allow for that.
*/
-static inline int page_swapcount(struct page *page)
+int page_swapcount(struct page *page)
{
int count = 0;
struct swap_info_struct *p;
@@ -2292,6 +2293,31 @@
return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
+struct swap_info_struct *page_swap_info(struct page *page)
+{
+ swp_entry_t swap = { .val = page_private(page) };
+ BUG_ON(!PageSwapCache(page));
+ return swap_info[swp_type(swap)];
+}
+
+/*
+ * out-of-line __page_file_ methods to avoid include hell.
+ */
+struct address_space *__page_file_mapping(struct page *page)
+{
+ VM_BUG_ON(!PageSwapCache(page));
+ return page_swap_info(page)->swap_file->f_mapping;
+}
+EXPORT_SYMBOL_GPL(__page_file_mapping);
+
+pgoff_t __page_file_index(struct page *page)
+{
+ swp_entry_t swap = { .val = page_private(page) };
+ VM_BUG_ON(!PageSwapCache(page));
+ return swp_offset(swap);
+}
+EXPORT_SYMBOL_GPL(__page_file_index);
+
/*
* add_swap_count_continuation - called when a swap count is duplicated
* beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index da99254..aaa132e 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -3154,13 +3154,27 @@
struct snd_soc_dai *dai)
{
struct wcd9xxx *tapan_core = dev_get_drvdata(dai->codec->dev->parent);
+ struct tapan_priv *tapan = snd_soc_codec_get_drvdata(dai->codec);
+ u32 active = 0;
+
dev_dbg(dai->codec->dev, "%s(): substream = %s stream = %d\n",
__func__, substream->name, substream->stream);
+
+ if (dai->id <= NUM_CODEC_DAIS) {
+ if (tapan->dai[dai->id].ch_mask) {
+ active = 1;
+ dev_dbg(dai->codec->dev, "%s(): Codec DAI: chmask[%d] = 0x%lx\n",
+ __func__, dai->id,
+ tapan->dai[dai->id].ch_mask);
+ }
+ }
if ((tapan_core != NULL) &&
(tapan_core->dev != NULL) &&
- (tapan_core->dev->parent != NULL)) {
+ (tapan_core->dev->parent != NULL) &&
+ (active == 0)) {
pm_runtime_mark_last_busy(tapan_core->dev->parent);
pm_runtime_put(tapan_core->dev->parent);
+ dev_dbg(dai->codec->dev, "%s: unvote requested", __func__);
}
}
@@ -3920,6 +3934,13 @@
dev_dbg(codec->dev, "%s: Disconnect RX port, ret = %d\n",
__func__, ret);
}
+ if ((core != NULL) &&
+ (core->dev != NULL) &&
+ (core->dev->parent != NULL)) {
+ pm_runtime_mark_last_busy(core->dev->parent);
+ pm_runtime_put(core->dev->parent);
+ dev_dbg(codec->dev, "%s: unvote requested", __func__);
+ }
break;
}
return ret;
@@ -3967,6 +3988,13 @@
dev_dbg(codec->dev, "%s: Disconnect RX port, ret = %d\n",
__func__, ret);
}
+ if ((core != NULL) &&
+ (core->dev != NULL) &&
+ (core->dev->parent != NULL)) {
+ pm_runtime_mark_last_busy(core->dev->parent);
+ pm_runtime_put(core->dev->parent);
+ dev_dbg(codec->dev, "%s: unvote requested", __func__);
+ }
break;
}
return ret;
diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c
index d726af2..ec99c5f 100644
--- a/sound/soc/codecs/wcd9xxx-mbhc.c
+++ b/sound/soc/codecs/wcd9xxx-mbhc.c
@@ -1302,6 +1302,7 @@
const struct wcd9xxx_mbhc_plug_type_cfg *plug_type =
WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration);
s16 hs_max, no_mic, dce_z;
+ int highhph_cnt = 0;
pr_debug("%s: enter\n", __func__);
pr_debug("%s: event_state 0x%lx\n", __func__, event_state);
@@ -1326,9 +1327,10 @@
d->_vdces = vdce;
if (d->_vdces < no_mic)
d->_type = PLUG_TYPE_HEADPHONE;
- else if (d->_vdces >= hs_max)
+ else if (d->_vdces >= hs_max) {
d->_type = PLUG_TYPE_HIGH_HPH;
- else
+ highhph_cnt++;
+ } else
d->_type = PLUG_TYPE_HEADSET;
pr_debug("%s: DCE #%d, %04x, V %04d(%04d), HPHL %d TYPE %d\n",
@@ -1363,7 +1365,8 @@
goto exit;
}
- delta_thr = highhph ? WCD9XXX_MB_MEAS_DELTA_MAX_MV :
+ delta_thr = ((highhph_cnt == sz) || highhph) ?
+ WCD9XXX_MB_MEAS_DELTA_MAX_MV :
WCD9XXX_CS_MEAS_DELTA_MAX_MV;
for (i = 0, d = dt; i < sz; i++, d++) {
@@ -2855,6 +2858,10 @@
if (wcd9xxx_cancel_btn_work(mbhc))
pr_debug("%s: button press is canceled\n", __func__);
+ /* cancel detect plug */
+ wcd9xxx_cancel_hs_detect_plug(mbhc,
+ &mbhc->correct_plug_swch);
+
insert = !wcd9xxx_swch_level_remove(mbhc);
pr_debug("%s: Current plug type %d, insert %d\n", __func__,
mbhc->current_plug, insert);
@@ -2862,9 +2869,6 @@
mbhc->lpi_enabled = false;
wmb();
- /* cancel detect plug */
- wcd9xxx_cancel_hs_detect_plug(mbhc,
- &mbhc->correct_plug_swch);
if ((mbhc->current_plug != PLUG_TYPE_NONE) &&
!(snd_soc_read(codec, WCD9XXX_A_MBHC_INSERT_DETECT) &
(1 << 1)))
@@ -2879,10 +2883,6 @@
mbhc->lpi_enabled = false;
wmb();
- /* cancel detect plug */
- wcd9xxx_cancel_hs_detect_plug(mbhc,
- &mbhc->correct_plug_swch);
-
if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) {
wcd9xxx_report_plug(mbhc, 0, SND_JACK_HEADPHONE);
is_removed = true;
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index d7ddca2..7f8736c 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -58,16 +58,9 @@
#define LO_2_SPK_AMP 0x4
#define LO_4_SPK_AMP 0x8
-#define LPAIF_OFFSET 0xFE000000
-#define LPAIF_PRI_MODE_MUXSEL (LPAIF_OFFSET + 0x2B000)
-#define LPAIF_SEC_MODE_MUXSEL (LPAIF_OFFSET + 0x2C000)
-#define LPAIF_TER_MODE_MUXSEL (LPAIF_OFFSET + 0x2D000)
-#define LPAIF_QUAD_MODE_MUXSEL (LPAIF_OFFSET + 0x2E000)
-
#define I2S_PCM_SEL 1
#define I2S_PCM_SEL_OFFSET 1
-
#define WCD9XXX_MBHC_DEF_BUTTONS 8
#define WCD9XXX_MBHC_DEF_RLOADS 5
#define TAIKO_EXT_CLK_RATE 9600000
@@ -146,6 +139,7 @@
struct msm_auxpcm_ctrl {
struct msm_auxpcm_gpio *pin_data;
u32 cnt;
+ void __iomem *mux;
};
struct msm8974_asoc_mach_data {
@@ -173,9 +167,6 @@
{"SEC_AUXPCM_DOUT", "qcom,sec-auxpcm-gpio-dout"},
};
-void *lpaif_pri_muxsel_virt_addr;
-void *lpaif_sec_muxsel_virt_addr;
-
struct msm8974_liquid_dock_dev {
int dock_plug_gpio;
int dock_plug_irq;
@@ -1192,12 +1183,14 @@
goto err;
}
if (atomic_inc_return(&prim_auxpcm_rsc_ref) == 1) {
- if (lpaif_pri_muxsel_virt_addr != NULL)
+ if (auxpcm_ctrl->mux != NULL) {
iowrite32(I2S_PCM_SEL << I2S_PCM_SEL_OFFSET,
- lpaif_pri_muxsel_virt_addr);
- else
- pr_err("%s lpaif_pri_muxsel_virt_addr is NULL\n",
- __func__);
+ auxpcm_ctrl->mux);
+ } else {
+ pr_err("%s: Pri AUXPCM MUX addr is NULL\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
ret = msm_aux_pcm_get_gpios(auxpcm_ctrl);
}
if (ret < 0) {
@@ -1247,12 +1240,14 @@
goto err;
}
if (atomic_inc_return(&sec_auxpcm_rsc_ref) == 1) {
- if (lpaif_sec_muxsel_virt_addr != NULL)
+ if (auxpcm_ctrl->mux != NULL) {
iowrite32(I2S_PCM_SEL << I2S_PCM_SEL_OFFSET,
- lpaif_sec_muxsel_virt_addr);
- else
- pr_err("%s lpaif_sec_muxsel_virt_addr is NULL\n",
- __func__);
+ auxpcm_ctrl->mux);
+ } else {
+ pr_err("%s Sec AUXPCM MUX addr is NULL\n", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
ret = msm_aux_pcm_get_gpios(auxpcm_ctrl);
}
if (ret < 0) {
@@ -2667,6 +2662,8 @@
int ret;
const char *auxpcm_pri_gpio_set = NULL;
const char *prop_name_ult_lo_gpio = "qcom,ext-ult-lo-amp-gpio";
+ struct resource *pri_muxsel;
+ struct resource *sec_muxsel;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No platform supplied from device tree\n");
@@ -2794,7 +2791,6 @@
}
}
-
pdata->us_euro_gpio = of_get_named_gpio(pdev->dev.of_node,
"qcom,us-euro-gpios", 0);
if (pdata->us_euro_gpio < 0) {
@@ -2821,28 +2817,49 @@
goto err1;
}
if (!strcmp(auxpcm_pri_gpio_set, "prim-gpio-prim")) {
- lpaif_pri_muxsel_virt_addr = ioremap(LPAIF_PRI_MODE_MUXSEL, 4);
+ pri_muxsel = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "lpaif_pri_mode_muxsel");
} else if (!strcmp(auxpcm_pri_gpio_set, "prim-gpio-tert")) {
- lpaif_pri_muxsel_virt_addr = ioremap(LPAIF_TER_MODE_MUXSEL, 4);
+ pri_muxsel = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "lpaif_tert_mode_muxsel");
} else {
dev_err(&pdev->dev, "Invalid value %s for AUXPCM GPIO set\n",
auxpcm_pri_gpio_set);
ret = -EINVAL;
goto err1;
}
- if (lpaif_pri_muxsel_virt_addr == NULL) {
- pr_err("%s Pri muxsel virt addr is null\n", __func__);
- ret = -EINVAL;
- goto err1;
+ if (!pri_muxsel) {
+ dev_err(&pdev->dev, "MUX addr invalid for primary AUXPCM\n");
+ ret = -ENODEV;
+ goto err1;
+ } else {
+ pdata->pri_auxpcm_ctrl->mux = ioremap(pri_muxsel->start,
+ resource_size(pri_muxsel));
+ if (pdata->pri_auxpcm_ctrl->mux == NULL) {
+ pr_err("%s Pri muxsel virt addr is null\n", __func__);
+ ret = -EINVAL;
+ goto err1;
+ }
}
- lpaif_sec_muxsel_virt_addr = ioremap(LPAIF_SEC_MODE_MUXSEL, 4);
- if (lpaif_sec_muxsel_virt_addr == NULL) {
+
+ sec_muxsel = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "lpaif_sec_mode_muxsel");
+ if (!sec_muxsel) {
+ dev_err(&pdev->dev, "MUX addr invalid for secondary AUXPCM\n");
+ ret = -ENODEV;
+ goto err2;
+ }
+ pdata->sec_auxpcm_ctrl->mux = ioremap(sec_muxsel->start,
+ resource_size(sec_muxsel));
+ if (pdata->sec_auxpcm_ctrl->mux == NULL) {
pr_err("%s Sec muxsel virt addr is null\n", __func__);
ret = -EINVAL;
- goto err1;
+ goto err2;
}
return 0;
+err2:
+ iounmap(pdata->pri_auxpcm_ctrl->mux);
err1:
if (ext_ult_lo_amp_gpio >= 0)
gpio_free(ext_ult_lo_amp_gpio);
@@ -2896,8 +2913,8 @@
msm8974_liquid_dock_dev = NULL;
}
- iounmap(lpaif_pri_muxsel_virt_addr);
- iounmap(lpaif_sec_muxsel_virt_addr);
+ iounmap(pdata->pri_auxpcm_ctrl->mux);
+ iounmap(pdata->sec_auxpcm_ctrl->mux);
snd_soc_unregister_card(card);
return 0;
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
index 93b3597..4a25606 100644
--- a/sound/soc/msm/qdsp6v2/audio_ocmem.c
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -879,6 +879,15 @@
pr_debug("%s\n", __func__);
+ audio_ocmem_lcl.audio_hdl = ocmem_notifier_register(OCMEM_LP_AUDIO,
+ &audio_ocmem_client_nb);
+ if (PTR_RET(audio_ocmem_lcl.audio_hdl) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (!audio_ocmem_lcl.audio_hdl) {
+ pr_err("%s: Failed to get ocmem handle %d\n", __func__,
+ OCMEM_LP_AUDIO);
+ return -ENODEV;
+ }
subsys_notif_register_notifier("adsp", &anb);
audio_ocmem_lcl.ocmem_dump_addr =
@@ -944,12 +953,6 @@
ret = -EFAULT;
goto destroy_voice_wq;
}
- audio_ocmem_lcl.audio_hdl = ocmem_notifier_register(OCMEM_LP_AUDIO,
- &audio_ocmem_client_nb);
- if (audio_ocmem_lcl.audio_hdl == NULL) {
- pr_err("%s: Failed to get ocmem handle %d\n", __func__,
- OCMEM_LP_AUDIO);
- }
audio_ocmem_lcl.lp_memseg_ptr = NULL;
return 0;
destroy_voice_wq: