From 70a640c0efa7667453c3911b13335304ce46ad8b Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 26 Apr 2023 15:35:45 +0200 Subject: [PATCH 001/303] regmap: REGMAP_KUNIT should not select REGMAP Enabling a (modular) test should not silently enable additional kernel functionality, as that may increase the attack vector of a product. Fix this by: 1. making REGMAP visible if CONFIG_KUNIT_ALL_TESTS is enabled, 2. making REGMAP_KUNIT depend on REGMAP instead of selecting it. After this, one can safely enable CONFIG_KUNIT_ALL_TESTS=m to build modules for all appropriate tests for ones system, without pulling in extra unwanted functionality, while still allowing a tester to manually enable REGMAP and its test suite on a system where REGMAP is not enabled by default. Fixes: 2238959b6ad27040 ("regmap: Add some basic kunit tests") Signed-off-by: Geert Uytterhoeven Date: Tue, 2 May 2023 12:38:09 +0200 Subject: [PATCH 002/303] phy: qcom-qmp-combo: fix init-count imbalance The init counter is not decremented on initialisation errors, which prevents retrying initialisation and can lead to the runtime suspend callback attempting to disable resources that have never been enabled. Add the missing decrement on initialisation errors so that the counter reflects the state of the device. Fixes: e78f3d15e115 ("phy: qcom-qmp: new qmp phy driver for qcom-chipsets") Cc: stable@vger.kernel.org # 4.12 Signed-off-by: Johan Hovold Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20230502103810.12061-2-johan+linaro@kernel.org Signed-off-by: Vinod Koul --- drivers/phy/qualcomm/phy-qcom-qmp-combo.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c index 6850e04c329b8..87b17e5877ab8 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -2472,7 +2472,7 @@ static int qmp_combo_com_init(struct qmp_combo *qmp) ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); if (ret) { dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); - goto err_unlock; + goto err_decrement_count; } ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets); @@ -2522,7 +2522,8 @@ static int qmp_combo_com_init(struct qmp_combo *qmp) reset_control_bulk_assert(cfg->num_resets, qmp->resets); err_disable_regulators: regulator_bulk_disable(cfg->num_vregs, qmp->vregs); -err_unlock: +err_decrement_count: + qmp->init_count--; mutex_unlock(&qmp->phy_mutex); return ret; From e42f110700ed7293700c26145e1ed07ea05ac3f6 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 2 May 2023 12:38:10 +0200 Subject: [PATCH 003/303] phy: qcom-qmp-pcie-msm8996: fix init-count imbalance The init counter is not decremented on initialisation errors, which prevents retrying initialisation. Add the missing decrement on initialisation errors so that the counter reflects the state of the device. 
Fixes: e78f3d15e115 ("phy: qcom-qmp: new qmp phy driver for qcom-chipsets") Cc: stable@vger.kernel.org # 4.12 Signed-off-by: Johan Hovold Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20230502103810.12061-3-johan+linaro@kernel.org Signed-off-by: Vinod Koul --- drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c index 09824be088c96..0c603bc06e099 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c @@ -379,7 +379,7 @@ static int qmp_pcie_msm8996_com_init(struct qmp_phy *qphy) ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); if (ret) { dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); - goto err_unlock; + goto err_decrement_count; } ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets); @@ -409,7 +409,8 @@ static int qmp_pcie_msm8996_com_init(struct qmp_phy *qphy) reset_control_bulk_assert(cfg->num_resets, qmp->resets); err_disable_regulators: regulator_bulk_disable(cfg->num_vregs, qmp->vregs); -err_unlock: +err_decrement_count: + qmp->init_count--; mutex_unlock(&qmp->phy_mutex); return ret; From 08c7f09356e45d093d1867c7a3c6ac6526e2f98b Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Sun, 7 May 2023 11:29:29 -0700 Subject: [PATCH 004/303] RDMA/bnxt_re: Fix the page_size used during the MR creation Driver populates the list of pages used for Memory region wrongly when page size is more than system page size. This is causing a failure when some of the applications that creates MR with page size as 2M. Since HW can support multiple page sizes, pass the correct page size while creating the MR. Also, driver need not adjust the number of pages when HW Queues are created with user memory. It should work with the number of dma blocks returned by ib_umem_num_dma_blocks. Fix this calculation also. 
Fixes: 0c4dcd602817 ("RDMA/bnxt_re: Refactor hardware queue memory allocation") Fixes: f6919d56388c ("RDMA/bnxt_re: Code refactor while populating user MRs") Link: https://lore.kernel.org/r/1683484169-9539-1-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Kalesh AP Signed-off-by: Kashyap Desai Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/qplib_res.c | 12 ++---------- drivers/infiniband/hw/bnxt_re/qplib_sp.c | 7 +++---- 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 126d4f26f75ad..81b0c5e879f9e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -215,17 +215,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, return -EINVAL; hwq_attr->sginfo->npages = npages; } else { - unsigned long sginfo_num_pages = ib_umem_num_dma_blocks( - hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize); - + npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem, + hwq_attr->sginfo->pgsize); hwq->is_user = true; - npages = sginfo_num_pages; - npages = (npages * PAGE_SIZE) / - BIT_ULL(hwq_attr->sginfo->pgshft); - if ((sginfo_num_pages * PAGE_SIZE) % - BIT_ULL(hwq_attr->sginfo->pgshft)) - if (!npages) - npages++; } if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) { diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 1714a1e231132..b967a17a44beb 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -617,16 +617,15 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, /* Free the hwq if it already exist, must be a rereg */ if (mr->hwq.max_elements) bnxt_qplib_free_hwq(res, &mr->hwq); - /* Use system PAGE_SIZE */ hwq_attr.res = res; hwq_attr.depth = pages; - hwq_attr.stride = buf_pg_size; + hwq_attr.stride = sizeof(dma_addr_t); hwq_attr.type = HWQ_TYPE_MR; hwq_attr.sginfo = &sginfo; hwq_attr.sginfo->umem = umem; hwq_attr.sginfo->npages = pages; - hwq_attr.sginfo->pgsize = PAGE_SIZE; - hwq_attr.sginfo->pgshft = PAGE_SHIFT; + hwq_attr.sginfo->pgsize = buf_pg_size; + hwq_attr.sginfo->pgshft = ilog2(buf_pg_size); rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr); if (rc) { dev_err(&res->pdev->dev, From 78b6a9a3f445e121ca08195084206cb8faa6999f Mon Sep 17 00:00:00 2001 From: Haoyue Xu Date: Sat, 6 May 2023 15:06:04 +0800 Subject: [PATCH 005/303] MAINTAINERS: Update maintainers of HiSilicon RoCE Wenpeng has moved to other technical areas, and Junxian will take over his responsibilities in maintaining this module. 
Link: https://lore.kernel.org/r/20230506070604.2982542-1-xuhaoyue1@hisilicon.com Signed-off-by: Haoyue Xu Signed-off-by: Jason Gunthorpe --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 7e0b87d5aa2e5..c560437378483 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9360,7 +9360,7 @@ F: drivers/crypto/hisilicon/zip/ HISILICON ROCE DRIVER M: Haoyue Xu -M: Wenpeng Liang +M: Junxian Huang L: linux-rdma@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt From 20f291b88ecf23f674ee2ed980a4d93b7f16a06f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 14 Feb 2023 18:47:30 +0300 Subject: [PATCH 006/303] iio: adc: imx93: fix a signedness bug in imx93_adc_read_raw() The problem is these lines: ret = vref_uv = regulator_get_voltage(adc->vref); if (ret < 0) The "ret" variable is type long and "vref_uv" is u32 so that means the condition can never be true on a 64bit system. A negative error code from regulator_get_voltage() would be cast to a high positive u32 value and then remain a high positive value when cast to a long. The "ret" variable only ever stores ints so it should be declared as an int. We can delete the "vref_uv" variable and use "ret" directly. Fixes: 7d02296ac8b8 ("iio: adc: add imx93 adc support") Signed-off-by: Dan Carpenter Reviewed-by: Haibo Chen Link: https://lore.kernel.org/r/Y+utEvjfjQRQo2QB@kili Signed-off-by: Jonathan Cameron --- drivers/iio/adc/imx93_adc.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/iio/adc/imx93_adc.c b/drivers/iio/adc/imx93_adc.c index a775d2e405671..dce9ec91e4a77 100644 --- a/drivers/iio/adc/imx93_adc.c +++ b/drivers/iio/adc/imx93_adc.c @@ -236,8 +236,7 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev, { struct imx93_adc *adc = iio_priv(indio_dev); struct device *dev = adc->dev; - long ret; - u32 vref_uv; + int ret; switch (mask) { case IIO_CHAN_INFO_RAW: @@ -253,10 +252,10 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: - ret = vref_uv = regulator_get_voltage(adc->vref); + ret = regulator_get_voltage(adc->vref); if (ret < 0) return ret; - *val = vref_uv / 1000; + *val = ret / 1000; *val2 = 12; return IIO_VAL_FRACTIONAL_LOG2; From 00ffdd6fa90298522d45ca0c348b23485584dcdc Mon Sep 17 00:00:00 2001 From: ChiaEn Wu Date: Mon, 10 Apr 2023 18:34:22 +0800 Subject: [PATCH 007/303] iio: adc: mt6370: Fix ibus and ibat scaling value of some specific vendor ID chips The scale value of ibus and ibat on the datasheet is incorrect due to the customer report after the experimentation with some specific vendor ID chips. 
Fixes: c1404d1b659f ("iio: adc: mt6370: Add MediaTek MT6370 support") Signed-off-by: ChiaEn Wu Reviewed-by: Alexandre Mergnat Link: https://lore.kernel.org/r/1681122862-1994-1-git-send-email-chiaen_wu@richtek.com Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/adc/mt6370-adc.c | 53 ++++++++++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/drivers/iio/adc/mt6370-adc.c b/drivers/iio/adc/mt6370-adc.c index bc62e5a9d50d1..0bc112135bca1 100644 --- a/drivers/iio/adc/mt6370-adc.c +++ b/drivers/iio/adc/mt6370-adc.c @@ -19,6 +19,7 @@ #include +#define MT6370_REG_DEV_INFO 0x100 #define MT6370_REG_CHG_CTRL3 0x113 #define MT6370_REG_CHG_CTRL7 0x117 #define MT6370_REG_CHG_ADC 0x121 @@ -27,6 +28,7 @@ #define MT6370_ADC_START_MASK BIT(0) #define MT6370_ADC_IN_SEL_MASK GENMASK(7, 4) #define MT6370_AICR_ICHG_MASK GENMASK(7, 2) +#define MT6370_VENID_MASK GENMASK(7, 4) #define MT6370_AICR_100_mA 0x0 #define MT6370_AICR_150_mA 0x1 @@ -47,6 +49,10 @@ #define ADC_CONV_TIME_MS 35 #define ADC_CONV_POLLING_TIME_US 1000 +#define MT6370_VID_RT5081 0x8 +#define MT6370_VID_RT5081A 0xA +#define MT6370_VID_MT6370 0xE + struct mt6370_adc_data { struct device *dev; struct regmap *regmap; @@ -55,6 +61,7 @@ struct mt6370_adc_data { * from being read at the same time. */ struct mutex adc_lock; + unsigned int vid; }; static int mt6370_adc_read_channel(struct mt6370_adc_data *priv, int chan, @@ -98,6 +105,30 @@ static int mt6370_adc_read_channel(struct mt6370_adc_data *priv, int chan, return ret; } +static int mt6370_adc_get_ibus_scale(struct mt6370_adc_data *priv) +{ + switch (priv->vid) { + case MT6370_VID_RT5081: + case MT6370_VID_RT5081A: + case MT6370_VID_MT6370: + return 3350; + default: + return 3875; + } +} + +static int mt6370_adc_get_ibat_scale(struct mt6370_adc_data *priv) +{ + switch (priv->vid) { + case MT6370_VID_RT5081: + case MT6370_VID_RT5081A: + case MT6370_VID_MT6370: + return 2680; + default: + return 3870; + } +} + static int mt6370_adc_read_scale(struct mt6370_adc_data *priv, int chan, int *val1, int *val2) { @@ -123,7 +154,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv, case MT6370_AICR_250_mA: case MT6370_AICR_300_mA: case MT6370_AICR_350_mA: - *val1 = 3350; + *val1 = mt6370_adc_get_ibus_scale(priv); break; default: *val1 = 5000; @@ -150,7 +181,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv, case MT6370_ICHG_600_mA: case MT6370_ICHG_700_mA: case MT6370_ICHG_800_mA: - *val1 = 2680; + *val1 = mt6370_adc_get_ibat_scale(priv); break; default: *val1 = 5000; @@ -251,6 +282,20 @@ static const struct iio_chan_spec mt6370_adc_channels[] = { MT6370_ADC_CHAN(TEMP_JC, IIO_TEMP, 12, BIT(IIO_CHAN_INFO_OFFSET)), }; +static int mt6370_get_vendor_info(struct mt6370_adc_data *priv) +{ + unsigned int dev_info; + int ret; + + ret = regmap_read(priv->regmap, MT6370_REG_DEV_INFO, &dev_info); + if (ret) + return ret; + + priv->vid = FIELD_GET(MT6370_VENID_MASK, dev_info); + + return 0; +} + static int mt6370_adc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -272,6 +317,10 @@ static int mt6370_adc_probe(struct platform_device *pdev) priv->regmap = regmap; mutex_init(&priv->adc_lock); + ret = mt6370_get_vendor_info(priv); + if (ret) + return dev_err_probe(dev, ret, "Failed to get vid\n"); + ret = regmap_write(priv->regmap, MT6370_REG_CHG_ADC, 0); if (ret) return dev_err_probe(dev, ret, "Failed to reset ADC\n"); From 265c82ea8b172129cb6d4eff41af856c3aff6168 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Thu, 
13 Apr 2023 18:37:52 -0700 Subject: [PATCH 008/303] iio: tmag5273: Fix runtime PM leak on measurement error The tmag5273 gets a runtime PM reference before reading a measurement and releases it when done. But if the measurement fails the tmag5273_read_raw() function exits before releasing the reference. Make sure that this error path also releases the runtime PM reference. Fixes: 866a1389174b ("iio: magnetometer: add ti tmag5273 driver") Signed-off-by: Lars-Peter Clausen Acked-by: Gerald Loacker Reviewed-by: Nuno Sa Link: https://lore.kernel.org/r/20230414013752.498767-1-lars@metafoo.de Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/magnetometer/tmag5273.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c index 28bb7efe8df8c..e155a75b3cd29 100644 --- a/drivers/iio/magnetometer/tmag5273.c +++ b/drivers/iio/magnetometer/tmag5273.c @@ -296,12 +296,13 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev, return ret; ret = tmag5273_get_measure(data, &t, &x, &y, &z, &angle, &magnitude); - if (ret) - return ret; pm_runtime_mark_last_busy(data->dev); pm_runtime_put_autosuspend(data->dev); + if (ret) + return ret; + switch (chan->address) { case TEMPERATURE: *val = t; From 28f73ded19d403697f87473c9b85a27eb8ed9cf2 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Fri, 14 Apr 2023 08:07:02 -0700 Subject: [PATCH 009/303] iio: ad4130: Make sure clock provider gets removed The ad4130 driver registers a clock provider, but never removes it. This leaves a stale clock provider behind that references freed clocks when the device is unbound. Register a managed action to remove the clock provider when the device is removed. Fixes: 62094060cf3a ("iio: adc: ad4130: add AD4130 driver") Signed-off-by: Lars-Peter Clausen Link: https://lore.kernel.org/r/20230414150702.518441-1-lars@metafoo.de Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/adc/ad4130.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c index 38394341fd6e7..5a5dd5e87ffc4 100644 --- a/drivers/iio/adc/ad4130.c +++ b/drivers/iio/adc/ad4130.c @@ -1817,6 +1817,11 @@ static const struct clk_ops ad4130_int_clk_ops = { .unprepare = ad4130_int_clk_unprepare, }; +static void ad4130_clk_del_provider(void *of_node) +{ + of_clk_del_provider(of_node); +} + static int ad4130_setup_int_clk(struct ad4130_state *st) { struct device *dev = &st->spi->dev; @@ -1824,6 +1829,7 @@ static int ad4130_setup_int_clk(struct ad4130_state *st) struct clk_init_data init; const char *clk_name; struct clk *clk; + int ret; if (st->int_pin_sel == AD4130_INT_PIN_CLK || st->mclk_sel != AD4130_MCLK_76_8KHZ) @@ -1843,7 +1849,11 @@ static int ad4130_setup_int_clk(struct ad4130_state *st) if (IS_ERR(clk)) return PTR_ERR(clk); - return of_clk_add_provider(of_node, of_clk_src_simple_get, clk); + ret = of_clk_add_provider(of_node, of_clk_src_simple_get, clk); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, ad4130_clk_del_provider, of_node); } static int ad4130_setup(struct iio_dev *indio_dev) From 27b2ed5b6d53cd62fc61c3f259ae52f5cac23b66 Mon Sep 17 00:00:00 2001 From: Jiakai Luo Date: Sat, 22 Apr 2023 06:34:06 -0700 Subject: [PATCH 010/303] iio: adc: mxs-lradc: fix the order of two cleanup operations Smatch reports: drivers/iio/adc/mxs-lradc-adc.c:766 mxs_lradc_adc_probe() warn: missing unwind goto? 
the order of three init operation: 1.mxs_lradc_adc_trigger_init 2.iio_triggered_buffer_setup 3.mxs_lradc_adc_hw_init thus, the order of three cleanup operation should be: 1.mxs_lradc_adc_hw_stop 2.iio_triggered_buffer_cleanup 3.mxs_lradc_adc_trigger_remove we exchange the order of two cleanup operations, introducing the following differences: 1.if mxs_lradc_adc_trigger_init fails, returns directly; 2.if trigger_init succeeds but iio_triggered_buffer_setup fails, goto err_trig and remove the trigger. In addition, we also reorder the unwind that goes on in the remove() callback to match the new ordering. Fixes: 6dd112b9f85e ("iio: adc: mxs-lradc: Add support for ADC driver") Signed-off-by: Jiakai Luo Reviewed-by: Dongliang Mu Link: https://lore.kernel.org/r/20230422133407.72908-1-jkluo@hust.edu.cn Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/adc/mxs-lradc-adc.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c index bca79a93cbe43..a50f39143d3ea 100644 --- a/drivers/iio/adc/mxs-lradc-adc.c +++ b/drivers/iio/adc/mxs-lradc-adc.c @@ -757,13 +757,13 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev) ret = mxs_lradc_adc_trigger_init(iio); if (ret) - goto err_trig; + return ret; ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time, &mxs_lradc_adc_trigger_handler, &mxs_lradc_adc_buffer_ops); if (ret) - return ret; + goto err_trig; adc->vref_mv = mxs_lradc_adc_vref_mv[lradc->soc]; @@ -801,9 +801,9 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev) err_dev: mxs_lradc_adc_hw_stop(adc); - mxs_lradc_adc_trigger_remove(iio); -err_trig: iio_triggered_buffer_cleanup(iio); +err_trig: + mxs_lradc_adc_trigger_remove(iio); return ret; } @@ -814,8 +814,8 @@ static int mxs_lradc_adc_remove(struct platform_device *pdev) iio_device_unregister(iio); mxs_lradc_adc_hw_stop(adc); - mxs_lradc_adc_trigger_remove(iio); iio_triggered_buffer_cleanup(iio); + mxs_lradc_adc_trigger_remove(iio); return 0; } From 279c3a2a5eb2c1054fbe38e5c33be08584229047 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 21 Apr 2023 13:41:56 +0300 Subject: [PATCH 011/303] iio: adc: palmas: fix off by one bugs Valid values for "adc_chan" are zero to (PALMAS_ADC_CH_MAX - 1). Smatch detects some buffer overflows caused by this: drivers/iio/adc/palmas_gpadc.c:721 palmas_gpadc_read_event_value() error: buffer overflow 'adc->thresholds' 16 <= 16 drivers/iio/adc/palmas_gpadc.c:758 palmas_gpadc_write_event_value() error: buffer overflow 'adc->thresholds' 16 <= 16 The effect of this bug in other functions is more complicated but obviously we should fix all of them. 
Fixes: a99544c6c883 ("iio: adc: palmas: add support for iio threshold events") Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/14fee94a-7db7-4371-b7d6-e94d86b9561e@kili.mountain Signed-off-by: Jonathan Cameron --- drivers/iio/adc/palmas_gpadc.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c index c1c439215aeb5..7dfc9c927a23f 100644 --- a/drivers/iio/adc/palmas_gpadc.c +++ b/drivers/iio/adc/palmas_gpadc.c @@ -547,7 +547,7 @@ static int palmas_gpadc_read_raw(struct iio_dev *indio_dev, int adc_chan = chan->channel; int ret = 0; - if (adc_chan > PALMAS_ADC_CH_MAX) + if (adc_chan >= PALMAS_ADC_CH_MAX) return -EINVAL; mutex_lock(&adc->lock); @@ -595,7 +595,7 @@ static int palmas_gpadc_read_event_config(struct iio_dev *indio_dev, int adc_chan = chan->channel; int ret = 0; - if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) + if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) return -EINVAL; mutex_lock(&adc->lock); @@ -684,7 +684,7 @@ static int palmas_gpadc_write_event_config(struct iio_dev *indio_dev, int adc_chan = chan->channel; int ret; - if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) + if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) return -EINVAL; mutex_lock(&adc->lock); @@ -710,7 +710,7 @@ static int palmas_gpadc_read_event_value(struct iio_dev *indio_dev, int adc_chan = chan->channel; int ret; - if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) + if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) return -EINVAL; mutex_lock(&adc->lock); @@ -744,7 +744,7 @@ static int palmas_gpadc_write_event_value(struct iio_dev *indio_dev, int old; int ret; - if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) + if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH) return -EINVAL; mutex_lock(&adc->lock); From 672cde9ef80ffde9e76d38f7aa2b287c4a18de9a Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Fri, 21 Apr 2023 08:46:11 +0300 Subject: [PATCH 012/303] iio: fix doc for iio_gts_find_sel_by_int_time The kerneldoc for iio_gts_find_sel_by_int_time() has an error. Documentation states that function is searching a selector for a HW-gain while it is searching a selector for an integration time. Fix the documentation by saying the function is looking for a selector for an integration time. Fixes: 38416c28e168 ("iio: light: Add gain-time-scale helpers") Signed-off-by: Matti Vaittinen Link: https://lore.kernel.org/r/ZEIjI4YUzqPZk/9X@fedora Signed-off-by: Jonathan Cameron --- include/linux/iio/iio-gts-helper.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/iio/iio-gts-helper.h b/include/linux/iio/iio-gts-helper.h index dd64e544a3daa..9cb6c80dea716 100644 --- a/include/linux/iio/iio-gts-helper.h +++ b/include/linux/iio/iio-gts-helper.h @@ -135,7 +135,7 @@ static inline int iio_gts_find_int_time_by_sel(struct iio_gts *gts, int sel) /** * iio_gts_find_sel_by_int_time - find selector matching integration time * @gts: Gain time scale descriptor - * @gain: HW-gain for which matching selector is searched for + * @time: Integration time for which matching selector is searched for * * Return: a selector matching given integration time or -EINVAL if * selector was not found. 
From b7b04f393ffa1530213f522115e7d77761eb5c4f Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Mon, 17 Apr 2023 12:19:57 +0300 Subject: [PATCH 013/303] iio: bu27034: Fix integration time The bu27034 uses microseconds for integration time configuration. This is incorrect, as the ABI mandates the use of seconds. Change the BU27034 driver to use seconds for the integration time. Fixes: e52afbd61039 ("iio: light: ROHM BU27034 Ambient Light Sensor") Signed-off-by: Matti Vaittinen Link: https://lore.kernel.org/r/a05647669af22ba919c7c87dccb43975e3235a87.1681722914.git.mazziesaccount@gmail.com Signed-off-by: Jonathan Cameron --- drivers/iio/light/rohm-bu27034.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c index e486dcf35eba7..25c9b79574a53 100644 --- a/drivers/iio/light/rohm-bu27034.c +++ b/drivers/iio/light/rohm-bu27034.c @@ -1167,11 +1167,12 @@ static int bu27034_read_raw(struct iio_dev *idev, switch (mask) { case IIO_CHAN_INFO_INT_TIME: - *val = bu27034_get_int_time(data); - if (*val < 0) - return *val; + *val = 0; + *val2 = bu27034_get_int_time(data); + if (*val2 < 0) + return *val2; - return IIO_VAL_INT; + return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_SCALE: return bu27034_get_scale(data, chan->channel, val, val2); @@ -1229,7 +1230,10 @@ static int bu27034_write_raw(struct iio_dev *idev, ret = bu27034_set_scale(data, chan->channel, val, val2); break; case IIO_CHAN_INFO_INT_TIME: - ret = bu27034_try_set_int_time(data, val); + if (!val) + ret = bu27034_try_set_int_time(data, val2); + else + ret = -EINVAL; break; default: ret = -EINVAL; From e65065132fa6d9e1fd3b7418eae5215e3b7bf6c7 Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Mon, 17 Apr 2023 12:20:18 +0300 Subject: [PATCH 014/303] iio: gts-helpers: fix integration time units The IIO ABI mandates expressing integration times in seconds. The GTS helper erroneously uses microseconds in integration_times_available. Fix this by converting the lists to IIO_VAL_INT_PLUS_MICRO. 
Fixes: 38416c28e168 ("iio: light: Add gain-time-scale helpers") Signed-off-by: Matti Vaittinen Link: https://lore.kernel.org/r/eeacd192c259e885850b5a2dd8b776bccfc44fa8.1681722914.git.mazziesaccount@gmail.com Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-gts-helper.c | 42 ++++++++++++++++++++------- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c index 8bb68975b2598..7653261d2dc2b 100644 --- a/drivers/iio/industrialio-gts-helper.c +++ b/drivers/iio/industrialio-gts-helper.c @@ -337,6 +337,17 @@ static int iio_gts_build_avail_scale_table(struct iio_gts *gts) return ret; } +static void iio_gts_us_to_int_micro(int *time_us, int *int_micro_times, + int num_times) +{ + int i; + + for (i = 0; i < num_times; i++) { + int_micro_times[i * 2] = time_us[i] / 1000000; + int_micro_times[i * 2 + 1] = time_us[i] % 1000000; + } +} + /** * iio_gts_build_avail_time_table - build table of available integration times * @gts: Gain time scale descriptor @@ -351,7 +362,7 @@ static int iio_gts_build_avail_scale_table(struct iio_gts *gts) */ static int iio_gts_build_avail_time_table(struct iio_gts *gts) { - int *times, i, j, idx = 0; + int *times, i, j, idx = 0, *int_micro_times; if (!gts->num_itime) return 0; @@ -378,13 +389,24 @@ static int iio_gts_build_avail_time_table(struct iio_gts *gts) } } } - gts->avail_time_tables = times; - /* - * This is just to survive a unlikely corner-case where times in the - * given time table were not unique. Else we could just trust the - * gts->num_itime. - */ - gts->num_avail_time_tables = idx; + + /* create a list of times formatted as list of IIO_VAL_INT_PLUS_MICRO */ + int_micro_times = kcalloc(idx, sizeof(int) * 2, GFP_KERNEL); + if (int_micro_times) { + /* + * This is just to survive a unlikely corner-case where times in + * the given time table were not unique. Else we could just + * trust the gts->num_itime. + */ + gts->num_avail_time_tables = idx; + iio_gts_us_to_int_micro(times, int_micro_times, idx); + } + + gts->avail_time_tables = int_micro_times; + kfree(times); + + if (!int_micro_times) + return -ENOMEM; return 0; } @@ -683,8 +705,8 @@ int iio_gts_avail_times(struct iio_gts *gts, const int **vals, int *type, return -EINVAL; *vals = gts->avail_time_tables; - *type = IIO_VAL_INT; - *length = gts->num_avail_time_tables; + *type = IIO_VAL_INT_PLUS_MICRO; + *length = gts->num_avail_time_tables * 2; return IIO_AVAIL_LIST; } From 79b8ded9d9c595db9bd5b2f62f5f738b36de1e22 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 16 Apr 2023 23:24:09 +0200 Subject: [PATCH 015/303] iio: accel: st_accel: Fix invalid mount_matrix on devices without ACPI _ONT method When apply_acpi_orientation() fails, st_accel_common_probe() will fall back to iio_read_mount_matrix(), which checks for a mount-matrix device property and if that is not set falls back to the identity matrix. But when a sensor has no ACPI companion fwnode, or when the ACPI fwnode does not have a "_ONT" method apply_acpi_orientation() was returning 0, causing iio_read_mount_matrix() to never get called resulting in an invalid mount_matrix: [root@fedora ~]# cat /sys/bus/iio/devices/iio\:device0/mount_matrix (null), (null), (null); (null), (null), (null); (null), (null), (null) Fix this by making apply_acpi_orientation() always return an error when it did not set the mount_matrix. 
Fixes: 3d8ad94bb175 ("iio: accel: st_sensors: Support generic mounting matrix") Signed-off-by: Hans de Goede Reviewed-by: Linus Walleij Tested-by: Marius Hoch Link: https://lore.kernel.org/r/20230416212409.310936-1-hdegoede@redhat.com Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/accel/st_accel_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 5f7d81b44b1d2..282e539157f87 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -1291,12 +1291,12 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev) adev = ACPI_COMPANION(indio_dev->dev.parent); if (!adev) - return 0; + return -ENXIO; /* Read _ONT data, which should be a package of 6 integers. */ status = acpi_evaluate_object(adev->handle, "_ONT", NULL, &buffer); if (status == AE_NOT_FOUND) { - return 0; + return -ENXIO; } else if (ACPI_FAILURE(status)) { dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n", status); From e55245d115bb9054cb72cdd5dda5660f4484873a Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Thu, 30 Mar 2023 12:21:00 +0200 Subject: [PATCH 016/303] iio: adc: ad7192: Change "shorted" channels to differential The AD7192 provides a specific channel configuration where both negative and positive inputs are connected to AIN2. This was represented in the ad7192 driver as a IIO channel with .channel = 2 and .extended_name set to "shorted". The problem with this approach, is that the driver provided two IIO channels with the identifier .channel = 2; one "shorted" and the other not. This goes against the IIO ABI, as a channel identifier should be unique. Address this issue by changing "shorted" channels to being differential instead, with channel 2 vs. itself, as we're actually measuring AIN2 vs. itself. Note that the fix tag is for the commit that moved the driver out of staging. The bug existed before that, but backporting would become very complex further down and unlikely to happen. 
Fixes: b581f748cce0 ("staging: iio: adc: ad7192: move out of staging") Signed-off-by: Paul Cercueil Co-developed-by: Alisa Roman Signed-off-by: Alisa Roman Reviewed-by: Nuno Sa Link: https://lore.kernel.org/r/20230330102100.17590-1-paul@crapouillou.net Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/adc/ad7192.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index 55a6ab5910160..99bb604b78c8c 100644 --- a/drivers/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -897,10 +897,6 @@ static const struct iio_info ad7195_info = { __AD719x_CHANNEL(_si, _channel1, -1, _address, NULL, IIO_VOLTAGE, \ BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info) -#define AD719x_SHORTED_CHANNEL(_si, _channel1, _address) \ - __AD719x_CHANNEL(_si, _channel1, -1, _address, "shorted", IIO_VOLTAGE, \ - BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info) - #define AD719x_TEMP_CHANNEL(_si, _address) \ __AD719x_CHANNEL(_si, 0, -1, _address, NULL, IIO_TEMP, 0, NULL) @@ -908,7 +904,7 @@ static const struct iio_chan_spec ad7192_channels[] = { AD719x_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M), AD719x_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M), AD719x_TEMP_CHANNEL(2, AD7192_CH_TEMP), - AD719x_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M), + AD719x_DIFF_CHANNEL(3, 2, 2, AD7192_CH_AIN2P_AIN2M), AD719x_CHANNEL(4, 1, AD7192_CH_AIN1), AD719x_CHANNEL(5, 2, AD7192_CH_AIN2), AD719x_CHANNEL(6, 3, AD7192_CH_AIN3), @@ -922,7 +918,7 @@ static const struct iio_chan_spec ad7193_channels[] = { AD719x_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M), AD719x_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M), AD719x_TEMP_CHANNEL(4, AD7193_CH_TEMP), - AD719x_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M), + AD719x_DIFF_CHANNEL(5, 2, 2, AD7193_CH_AIN2P_AIN2M), AD719x_CHANNEL(6, 1, AD7193_CH_AIN1), AD719x_CHANNEL(7, 2, AD7193_CH_AIN2), AD719x_CHANNEL(8, 3, AD7193_CH_AIN3), From 9c0d6ccd7d6bbd275e390b55a3390b4274291d95 Mon Sep 17 00:00:00 2001 From: Sean Nyekjaer Date: Wed, 3 May 2023 18:20:28 +0200 Subject: [PATCH 017/303] iio: adc: stm32-adc: skip adc-diff-channels setup if none is present If no adc differential channels are defined driver will fail with EINVAL: stm32-adc: probe of 48003000.adc:adc@0 failed with error -22 Fix this by skipping the initialization if no channels are defined. This applies only to the legacy way of initializing adc channels. Fixes: d7705f35448a ("iio: adc: stm32-adc: convert to device properties") Signed-off-by: Sean Nyekjaer Link: https://lore.kernel.org/r/20230503162029.3654093-1-sean@geanix.com Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/adc/stm32-adc.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 1aadb2ad2cabd..c105d82c3e454 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -2006,16 +2006,15 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm * to get the *real* number of channels. 
*/ ret = device_property_count_u32(dev, "st,adc-diff-channels"); - if (ret < 0) - return ret; - - ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32)); - if (ret > adc_info->max_channels) { - dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n"); - return -EINVAL; - } else if (ret > 0) { - adc->num_diff = ret; - num_channels += ret; + if (ret > 0) { + ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32)); + if (ret > adc_info->max_channels) { + dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n"); + return -EINVAL; + } else if (ret > 0) { + adc->num_diff = ret; + num_channels += ret; + } } /* Optional sample time is provided either for each, or all channels */ From 3e27ef0ced49f8ae7883c25fadf76a2086e99025 Mon Sep 17 00:00:00 2001 From: Sean Nyekjaer Date: Wed, 3 May 2023 18:20:29 +0200 Subject: [PATCH 018/303] iio: adc: stm32-adc: skip adc-channels setup if none is present If only adc differential channels are defined driver will fail with stm32-adc: probe of 48003000.adc:adc@0 failed with error -22 Fix this by skipping the initialization if no channels are defined. This applies only to the legacy way of initializing adc channels. Fixes: d7705f35448a ("iio: adc: stm32-adc: convert to device properties") Signed-off-by: Sean Nyekjaer Reviewed-by: Olivier Moysan Link: https://lore.kernel.org/r/20230503162029.3654093-2-sean@geanix.com Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/adc/stm32-adc.c | 42 ++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index c105d82c3e454..bd7e2408bf281 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -2036,6 +2036,7 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev, struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX]; struct device *dev = &indio_dev->dev; u32 num_diff = adc->num_diff; + int num_se = nchans - num_diff; int size = num_diff * sizeof(*diff) / sizeof(u32); int scan_index = 0, ret, i, c; u32 smp = 0, smps[STM32_ADC_CH_MAX], chans[STM32_ADC_CH_MAX]; @@ -2062,29 +2063,32 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev, scan_index++; } } - - ret = device_property_read_u32_array(dev, "st,adc-channels", chans, - nchans); - if (ret) - return ret; - - for (c = 0; c < nchans; c++) { - if (chans[c] >= adc_info->max_channels) { - dev_err(&indio_dev->dev, "Invalid channel %d\n", - chans[c]); - return -EINVAL; + if (num_se > 0) { + ret = device_property_read_u32_array(dev, "st,adc-channels", chans, num_se); + if (ret) { + dev_err(&indio_dev->dev, "Failed to get st,adc-channels %d\n", ret); + return ret; } - /* Channel can't be configured both as single-ended & diff */ - for (i = 0; i < num_diff; i++) { - if (chans[c] == diff[i].vinp) { - dev_err(&indio_dev->dev, "channel %d misconfigured\n", chans[c]); + for (c = 0; c < num_se; c++) { + if (chans[c] >= adc_info->max_channels) { + dev_err(&indio_dev->dev, "Invalid channel %d\n", + chans[c]); return -EINVAL; } + + /* Channel can't be configured both as single-ended & diff */ + for (i = 0; i < num_diff; i++) { + if (chans[c] == diff[i].vinp) { + dev_err(&indio_dev->dev, "channel %d misconfigured\n", + chans[c]); + return -EINVAL; + } + } + stm32_adc_chan_init_one(indio_dev, &channels[scan_index], + chans[c], 0, scan_index, false); + scan_index++; } - stm32_adc_chan_init_one(indio_dev, &channels[scan_index], - chans[c], 0, scan_index, false); - scan_index++; } if (adc->nsmps > 0) { @@ -2305,7 +2309,7 @@ static 
int stm32_adc_chan_fw_init(struct iio_dev *indio_dev, bool timestamping) if (legacy) ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels, - num_channels); + timestamping ? num_channels - 1 : num_channels); else ret = stm32_adc_generic_chan_init(indio_dev, adc, channels); if (ret < 0) From 72c1d11074d6160f19760d57fc78b2a7f52bcb1a Mon Sep 17 00:00:00 2001 From: Alexander Stein Date: Mon, 24 Apr 2023 11:23:12 +0200 Subject: [PATCH 019/303] dt-bindings: iio: imx8qxp-adc: add missing vref-supply Although this property is used right now for IIO_CHAN_INFO_SCALE, this ADC has two internal reference voltages, which the driver currently doesn't make use of. Fixes: db73419d8c06 ("dt-bindings: iio: adc: Add binding documentation for NXP IMX8QXP ADC") Signed-off-by: Alexander Stein Reviewed-by: Haibo Chen Acked-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20230424092312.61746-1-alexander.stein@ew.tq-group.com Signed-off-by: Jonathan Cameron --- .../devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml b/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml index 63369ba388e47..0a192ca192c5b 100644 --- a/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml @@ -39,6 +39,12 @@ properties: power-domains: maxItems: 1 + vref-supply: + description: | + External ADC reference voltage supply on VREFH pad. If VERID[MVI] is + set, there are additional, internal reference voltages selectable. + VREFH1 is always from VREFH pad. + "#io-channel-cells": const: 1 @@ -72,6 +78,7 @@ examples: assigned-clocks = <&clk IMX_SC_R_ADC_0>; assigned-clock-rates = <24000000>; power-domains = <&pd IMX_SC_R_ADC_0>; + vref-supply = <®_1v8>; #io-channel-cells = <1>; }; }; From a551c26e8e568fad42120843521529241b9bceec Mon Sep 17 00:00:00 2001 From: Frank Li Date: Mon, 1 May 2023 10:36:04 -0400 Subject: [PATCH 020/303] iio: light: vcnl4035: fixed chip ID check VCNL4035 register(0xE) ID_L and ID_M define as: ID_L: 0x80 ID_H: 7:6 (0:0) 5:4 (0:0) slave address = 0x60 (7-bit) (0:1) slave address = 0x51 (7-bit) (1:0) slave address = 0x40 (7-bit) (1:0) slave address = 0x41 (7-bit) 3:0 Version code default (0:0:0:0) So just check ID_L. 
Fixes: 55707294c4eb ("iio: light: Add support for vishay vcnl4035") Signed-off-by: Frank Li Link: https://lore.kernel.org/r/20230501143605.1615549-1-Frank.Li@nxp.com Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/light/vcnl4035.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c index 14e29330e972e..94f5d611e98c3 100644 --- a/drivers/iio/light/vcnl4035.c +++ b/drivers/iio/light/vcnl4035.c @@ -8,6 +8,7 @@ * TODO: Proximity */ #include +#include #include #include #include @@ -42,6 +43,7 @@ #define VCNL4035_ALS_PERS_MASK GENMASK(3, 2) #define VCNL4035_INT_ALS_IF_H_MASK BIT(12) #define VCNL4035_INT_ALS_IF_L_MASK BIT(13) +#define VCNL4035_DEV_ID_MASK GENMASK(7, 0) /* Default values */ #define VCNL4035_MODE_ALS_ENABLE BIT(0) @@ -413,6 +415,7 @@ static int vcnl4035_init(struct vcnl4035_data *data) return ret; } + id = FIELD_GET(VCNL4035_DEV_ID_MASK, id); if (id != VCNL4035_DEV_ID_VAL) { dev_err(&data->client->dev, "Wrong id, got %x, expected %x\n", id, VCNL4035_DEV_ID_VAL); From 24febc99ca725dcf42d57168a2f4e8a75a5ade92 Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Wed, 3 May 2023 11:58:17 +0200 Subject: [PATCH 021/303] iio: addac: ad74413: fix resistance input processing On success, ad74413r_get_single_adc_result() returns IIO_VAL_INT aka 1. So currently, the IIO_CHAN_INFO_PROCESSED case is effectively equivalent to the IIO_CHAN_INFO_RAW case, and we never call ad74413r_adc_to_resistance_result() to convert the adc measurement to ohms. Check ret for being negative rather than non-zero. Fixes: fea251b6a5dbd (iio: addac: add AD74413R driver) Signed-off-by: Rasmus Villemoes Reviewed-by: Nuno Sa Link: https://lore.kernel.org/r/20230503095817.452551-1-linux@rasmusvillemoes.dk Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/addac/ad74413r.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c index 07e9f6ae16a83..e3366cf5eb319 100644 --- a/drivers/iio/addac/ad74413r.c +++ b/drivers/iio/addac/ad74413r.c @@ -1007,7 +1007,7 @@ static int ad74413r_read_raw(struct iio_dev *indio_dev, ret = ad74413r_get_single_adc_result(indio_dev, chan->channel, val); - if (ret) + if (ret < 0) return ret; ad74413r_adc_to_resistance_result(*val, val); From a146eccb68be161ae9eab5f3f68bb0ed7c0fbaa8 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Mon, 8 May 2023 06:02:08 +0200 Subject: [PATCH 022/303] iio: dac: build ad5758 driver when AD5758 is selected Commit 28d1a7ac2a0d ("iio: dac: Add AD5758 support") adds the config AD5758 and the corresponding driver ad5758.c. In the Makefile, the ad5758 driver is however included when AD5755 is selected, not when AD5758 is selected. Probably, this was simply a mistake that happened by copy-and-paste and forgetting to adjust the actual line. Surprisingly, no one has ever noticed that this driver is actually only included when AD5755 is selected and that the config AD5758 has actually no effect on the build. 
Fixes: 28d1a7ac2a0d ("iio: dac: Add AD5758 support") Signed-off-by: Lukas Bulwahn Link: https://lore.kernel.org/r/20230508040208.12033-1-lukas.bulwahn@gmail.com Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/dac/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile index 6c74fea21736a..addd97a788389 100644 --- a/drivers/iio/dac/Makefile +++ b/drivers/iio/dac/Makefile @@ -17,7 +17,7 @@ obj-$(CONFIG_AD5592R_BASE) += ad5592r-base.o obj-$(CONFIG_AD5592R) += ad5592r.o obj-$(CONFIG_AD5593R) += ad5593r.o obj-$(CONFIG_AD5755) += ad5755.o -obj-$(CONFIG_AD5755) += ad5758.o +obj-$(CONFIG_AD5758) += ad5758.o obj-$(CONFIG_AD5761) += ad5761.o obj-$(CONFIG_AD5764) += ad5764.o obj-$(CONFIG_AD5766) += ad5766.o From d049151adde4ec17392592d145e94f0086f8ee80 Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Mon, 8 May 2023 14:01:25 +0300 Subject: [PATCH 023/303] iio: bu27034: Ensure reset is written The reset bit must be always written to the hardware no matter what value is in a cache or register. Ensure this by using regmap_write_bits() instead of the regmap_update_bits(). Furthermore, the SWRESET bit may be self-clearing, so mark the SYSTEM_CONTROL register volatile to guarantee we do also read the right state - should we ever need to read it. Finally, writing the SWRESET bit will restore the default register values. This can cause register cache to be outdated if there are any register values cached. Rebuild register cache after SWRESET and use regmap_update_bits() when performing the reset. Signed-off-by: Matti Vaittinen Fixes: e52afbd61039 ("iio: light: ROHM BU27034 Ambient Light Sensor") Link: https://lore.kernel.org/r/ZFjWhbfuN5XcKty+@fedora Signed-off-by: Jonathan Cameron --- drivers/iio/light/rohm-bu27034.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c index 25c9b79574a53..f85194fda6b09 100644 --- a/drivers/iio/light/rohm-bu27034.c +++ b/drivers/iio/light/rohm-bu27034.c @@ -231,6 +231,9 @@ struct bu27034_result { static const struct regmap_range bu27034_volatile_ranges[] = { { + .range_min = BU27034_REG_SYSTEM_CONTROL, + .range_max = BU27034_REG_SYSTEM_CONTROL, + }, { .range_min = BU27034_REG_MODE_CONTROL4, .range_max = BU27034_REG_MODE_CONTROL4, }, { @@ -1272,12 +1275,19 @@ static int bu27034_chip_init(struct bu27034_data *data) int ret, sel; /* Reset */ - ret = regmap_update_bits(data->regmap, BU27034_REG_SYSTEM_CONTROL, + ret = regmap_write_bits(data->regmap, BU27034_REG_SYSTEM_CONTROL, BU27034_MASK_SW_RESET, BU27034_MASK_SW_RESET); if (ret) return dev_err_probe(data->dev, ret, "Sensor reset failed\n"); msleep(1); + + ret = regmap_reinit_cache(data->regmap, &bu27034_regmap); + if (ret) { + dev_err(data->dev, "Failed to reinit reg cache\n"); + return ret; + } + /* * Read integration time here to ensure it is in regmap cache. We do * this to speed-up the int-time acquisition in the start of the buffer From 56cd3d1c5c5b073a1a444eafdcf97d4d866d351a Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Fri, 12 May 2023 10:53:41 +0300 Subject: [PATCH 024/303] iio: accel: kx022a fix irq getting The fwnode_irq_get_byname() was returning 0 at device-tree mapping error. If this occurred, the KX022A driver did abort the probe but errorneously directly returned the return value from fwnode_irq_get_byname() from probe. In case of a device-tree mapping error this indicated success. 
The fwnode_irq_get_byname() has since been fixed to not return zero on error, so the check for fwnode_irq_get_byname() can be relaxed to only treat negative values as errors. This is also a decent fix even when backported to branches where fwnode_irq_get_byname() can still return zero on error, because the KX022A probe should later fail at IRQ requesting and proper error handling should follow. Relax the return value check for fwnode_irq_get_byname() to only treat negative values as errors. Reported-by: kernel test robot Reported-by: Dan Carpenter Closes: https://lore.kernel.org/r/202305110245.MFxC9bUj-lkp@intel.com/ Link: https://lore.kernel.org/r/202305110245.MFxC9bUj-lkp@intel.com/ Signed-off-by: Matti Vaittinen Fixes: 7c1d1677b322 ("iio: accel: Support Kionix/ROHM KX022A accelerometer") Link: https://lore.kernel.org/r/b45b4b638db109c6078d243252df3a7b0485f7d5.1683875389.git.mazziesaccount@gmail.com Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/accel/kionix-kx022a.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c index f98393d746668..b8636fa8eaeb6 100644 --- a/drivers/iio/accel/kionix-kx022a.c +++ b/drivers/iio/accel/kionix-kx022a.c @@ -1048,7 +1048,7 @@ int kx022a_probe_internal(struct device *dev) data->ien_reg = KX022A_REG_INC4; } else { irq = fwnode_irq_get_byname(fwnode, "INT2"); - if (irq <= 0) + if (irq < 0) return dev_err_probe(dev, irq, "No suitable IRQ\n"); data->inc_reg = KX022A_REG_INC5; From 09d3bec7009186bdba77039df01e5834788b3f95 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Thu, 11 May 2023 02:43:30 +0200 Subject: [PATCH 025/303] iio: dac: mcp4725: Fix i2c_master_send() return value handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The i2c_master_send() returns the number of sent bytes on success, or a negative value on error. The suspend/resume callbacks expect zero on success and non-zero on error. Adapt the return value of the i2c_master_send() to the expectation of the suspend and resume callbacks, including proper validation of the return value. 
Fixes: cf35ad61aca2 ("iio: add mcp4725 I2C DAC driver") Signed-off-by: Marek Vasut Reviewed-by: Uwe Kleine-König Link: https://lore.kernel.org/r/20230511004330.206942-1-marex@denx.de Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/dac/mcp4725.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c index 46bf758760f85..3f5661a3718fe 100644 --- a/drivers/iio/dac/mcp4725.c +++ b/drivers/iio/dac/mcp4725.c @@ -47,12 +47,18 @@ static int mcp4725_suspend(struct device *dev) struct mcp4725_data *data = iio_priv(i2c_get_clientdata( to_i2c_client(dev))); u8 outbuf[2]; + int ret; outbuf[0] = (data->powerdown_mode + 1) << 4; outbuf[1] = 0; data->powerdown = true; - return i2c_master_send(data->client, outbuf, 2); + ret = i2c_master_send(data->client, outbuf, 2); + if (ret < 0) + return ret; + else if (ret != 2) + return -EIO; + return 0; } static int mcp4725_resume(struct device *dev) @@ -60,13 +66,19 @@ static int mcp4725_resume(struct device *dev) struct mcp4725_data *data = iio_priv(i2c_get_clientdata( to_i2c_client(dev))); u8 outbuf[2]; + int ret; /* restore previous DAC value */ outbuf[0] = (data->dac_value >> 8) & 0xf; outbuf[1] = data->dac_value & 0xff; data->powerdown = false; - return i2c_master_send(data->client, outbuf, 2); + ret = i2c_master_send(data->client, outbuf, 2); + if (ret < 0) + return ret; + else if (ret != 2) + return -EIO; + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(mcp4725_pm_ops, mcp4725_suspend, mcp4725_resume); From 55720d242052e860b9fde445e302e0425722e7f1 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 9 May 2023 14:34:22 +0200 Subject: [PATCH 026/303] dt-bindings: iio: adc: renesas,rcar-gyroadc: Fix adi,ad7476 compatible value The conversion to json-schema accidentally dropped the "ad" part prefix from the compatible value. Fixes: 8c41245872e2 ("dt-bindings:iio:adc:renesas,rcar-gyroadc: txt to yaml conversion.") Signed-off-by: Geert Uytterhoeven Reviewed-by: Marek Vasut Reviewed-by: Krzysztof Kozlowski Reviewed-by: Wolfram Sang Link: https://lore.kernel.org/r/6b328a3f52657c20759f3a5bb2fe033d47644ba8.1683635404.git.geert+renesas@glider.be Cc: Signed-off-by: Jonathan Cameron --- .../devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml b/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml index 1c7aee5ed3e0b..36dff3250ea76 100644 --- a/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml @@ -90,7 +90,7 @@ patternProperties: of the MAX chips to the GyroADC, while MISO line of each Maxim ADC connects to a shared input pin of the GyroADC. 
enum: - - adi,7476 + - adi,ad7476 - fujitsu,mb88101a - maxim,max1162 - maxim,max11100 From 03262a3f5b5b910c7c2900c2f8884832794355f5 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Tue, 2 May 2023 10:50:05 -0400 Subject: [PATCH 027/303] phy: mediatek: rework the floating point comparisons to fixed point MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gcc on aarch64 reports drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c: In function ‘mtk_hdmi_pll_set_rate’: drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c:240:52: error: ‘-mgeneral-regs-only’ is incompatible with the use of floating-point types 240 | else if (tmds_clk >= 54 * MEGA && tmds_clk < 148.35 * MEGA) Floating point should not be used, so rework the floating point comparisons to fixed point. Signed-off-by: Tom Rix Reviewed-by: Chunfeng Yun Link: https://lore.kernel.org/r/20230502145005.2927101-1-trix@redhat.com Signed-off-by: Vinod Koul --- drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c index caa953780beeb..8aa7251de4a96 100644 --- a/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c +++ b/drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c @@ -237,11 +237,11 @@ static int mtk_hdmi_pll_calc(struct mtk_hdmi_phy *hdmi_phy, struct clk_hw *hw, */ if (tmds_clk < 54 * MEGA) txposdiv = 8; - else if (tmds_clk >= 54 * MEGA && tmds_clk < 148.35 * MEGA) + else if (tmds_clk >= 54 * MEGA && (tmds_clk * 100) < 14835 * MEGA) txposdiv = 4; - else if (tmds_clk >= 148.35 * MEGA && tmds_clk < 296.7 * MEGA) + else if ((tmds_clk * 100) >= 14835 * MEGA && (tmds_clk * 10) < 2967 * MEGA) txposdiv = 2; - else if (tmds_clk >= 296.7 * MEGA && tmds_clk <= 594 * MEGA) + else if ((tmds_clk * 10) >= 2967 * MEGA && tmds_clk <= 594 * MEGA) txposdiv = 1; else return -EINVAL; @@ -324,12 +324,12 @@ static int mtk_hdmi_pll_drv_setting(struct clk_hw *hw) clk_channel_bias = 0x34; /* 20mA */ impedance_en = 0xf; impedance = 0x36; /* 100ohm */ - } else if (pixel_clk >= 74.175 * MEGA && pixel_clk <= 300 * MEGA) { + } else if (((u64)pixel_clk * 1000) >= 74175 * MEGA && pixel_clk <= 300 * MEGA) { data_channel_bias = 0x34; /* 20mA */ clk_channel_bias = 0x2c; /* 16mA */ impedance_en = 0xf; impedance = 0x36; /* 100ohm */ - } else if (pixel_clk >= 27 * MEGA && pixel_clk < 74.175 * MEGA) { + } else if (pixel_clk >= 27 * MEGA && ((u64)pixel_clk * 1000) < 74175 * MEGA) { data_channel_bias = 0x14; /* 10mA */ clk_channel_bias = 0x14; /* 10mA */ impedance_en = 0x0; From b949193011540bb17cf1da7795ec42af1b875203 Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Fri, 12 May 2023 15:11:41 +0200 Subject: [PATCH 028/303] phy: amlogic: phy-meson-g12a-mipi-dphy-analog: fix CNTL2_DIF_TX_CTL0 value Use the same CNTL2_DIF_TX_CTL0 value used by the vendor, it was reported fixing timings issues. 
Fixes: 2a56dc650e54 ("phy: amlogic: Add G12A Analog MIPI D-PHY driver") Signed-off-by: Neil Armstrong Link: https://lore.kernel.org/r/20230512-amlogic-v6-4-upstream-dsi-ccf-vim3-v4-10-2592c29ea263@linaro.org Signed-off-by: Vinod Koul --- drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c index c14089fa7db49..cabdddbbabfd7 100644 --- a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c +++ b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c @@ -70,7 +70,7 @@ static int phy_g12a_mipi_dphy_analog_power_on(struct phy *phy) HHI_MIPI_CNTL1_BANDGAP); regmap_write(priv->regmap, HHI_MIPI_CNTL2, - FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x459) | + FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x45a) | FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL1, 0x2680)); reg = DSI_LANE_CLK; From 2a881183dc5ab2474ef602e48fe7af34db460d95 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Sun, 7 May 2023 16:48:18 +0200 Subject: [PATCH 029/303] phy: qcom-snps: correct struct qcom_snps_hsphy kerneldoc Update kerneldoc of struct qcom_snps_hsphy to fix: drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c:135: warning: Function parameter or member 'update_seq_cfg' not described in 'qcom_snps_hsphy' Signed-off-by: Krzysztof Kozlowski Reviewed-by: Konrad Dybcio Link: https://lore.kernel.org/r/20230507144818.193039-1-krzysztof.kozlowski@linaro.org Signed-off-by: Vinod Koul --- drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c index a590635962140..6c237f3cc66db 100644 --- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c +++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c @@ -115,11 +115,11 @@ struct phy_override_seq { * * @cfg_ahb_clk: AHB2PHY interface clock * @ref_clk: phy reference clock - * @iface_clk: phy interface clock * @phy_reset: phy reset control * @vregs: regulator supplies bulk data * @phy_initialized: if PHY has been initialized correctly * @mode: contains the current mode the PHY is in + * @update_seq_cfg: tuning parameters for phy init */ struct qcom_snps_hsphy { struct phy *phy; From 17eabd6a044b57b2c1b5459eb9d11af3ea909e63 Mon Sep 17 00:00:00 2001 From: Bob Pearson Date: Mon, 15 May 2023 15:10:57 -0500 Subject: [PATCH 030/303] RDMA/rxe: Fix double unlock in rxe_qp.c A recent patch can cause a double spin_unlock_bh() in rxe_qp_to_attr() at line 715 in rxe_qp.c. Move the 2nd unlock into the if statement. 
Fixes: f605f26ea196 ("RDMA/rxe: Protect QP state with qp->state_lock") Link: https://lore.kernel.org/r/20230515201056.1591140-1-rpearsonhpe@gmail.com Reported-by: Dan Carpenter Closes: https://lore.kernel.org/r/27773078-40ce-414f-8b97-781954da9f25@kili.mountain Signed-off-by: Bob Pearson Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/rxe/rxe_qp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index c5451a4488ca3..245dd36638c73 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -712,8 +712,9 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) if (qp->attr.sq_draining) { spin_unlock_bh(&qp->state_lock); cond_resched(); + } else { + spin_unlock_bh(&qp->state_lock); } - spin_unlock_bh(&qp->state_lock); return 0; } From b5f3fe27c54b1fe77d4dd834ccd48a6d694f872e Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Wed, 10 May 2023 11:50:56 +0800 Subject: [PATCH 031/303] RDMA/rxe: Convert spin_{lock_bh,unlock_bh} to spin_{lock_irqsave,unlock_irqrestore} We need to call spin_lock_irqsave()/spin_unlock_irqrestore() for state_lock in rxe, otherwsie the callchain: ib_post_send_mad -> spin_lock_irqsave -> ib_post_send -> rxe_post_send -> spin_lock_bh -> spin_unlock_bh -> spin_unlock_irqrestore Causes below traces during run block nvmeof-mp/001 test due to mismatched spinlock nesting: WARNING: CPU: 0 PID: 94794 at kernel/softirq.c:376 __local_bh_enable_ip+0xc2/0x140 [ ... ] CPU: 0 PID: 94794 Comm: kworker/u4:1 Tainted: G E 6.4.0-rc1 #9 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.15.0-0-g2dd4b9b-rebuilt.opensuse.org 04/01/2014 Workqueue: rdma_cm cma_work_handler [rdma_cm] RIP: 0010:__local_bh_enable_ip+0xc2/0x140 Code: 48 85 c0 74 72 5b 41 5c 5d 31 c0 89 c2 89 c1 89 c6 89 c7 41 89 c0 e9 bd 0e 11 01 65 8b 05 f2 65 72 48 85 c0 0f 85 76 ff ff ff <0f> 0b e9 6f ff ff ff e8 d2 39 1c 00 eb 80 4c 89 e7 e8 68 ad 0a 00 RSP: 0018:ffffb7cf818539f0 EFLAGS: 00010046 RAX: 0000000000000000 RBX: 0000000000000201 RCX: 0000000000000000 RDX: 0000000000000000 RSI: 0000000000000201 RDI: ffffffffc0f25f79 RBP: ffffb7cf81853a00 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: ffffffffc0f25f79 R13: ffff8db1f0fa6000 R14: ffff8db2c63ff000 R15: 00000000000000e8 FS: 0000000000000000(0000) GS:ffff8db33bc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000559758db0f20 CR3: 0000000105124000 CR4: 00000000003506f0 Call Trace: _raw_spin_unlock_bh+0x31/0x40 rxe_post_send+0x59/0x8b0 [rdma_rxe] ib_send_mad+0x26b/0x470 [ib_core] ib_post_send_mad+0x150/0xb40 [ib_core] ? cm_form_tid+0x5b/0x90 [ib_cm] ib_send_cm_req+0x7c8/0xb70 [ib_cm] rdma_connect_locked+0x433/0x940 [rdma_cm] nvme_rdma_cm_handler+0x5d7/0x9c0 [nvme_rdma] cma_cm_event_handler+0x4f/0x170 [rdma_cm] cma_work_handler+0x6a/0xe0 [rdma_cm] process_one_work+0x2a9/0x580 worker_thread+0x52/0x3f0 ? __pfx_worker_thread+0x10/0x10 kthread+0x109/0x140 ? __pfx_kthread+0x10/0x10 ret_from_fork+0x2c/0x50 raw_local_irq_restore() called with IRQs enabled WARNING: CPU: 0 PID: 94794 at kernel/locking/irqflag-debug.c:10 warn_bogus_irq_restore+0x37/0x60 [ ... 
] CPU: 0 PID: 94794 Comm: kworker/u4:1 Tainted: G W E 6.4.0-rc1 #9 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.15.0-0-g2dd4b9b-rebuilt.opensuse.org 04/01/2014 Workqueue: rdma_cm cma_work_handler [rdma_cm] RIP: 0010:warn_bogus_irq_restore+0x37/0x60 Code: fb 01 77 36 83 e3 01 74 0e 48 8b 5d f8 c9 31 f6 89 f7 e9 ac ea 01 00 48 c7 c7 e0 52 33 b9 c6 05 bb 1c 69 01 01 e8 39 24 f0 fe <0f> 0b 48 8b 5d f8 c9 31 f6 89 f7 e9 89 ea 01 00 0f b6 f3 48 c7 c7 RSP: 0018:ffffb7cf81853a58 EFLAGS: 00010246 RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 RBP: ffffb7cf81853a60 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: ffff8db2cfb1a9e8 R13: ffff8db2cfb1a9d8 R14: ffff8db2c63ff000 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff8db33bc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000559758db0f20 CR3: 0000000105124000 CR4: 00000000003506f0 Call Trace: _raw_spin_unlock_irqrestore+0x91/0xa0 ib_send_mad+0x1e3/0x470 [ib_core] ib_post_send_mad+0x150/0xb40 [ib_core] ? cm_form_tid+0x5b/0x90 [ib_cm] ib_send_cm_req+0x7c8/0xb70 [ib_cm] rdma_connect_locked+0x433/0x940 [rdma_cm] nvme_rdma_cm_handler+0x5d7/0x9c0 [nvme_rdma] cma_cm_event_handler+0x4f/0x170 [rdma_cm] cma_work_handler+0x6a/0xe0 [rdma_cm] process_one_work+0x2a9/0x580 worker_thread+0x52/0x3f0 ? __pfx_worker_thread+0x10/0x10 kthread+0x109/0x140 ? __pfx_kthread+0x10/0x10 ret_from_fork+0x2c/0x50 Fixes: f605f26ea196 ("RDMA/rxe: Protect QP state with qp->state_lock") Link: https://lore.kernel.org/r/20230510035056.881196-1-guoqing.jiang@linux.dev Signed-off-by: Guoqing Jiang Reviewed-by: Bob Pearson Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/rxe/rxe_comp.c | 26 +++++++++++-------- drivers/infiniband/sw/rxe/rxe_net.c | 7 +++--- drivers/infiniband/sw/rxe/rxe_qp.c | 36 +++++++++++++++++---------- drivers/infiniband/sw/rxe/rxe_recv.c | 9 ++++--- drivers/infiniband/sw/rxe/rxe_req.c | 30 ++++++++++++---------- drivers/infiniband/sw/rxe/rxe_resp.c | 14 ++++++----- drivers/infiniband/sw/rxe/rxe_verbs.c | 25 ++++++++++--------- 7 files changed, 86 insertions(+), 61 deletions(-) diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index db18ace74d2b5..f46c5a5fd0aea 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -115,15 +115,16 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode) void retransmit_timer(struct timer_list *t) { struct rxe_qp *qp = from_timer(qp, t, retrans_timer); + unsigned long flags; rxe_dbg_qp(qp, "retransmit timer fired\n"); - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (qp->valid) { qp->comp.timeout = 1; rxe_sched_task(&qp->comp.task); } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb) @@ -481,11 +482,13 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) static void comp_check_sq_drain_done(struct rxe_qp *qp) { - spin_lock_bh(&qp->state_lock); + unsigned long flags; + + spin_lock_irqsave(&qp->state_lock, flags); if (unlikely(qp_state(qp) == IB_QPS_SQD)) { if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) { qp->attr.sq_draining = 0; - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (qp->ibqp.event_handler) { struct ib_event ev; @@ -499,7 +502,7 @@ 
static void comp_check_sq_drain_done(struct rxe_qp *qp) return; } } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } static inline enum comp_state complete_ack(struct rxe_qp *qp, @@ -625,13 +628,15 @@ static void free_pkt(struct rxe_pkt_info *pkt) */ static void reset_retry_timer(struct rxe_qp *qp) { + unsigned long flags; + if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) { - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (qp_state(qp) >= IB_QPS_RTS && psn_compare(qp->req.psn, qp->comp.psn) > 0) mod_timer(&qp->retrans_timer, jiffies + qp->qp_timeout_jiffies); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } } @@ -643,18 +648,19 @@ int rxe_completer(struct rxe_qp *qp) struct rxe_pkt_info *pkt = NULL; enum comp_state state; int ret; + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (!qp->valid || qp_state(qp) == IB_QPS_ERR || qp_state(qp) == IB_QPS_RESET) { bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR); drain_resp_pkts(qp); flush_send_queue(qp, notify); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); goto exit; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (qp->comp.timeout) { qp->comp.timeout_retry = 1; diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 2bc7361152ea7..a38fab19bed19 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -412,15 +412,16 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, int err; int is_request = pkt->mask & RXE_REQ_MASK; struct rxe_dev *rxe = to_rdev(qp->ibqp.device); + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if ((is_request && (qp_state(qp) < IB_QPS_RTS)) || (!is_request && (qp_state(qp) < IB_QPS_RTR))) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n"); goto drop; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); rxe_icrc_generate(skb, pkt); diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 245dd36638c73..61a2eb77d9999 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -300,6 +300,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, struct rxe_cq *rcq = to_rcq(init->recv_cq); struct rxe_cq *scq = to_rcq(init->send_cq); struct rxe_srq *srq = init->srq ? 
to_rsrq(init->srq) : NULL; + unsigned long flags; rxe_get(pd); rxe_get(rcq); @@ -325,10 +326,10 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, if (err) goto err2; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); qp->attr.qp_state = IB_QPS_RESET; qp->valid = 1; - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return 0; @@ -492,24 +493,28 @@ static void rxe_qp_reset(struct rxe_qp *qp) /* move the qp to the error state */ void rxe_qp_error(struct rxe_qp *qp) { - spin_lock_bh(&qp->state_lock); + unsigned long flags; + + spin_lock_irqsave(&qp->state_lock, flags); qp->attr.qp_state = IB_QPS_ERR; /* drain work and packet queues */ rxe_sched_task(&qp->resp.task); rxe_sched_task(&qp->comp.task); rxe_sched_task(&qp->req.task); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) { - spin_lock_bh(&qp->state_lock); + unsigned long flags; + + spin_lock_irqsave(&qp->state_lock, flags); qp->attr.sq_draining = 1; rxe_sched_task(&qp->comp.task); rxe_sched_task(&qp->req.task); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } /* caller should hold qp->state_lock */ @@ -555,14 +560,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, qp->attr.cur_qp_state = attr->qp_state; if (mask & IB_QP_STATE) { - spin_lock_bh(&qp->state_lock); + unsigned long flags; + + spin_lock_irqsave(&qp->state_lock, flags); err = __qp_chk_state(qp, attr, mask); if (!err) { qp->attr.qp_state = attr->qp_state; rxe_dbg_qp(qp, "state -> %s\n", qps2str[attr->qp_state]); } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (err) return err; @@ -688,6 +695,8 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, /* called by the query qp verb */ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) { + unsigned long flags; + *attr = qp->attr; attr->rq_psn = qp->resp.psn; @@ -708,12 +717,12 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) /* Applications that get this state typically spin on it. 
* Yield the processor */ - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (qp->attr.sq_draining) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); cond_resched(); } else { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } return 0; @@ -737,10 +746,11 @@ int rxe_qp_chk_destroy(struct rxe_qp *qp) static void rxe_qp_do_cleanup(struct work_struct *work) { struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work); + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); qp->valid = 0; - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); qp->qp_timeout_jiffies = 0; if (qp_type(qp) == IB_QPT_RC) { diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index 2f953cc74256d..5861e42440490 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -14,6 +14,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct rxe_qp *qp) { unsigned int pkt_type; + unsigned long flags; if (unlikely(!qp->valid)) return -EINVAL; @@ -38,19 +39,19 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, return -EINVAL; } - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (pkt->mask & RXE_REQ_MASK) { if (unlikely(qp_state(qp) < IB_QPS_RTR)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return -EINVAL; } } else { if (unlikely(qp_state(qp) < IB_QPS_RTS)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return -EINVAL; } } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return 0; } diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 65134a9aefe7b..5fe7cbae30313 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -99,17 +99,18 @@ static void req_retry(struct rxe_qp *qp) void rnr_nak_timer(struct timer_list *t) { struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer); + unsigned long flags; rxe_dbg_qp(qp, "nak timer fired\n"); - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (qp->valid) { /* request a send queue retry */ qp->req.need_retry = 1; qp->req.wait_for_rnr_timer = 0; rxe_sched_task(&qp->req.task); } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } static void req_check_sq_drain_done(struct rxe_qp *qp) @@ -118,8 +119,9 @@ static void req_check_sq_drain_done(struct rxe_qp *qp) unsigned int index; unsigned int cons; struct rxe_send_wqe *wqe; + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (qp_state(qp) == IB_QPS_SQD) { q = qp->sq.queue; index = qp->req.wqe_index; @@ -140,7 +142,7 @@ static void req_check_sq_drain_done(struct rxe_qp *qp) break; qp->attr.sq_draining = 0; - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (qp->ibqp.event_handler) { struct ib_event ev; @@ -154,7 +156,7 @@ static void req_check_sq_drain_done(struct rxe_qp *qp) return; } while (0); } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); } static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp) @@ -173,6 +175,7 @@ static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp) static struct rxe_send_wqe 
*req_next_wqe(struct rxe_qp *qp) { struct rxe_send_wqe *wqe; + unsigned long flags; req_check_sq_drain_done(qp); @@ -180,13 +183,13 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) if (wqe == NULL) return NULL; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (unlikely((qp_state(qp) == IB_QPS_SQD) && (wqe->state != wqe_state_processing))) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return NULL; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp); return wqe; @@ -676,16 +679,17 @@ int rxe_requester(struct rxe_qp *qp) struct rxe_queue *q = qp->sq.queue; struct rxe_ah *ah; struct rxe_av *av; + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (unlikely(!qp->valid)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); goto exit; } if (unlikely(qp_state(qp) == IB_QPS_ERR)) { wqe = __req_next_wqe(qp); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (wqe) goto err; else @@ -700,10 +704,10 @@ int rxe_requester(struct rxe_qp *qp) qp->req.wait_psn = 0; qp->req.need_retry = 0; qp->req.wait_for_rnr_timer = 0; - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); goto exit; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); /* we come here if the retransmit timer has fired * or if the rnr timer has fired. If the retransmit diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 68f6cd188d8ed..1da044f6b7d49 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -1047,6 +1047,7 @@ static enum resp_states do_complete(struct rxe_qp *qp, struct ib_uverbs_wc *uwc = &cqe.uibwc; struct rxe_recv_wqe *wqe = qp->resp.wqe; struct rxe_dev *rxe = to_rdev(qp->ibqp.device); + unsigned long flags; if (!wqe) goto finish; @@ -1137,12 +1138,12 @@ static enum resp_states do_complete(struct rxe_qp *qp, return RESPST_ERR_CQ_OVERFLOW; finish: - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (unlikely(qp_state(qp) == IB_QPS_ERR)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return RESPST_CHK_RESOURCE; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (unlikely(!pkt)) return RESPST_DONE; @@ -1468,18 +1469,19 @@ int rxe_responder(struct rxe_qp *qp) enum resp_states state; struct rxe_pkt_info *pkt = NULL; int ret; + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (!qp->valid || qp_state(qp) == IB_QPS_ERR || qp_state(qp) == IB_QPS_RESET) { bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR); drain_req_pkts(qp); flush_recv_queue(qp, notify); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); goto exit; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED; diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index dea605b7f6833..4d8f6b8051ff7 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -904,10 +904,10 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, if (!err) rxe_sched_task(&qp->req.task); - spin_lock_bh(&qp->state_lock); + 
spin_lock_irqsave(&qp->state_lock, flags); if (qp_state(qp) == IB_QPS_ERR) rxe_sched_task(&qp->comp.task); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return err; } @@ -917,22 +917,23 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, { struct rxe_qp *qp = to_rqp(ibqp); int err; + unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); /* caller has already called destroy_qp */ if (WARN_ON_ONCE(!qp->valid)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); rxe_err_qp(qp, "qp has been destroyed"); return -EINVAL; } if (unlikely(qp_state(qp) < IB_QPS_RTS)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); *bad_wr = wr; rxe_err_qp(qp, "qp not ready to send"); return -EINVAL; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (qp->is_user) { /* Utilize process context to do protocol processing */ @@ -1008,22 +1009,22 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, struct rxe_rq *rq = &qp->rq; unsigned long flags; - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); /* caller has already called destroy_qp */ if (WARN_ON_ONCE(!qp->valid)) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); rxe_err_qp(qp, "qp has been destroyed"); return -EINVAL; } /* see C10-97.2.1 */ if (unlikely((qp_state(qp) < IB_QPS_INIT))) { - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); *bad_wr = wr; rxe_dbg_qp(qp, "qp not ready to post recv"); return -EINVAL; } - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); if (unlikely(qp->srq)) { *bad_wr = wr; @@ -1044,10 +1045,10 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, spin_unlock_irqrestore(&rq->producer_lock, flags); - spin_lock_bh(&qp->state_lock); + spin_lock_irqsave(&qp->state_lock, flags); if (qp_state(qp) == IB_QPS_ERR) rxe_sched_task(&qp->resp.task); - spin_unlock_bh(&qp->state_lock); + spin_unlock_irqrestore(&qp->state_lock, flags); return err; } From 866422cdddcdf59d8c68e9472d49ba1be29b5fcf Mon Sep 17 00:00:00 2001 From: Yonatan Nachum Date: Thu, 11 May 2023 11:51:03 +0000 Subject: [PATCH 032/303] RDMA/efa: Fix unsupported page sizes in device Device uses 4KB size blocks for user pages indirect list while the driver creates those blocks with the size of PAGE_SIZE of the kernel. On kernels with PAGE_SIZE different than 4KB (ARM RHEL), this leads to a failure on register MR with indirect list because of the miss communication between driver and device. 
Fixes: 40909f664d27 ("RDMA/efa: Add EFA verbs implementation") Link: https://lore.kernel.org/r/20230511115103.13876-1-ynachum@amazon.com Reviewed-by: Firas Jahjah Reviewed-by: Michael Margolin Signed-off-by: Yonatan Nachum Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/efa/efa_verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index 8eca6c14d0cfc..2a195c4b0f17d 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -1403,7 +1403,7 @@ static int pbl_continuous_initialize(struct efa_dev *dev, */ static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl) { - u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE); + u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE); struct scatterlist *sgl; int sg_dma_cnt, err; From 0642287e3ecdd0d1f88e6a2e63768e16153a990c Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Mon, 8 May 2023 23:07:16 -0700 Subject: [PATCH 033/303] dmaengine: idxd: Fix passing freed memory in idxd_cdev_open() Smatch warns: drivers/dma/idxd/cdev.c:327: idxd_cdev_open() warn: 'sva' was already freed. When idxd_wq_set_pasid() fails, the current code unbinds sva and then goes to 'failed_set_pasid' where iommu_sva_unbind_device is called again causing the above warning. [ device_user_pasid_enabled(idxd) is still true when calling failed_set_pasid ] Fix this by removing additional unbind when idxd_wq_set_pasid() fails Fixes: b022f59725f0 ("dmaengine: idxd: add idxd_copy_cr() to copy user completion record during page fault handling") Signed-off-by: Harshit Mogalapalli Acked-by: Fenghua Yu Acked-by: Dave Jiang Link: https://lore.kernel.org/r/20230509060716.2830630-1-harshit.m.mogalapalli@oracle.com Signed-off-by: Vinod Koul --- drivers/dma/idxd/cdev.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index ecbf67c2ad2b0..d32deb9b4e3de 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -277,7 +277,6 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) if (wq_dedicated(wq)) { rc = idxd_wq_set_pasid(wq, pasid); if (rc < 0) { - iommu_sva_unbind_device(sva); dev_err(dev, "wq set pasid failed: %d\n", rc); goto failed_set_pasid; } From 38de368a66360f1859428d5e191b45bd01c20786 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 16 May 2023 23:13:11 +0530 Subject: [PATCH 034/303] dmaengine: ti: k3-udma: annotate pm function with __maybe_unused We get a warning when PM is not set: ../drivers/dma/ti/k3-udma.c:5552:12: warning: 'udma_pm_resume' defined but not used [-Wunused-function] 5552 | static int udma_pm_resume(struct device *dev) | ^~~~~~~~~~~~~~ ../drivers/dma/ti/k3-udma.c:5530:12: warning: 'udma_pm_suspend' defined but not used [-Wunused-function] 5530 | static int udma_pm_suspend(struct device *dev) | ^~~~~~~~~~~~~~~ Fix this by annotating pm function with __maybe_unused Fixes: fbe05149e40b ("dmaengine: ti: k3-udma: Add system suspend/resume support") Reported-by: Randy Dunlap Signed-off-by: Vinod Koul Acked-by: Randy Dunlap Tested-by: Randy Dunlap # build-tested Link: https://lore.kernel.org/r/20230516174311.117264-1-vkoul@kernel.org Signed-off-by: Vinod Koul --- drivers/dma/ti/k3-udma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index fc3a2a05ab7b7..b8329a23728d7 100644 --- 
a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -5527,7 +5527,7 @@ static int udma_probe(struct platform_device *pdev) return ret; } -static int udma_pm_suspend(struct device *dev) +static int __maybe_unused udma_pm_suspend(struct device *dev) { struct udma_dev *ud = dev_get_drvdata(dev); struct dma_device *dma_dev = &ud->ddev; @@ -5549,7 +5549,7 @@ static int udma_pm_suspend(struct device *dev) return 0; } -static int udma_pm_resume(struct device *dev) +static int __maybe_unused udma_pm_resume(struct device *dev) { struct udma_dev *ud = dev_get_drvdata(dev); struct dma_device *dma_dev = &ud->ddev; From 58caa2a51ad4fd21763696cc6c4defc9fc1b4b4f Mon Sep 17 00:00:00 2001 From: Chengchang Tang Date: Fri, 12 May 2023 17:22:43 +0800 Subject: [PATCH 035/303] RDMA/hns: Fix timeout attr in query qp for HIP08 On HIP08, the queried timeout attr is different from the timeout attr configured by the user. It is found by rdma-core testcase test_rdmacm_async_traffic: ====================================================================== FAIL: test_rdmacm_async_traffic (tests.test_rdmacm.CMTestCase) ---------------------------------------------------------------------- Traceback (most recent call last): File "./tests/test_rdmacm.py", line 33, in test_rdmacm_async_traffic self.two_nodes_rdmacm_traffic(CMAsyncConnection, self.rdmacm_traffic, File "./tests/base.py", line 382, in two_nodes_rdmacm_traffic raise(res) AssertionError Fixes: 926a01dc000d ("RDMA/hns: Add QP operations support for hip08 SoC") Link: https://lore.kernel.org/r/20230512092245.344442-2-huangjunxian6@hisilicon.com Signed-off-by: Chengchang Tang Signed-off-by: Junxian Huang Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 17 ++++++++++++++--- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2 ++ 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 84f1167de1d9e..3a1c90406ed9a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5012,7 +5012,6 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout) { #define QP_ACK_TIMEOUT_MAX_HIP08 20 -#define QP_ACK_TIMEOUT_OFFSET 10 #define QP_ACK_TIMEOUT_MAX 31 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { @@ -5021,7 +5020,7 @@ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout) "local ACK timeout shall be 0 to 20.\n"); return false; } - *timeout += QP_ACK_TIMEOUT_OFFSET; + *timeout += HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08; } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) { if (*timeout > QP_ACK_TIMEOUT_MAX) { ibdev_warn(&hr_dev->ib_dev, @@ -5307,6 +5306,18 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn, return ret; } +static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev, + struct hns_roce_v2_qp_context *context) +{ + u8 timeout; + + timeout = (u8)hr_reg_read(context, QPC_AT); + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) + timeout -= HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08; + + return timeout; +} + static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) @@ -5384,7 +5395,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX); qp_attr->min_rnr_timer = 
(u8)hr_reg_read(&context, QPC_MIN_RNR_TIME); - qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT); + qp_attr->timeout = get_qp_timeout_attr(hr_dev, &context); qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT); qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 1b44d2434ab41..7033eae2407c5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -44,6 +44,8 @@ #define HNS_ROCE_V2_MAX_XRCD_NUM 0x1000000 #define HNS_ROCE_V2_RSV_XRCD_NUM 0 +#define HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08 10 + #define HNS_ROCE_V3_SCCC_SZ 64 #define HNS_ROCE_V3_GMV_ENTRY_SZ 32 From 7f3969b14f356dd65fa95b3528eb05c32e68bc06 Mon Sep 17 00:00:00 2001 From: Chengchang Tang Date: Fri, 12 May 2023 17:22:44 +0800 Subject: [PATCH 036/303] RDMA/hns: Fix base address table allocation For hns, the specification of an entry like resource (E.g. WQE/CQE/EQE) depends on BT page size, buf page size and hopnum. For user mode, the buf page size depends on UMEM. Therefore, the actual specification is controlled by BT page size and hopnum. The current BT page size and hopnum are obtained from firmware. This makes the driver inflexible and introduces unnecessary constraints. Resource allocation failures occur in many scenarios. This patch will calculate whether the BT page size set by firmware is sufficient before allocating BT, and increase the BT page size if it is insufficient. Fixes: 1133401412a9 ("RDMA/hns: Optimize base address table config flow for qp buffer") Link: https://lore.kernel.org/r/20230512092245.344442-3-huangjunxian6@hisilicon.com Signed-off-by: Chengchang Tang Signed-off-by: Junxian Huang Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_mr.c | 43 +++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 37a5cf62f88b4..14376490ac226 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -33,6 +33,7 @@ #include #include +#include #include "hns_roce_device.h" #include "hns_roce_cmd.h" #include "hns_roce_hem.h" @@ -909,6 +910,44 @@ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev, return page_cnt; } +static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum) +{ + return int_pow(ba_per_bt, hopnum - 1); +} + +static unsigned int cal_best_bt_pg_sz(struct hns_roce_dev *hr_dev, + struct hns_roce_mtr *mtr, + unsigned int pg_shift) +{ + unsigned long cap = hr_dev->caps.page_size_cap; + struct hns_roce_buf_region *re; + unsigned int pgs_per_l1ba; + unsigned int ba_per_bt; + unsigned int ba_num; + int i; + + for_each_set_bit_from(pg_shift, &cap, sizeof(cap) * BITS_PER_BYTE) { + if (!(BIT(pg_shift) & cap)) + continue; + + ba_per_bt = BIT(pg_shift) / BA_BYTE_LEN; + ba_num = 0; + for (i = 0; i < mtr->hem_cfg.region_count; i++) { + re = &mtr->hem_cfg.region[i]; + if (re->hopnum == 0) + continue; + + pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum); + ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba); + } + + if (ba_num <= ba_per_bt) + return pg_shift; + } + + return 0; +} + static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, unsigned int ba_page_shift) { @@ -917,6 +956,10 @@ static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, hns_roce_hem_list_init(&mtr->hem_list); if (!cfg->is_direct) { + ba_page_shift 
= cal_best_bt_pg_sz(hr_dev, mtr, ba_page_shift); + if (!ba_page_shift) + return -ERANGE; + ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list, cfg->region, cfg->region_count, ba_page_shift); From 56518a603fd2bf74762d176ac980572db84a3e14 Mon Sep 17 00:00:00 2001 From: Yangyang Li Date: Fri, 12 May 2023 17:22:45 +0800 Subject: [PATCH 037/303] RDMA/hns: Modify the value of long message loopback slice Long message loopback slice is used for achieving traffic balance between QPs. It prevents the problem that QPs with large traffic occupying the hardware pipeline for a long time and QPs with small traffic cannot be scheduled. Currently, its maximum value is set to 16K, which means only after a QP sends 16K will the second QP be scheduled. This value is too large, which will lead to unbalanced traffic scheduling, and thus it needs to be modified. The setting range of the long message loopback slice is modified to be from 1024 (the lower limit supported by hardware) to mtu. Actual testing shows that this value can significantly reduce error in hardware traffic scheduling. This solution is compatible with both HIP08 and HIP09. The modified lp_pktn_ini has a maximum value of 2 (when mtu is 256), so the range checking code for lp_pktn_ini is no longer necessary and needs to be deleted. Fixes: 0e60778efb07 ("RDMA/hns: Modify the value of MAX_LP_MSG_LEN to meet hardware compatibility") Link: https://lore.kernel.org/r/20230512092245.344442-4-huangjunxian6@hisilicon.com Signed-off-by: Yangyang Li Signed-off-by: Junxian Huang Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 3a1c90406ed9a..d4c6b9bc0a4ea 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -4583,11 +4583,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, mtu = ib_mtu_enum_to_int(ib_mtu); if (WARN_ON(mtu <= 0)) return -EINVAL; -#define MAX_LP_MSG_LEN 16384 - /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */ - lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu); - if (WARN_ON(lp_pktn_ini >= 0xF)) - return -EINVAL; +#define MIN_LP_MSG_LEN 1024 + /* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */ + lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu); if (attr_mask & IB_QP_PATH_MTU) { hr_reg_write(context, QPC_MTU, ib_mtu); From 3981514180c987a79ea98f0ae06a7cbf58a9ac0f Mon Sep 17 00:00:00 2001 From: Jim Wylder Date: Wed, 17 May 2023 10:20:11 -0500 Subject: [PATCH 038/303] regmap: Account for register length when chunking Currently, when regmap_raw_write() splits the data, it uses the max_raw_write value defined for the bus. For any bus that includes the target register address in the max_raw_write value, the chunked transmission will always exceed the maximum transmission length. To avoid this problem, subtract the length of the register and the padding from the maximum transmission. 
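The effect of reserving room for the register address can be seen with made-up example numbers: a 32-byte raw-write limit, 2-byte registers and values, no padding. These values are illustrative and not taken from any particular regmap bus:

#include <stdio.h>

int main(void)
{
	unsigned int max_raw_write = 32;	/* bus limit: address + padding + data */
	unsigned int reg_bytes = 2, pad_bytes = 0, val_bytes = 2;

	/* Old chunking let the data alone consume the whole limit... */
	unsigned int old_chunk_regs = max_raw_write / val_bytes;
	unsigned int old_xfer = reg_bytes + pad_bytes + old_chunk_regs * val_bytes;

	/* ...the fix first subtracts the register and padding bytes. */
	unsigned int max_data = max_raw_write - reg_bytes - pad_bytes;
	unsigned int new_chunk_regs = max_data / val_bytes;
	unsigned int new_xfer = reg_bytes + pad_bytes + new_chunk_regs * val_bytes;

	printf("old chunk: %u regs -> %u-byte transfer (limit %u)\n",
	       old_chunk_regs, old_xfer, max_raw_write);
	printf("new chunk: %u regs -> %u-byte transfer (limit %u)\n",
	       new_chunk_regs, new_xfer, max_raw_write);
	return 0;
}

With these numbers the old computation yields a 34-byte transfer against a 32-byte limit, while the new one stays at 32 bytes.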
Signed-off-by: Jim Wylder max_raw_write - map->format.reg_bytes - + map->format.pad_bytes; int ret, i; if (!val_count) @@ -2089,8 +2091,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, if (map->use_single_write) chunk_regs = 1; - else if (map->max_raw_write && val_len > map->max_raw_write) - chunk_regs = map->max_raw_write / val_bytes; + else if (map->max_raw_write && val_len > max_data) + chunk_regs = max_data / val_bytes; chunk_count = val_count / chunk_regs; chunk_bytes = chunk_regs * val_bytes; From 59112e9c390be595224e427827475a6cd3726021 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Thu, 18 May 2023 11:09:15 +0100 Subject: [PATCH 039/303] KVM: arm64: vgic: Fix a circular locking issue Lockdep reports a circular lock dependency between the srcu and the config_lock: [ 262.179917] -> #1 (&kvm->srcu){.+.+}-{0:0}: [ 262.182010] __synchronize_srcu+0xb0/0x224 [ 262.183422] synchronize_srcu_expedited+0x24/0x34 [ 262.184554] kvm_io_bus_register_dev+0x324/0x50c [ 262.185650] vgic_register_redist_iodev+0x254/0x398 [ 262.186740] vgic_v3_set_redist_base+0x3b0/0x724 [ 262.188087] kvm_vgic_addr+0x364/0x600 [ 262.189189] vgic_set_common_attr+0x90/0x544 [ 262.190278] vgic_v3_set_attr+0x74/0x9c [ 262.191432] kvm_device_ioctl+0x2a0/0x4e4 [ 262.192515] __arm64_sys_ioctl+0x7ac/0x1ba8 [ 262.193612] invoke_syscall.constprop.0+0x70/0x1e0 [ 262.195006] do_el0_svc+0xe4/0x2d4 [ 262.195929] el0_svc+0x44/0x8c [ 262.196917] el0t_64_sync_handler+0xf4/0x120 [ 262.198238] el0t_64_sync+0x190/0x194 [ 262.199224] [ 262.199224] -> #0 (&kvm->arch.config_lock){+.+.}-{3:3}: [ 262.201094] __lock_acquire+0x2b70/0x626c [ 262.202245] lock_acquire+0x454/0x778 [ 262.203132] __mutex_lock+0x190/0x8b4 [ 262.204023] mutex_lock_nested+0x24/0x30 [ 262.205100] vgic_mmio_write_v3_misc+0x5c/0x2a0 [ 262.206178] dispatch_mmio_write+0xd8/0x258 [ 262.207498] __kvm_io_bus_write+0x1e0/0x350 [ 262.208582] kvm_io_bus_write+0xe0/0x1cc [ 262.209653] io_mem_abort+0x2ac/0x6d8 [ 262.210569] kvm_handle_guest_abort+0x9b8/0x1f88 [ 262.211937] handle_exit+0xc4/0x39c [ 262.212971] kvm_arch_vcpu_ioctl_run+0x90c/0x1c04 [ 262.214154] kvm_vcpu_ioctl+0x450/0x12f8 [ 262.215233] __arm64_sys_ioctl+0x7ac/0x1ba8 [ 262.216402] invoke_syscall.constprop.0+0x70/0x1e0 [ 262.217774] do_el0_svc+0xe4/0x2d4 [ 262.218758] el0_svc+0x44/0x8c [ 262.219941] el0t_64_sync_handler+0xf4/0x120 [ 262.221110] el0t_64_sync+0x190/0x194 Note that the current report, which can be triggered by the vgic_irq kselftest, is a triple chain that includes slots_lock, but after inverting the slots_lock/config_lock dependency, the actual problem reported above remains. In several places, the vgic code calls kvm_io_bus_register_dev(), which synchronizes the srcu, while holding config_lock (#1). And the MMIO handler takes the config_lock while holding the srcu read lock (#0). Break dependency #1, by registering the distributor and redistributors without holding config_lock. The ITS also uses kvm_io_bus_register_dev() but already relies on slots_lock to serialize calls. The distributor iodev is created on the first KVM_RUN call. Multiple threads will race for vgic initialization, and only the first one will see !vgic_ready() under the lock. To serialize those threads, rely on slots_lock rather than config_lock. Redistributors are created earlier, through KVM_DEV_ARM_VGIC_GRP_ADDR ioctls and vCPU creation. Similarly, serialize the iodev creation with slots_lock, and the rest with config_lock. 
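The circular dependency in the report above reduces to a classic ABBA ordering. The sketch below models it with two pthread mutexes, using a plain mutex as a rough stand-in for the SRCU read side (an approximation: SRCU readers do not exclude each other, but synchronize_srcu() does wait for them); the function names follow the commit message, not the kernel source:

#include <pthread.h>

static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t srcu_stand_in = PTHREAD_MUTEX_INITIALIZER;

/* Dependency #1: the ioctl path registers an iodev with config_lock
 * held, and registering ends in synchronize_srcu(), i.e. it waits on
 * all SRCU readers.
 */
static void set_redist_base(void)
{
	pthread_mutex_lock(&config_lock);
	pthread_mutex_lock(&srcu_stand_in);	/* kvm_io_bus_register_dev() -> synchronize_srcu() */
	pthread_mutex_unlock(&srcu_stand_in);
	pthread_mutex_unlock(&config_lock);
}

/* Dependency #0: a guest MMIO write runs under the SRCU read lock and
 * then takes config_lock -- the opposite order.
 */
static void handle_mmio_write(void)
{
	pthread_mutex_lock(&srcu_stand_in);	/* srcu_read_lock() */
	pthread_mutex_lock(&config_lock);	/* vgic_mmio_write_v3_misc() */
	pthread_mutex_unlock(&config_lock);
	pthread_mutex_unlock(&srcu_stand_in);
}

int main(void)
{
	/* Run sequentially so the demo itself cannot deadlock; two threads
	 * running these paths concurrently could.  The fix breaks
	 * dependency #1 by registering iodevs under slots_lock instead,
	 * so config_lock is never held across synchronize_srcu().
	 */
	set_redist_base();
	handle_mmio_write();
	return 0;
}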
Fixes: f00327731131 ("KVM: arm64: Use config_lock to protect vgic state") Signed-off-by: Jean-Philippe Brucker Reviewed-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230518100914.2837292-2-jean-philippe@linaro.org --- arch/arm64/kvm/vgic/vgic-init.c | 25 ++++++++++++++++----- arch/arm64/kvm/vgic/vgic-kvm-device.c | 10 +++++++-- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 31 ++++++++++++++++++--------- arch/arm64/kvm/vgic/vgic-mmio.c | 9 ++------ arch/arm64/kvm/vgic/vgic-v2.c | 6 ------ arch/arm64/kvm/vgic/vgic-v3.c | 7 ------ 6 files changed, 51 insertions(+), 37 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 9d42c7cb2b588..c199ba2f192ef 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -235,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) * KVM io device for the redistributor that belongs to this VCPU. */ if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { - mutex_lock(&vcpu->kvm->arch.config_lock); + mutex_lock(&vcpu->kvm->slots_lock); ret = vgic_register_redist_iodev(vcpu); - mutex_unlock(&vcpu->kvm->arch.config_lock); + mutex_unlock(&vcpu->kvm->slots_lock); } return ret; } @@ -446,11 +446,13 @@ int vgic_lazy_init(struct kvm *kvm) int kvm_vgic_map_resources(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; + gpa_t dist_base; int ret = 0; if (likely(vgic_ready(kvm))) return 0; + mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->arch.config_lock); if (vgic_ready(kvm)) goto out; @@ -463,13 +465,26 @@ int kvm_vgic_map_resources(struct kvm *kvm) else ret = vgic_v3_map_resources(kvm); - if (ret) + if (ret) { __kvm_vgic_destroy(kvm); - else - dist->ready = true; + goto out; + } + dist->ready = true; + dist_base = dist->vgic_dist_base; + mutex_unlock(&kvm->arch.config_lock); + + ret = vgic_register_dist_iodev(kvm, dist_base, + kvm_vgic_global_state.type); + if (ret) { + kvm_err("Unable to register VGIC dist MMIO regions\n"); + kvm_vgic_destroy(kvm); + } + mutex_unlock(&kvm->slots_lock); + return ret; out: mutex_unlock(&kvm->arch.config_lock); + mutex_unlock(&kvm->slots_lock); return ret; } diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index 35cfa268fd5de..212b73a715c1c 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -102,7 +102,11 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri if (get_user(addr, uaddr)) return -EFAULT; - mutex_lock(&kvm->arch.config_lock); + /* + * Since we can't hold config_lock while registering the redistributor + * iodevs, take the slots_lock immediately. 
+ */ + mutex_lock(&kvm->slots_lock); switch (attr->attr) { case KVM_VGIC_V2_ADDR_TYPE_DIST: r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); @@ -182,6 +186,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri if (r) goto out; + mutex_lock(&kvm->arch.config_lock); if (write) { r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size); if (!r) @@ -189,9 +194,10 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri } else { addr = *addr_ptr; } + mutex_unlock(&kvm->arch.config_lock); out: - mutex_unlock(&kvm->arch.config_lock); + mutex_unlock(&kvm->slots_lock); if (!r && !write) r = put_user(addr, uaddr); diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 472b18ac92a24..188d2187eede9 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -769,10 +769,13 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev; struct vgic_redist_region *rdreg; gpa_t rd_base; - int ret; + int ret = 0; + + lockdep_assert_held(&kvm->slots_lock); + mutex_lock(&kvm->arch.config_lock); if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) - return 0; + goto out_unlock; /* * We may be creating VCPUs before having set the base address for the @@ -782,10 +785,12 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) */ rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions); if (!rdreg) - return 0; + goto out_unlock; - if (!vgic_v3_check_base(kvm)) - return -EINVAL; + if (!vgic_v3_check_base(kvm)) { + ret = -EINVAL; + goto out_unlock; + } vgic_cpu->rdreg = rdreg; vgic_cpu->rdreg_index = rdreg->free_index; @@ -799,16 +804,20 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers); rd_dev->redist_vcpu = vcpu; - mutex_lock(&kvm->slots_lock); + mutex_unlock(&kvm->arch.config_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base, 2 * SZ_64K, &rd_dev->dev); - mutex_unlock(&kvm->slots_lock); - if (ret) return ret; + /* Protected by slots_lock */ rdreg->free_index++; return 0; + +out_unlock: + mutex_unlock(&kvm->arch.config_lock); + return ret; } static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu) @@ -834,12 +843,10 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm) /* The current c failed, so iterate over the previous ones. 
*/ int i; - mutex_lock(&kvm->slots_lock); for (i = 0; i < c; i++) { vcpu = kvm_get_vcpu(kvm, i); vgic_unregister_redist_iodev(vcpu); } - mutex_unlock(&kvm->slots_lock); } return ret; @@ -938,7 +945,9 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count) { int ret; + mutex_lock(&kvm->arch.config_lock); ret = vgic_v3_alloc_redist_region(kvm, index, addr, count); + mutex_unlock(&kvm->arch.config_lock); if (ret) return ret; @@ -950,8 +959,10 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count) if (ret) { struct vgic_redist_region *rdreg; + mutex_lock(&kvm->arch.config_lock); rdreg = vgic_v3_rdist_region_from_index(kvm, index); vgic_v3_free_redist_region(rdreg); + mutex_unlock(&kvm->arch.config_lock); return ret; } diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index 1939c94e0b248..ff558c05e990c 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -1096,7 +1096,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, enum vgic_type type) { struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev; - int ret = 0; unsigned int len; switch (type) { @@ -1114,10 +1113,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, io_device->iodev_type = IODEV_DIST; io_device->redist_vcpu = NULL; - mutex_lock(&kvm->slots_lock); - ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address, - len, &io_device->dev); - mutex_unlock(&kvm->slots_lock); - - return ret; + return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address, + len, &io_device->dev); } diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c index 645648349c99b..7e9cdb78f7ce8 100644 --- a/arch/arm64/kvm/vgic/vgic-v2.c +++ b/arch/arm64/kvm/vgic/vgic-v2.c @@ -312,12 +312,6 @@ int vgic_v2_map_resources(struct kvm *kvm) return ret; } - ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2); - if (ret) { - kvm_err("Unable to register VGIC MMIO regions\n"); - return ret; - } - if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) { ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base, kvm_vgic_global_state.vcpu_base, diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 93a47a515c137..c3b8e132d5992 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -539,7 +539,6 @@ int vgic_v3_map_resources(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; - int ret = 0; unsigned long c; kvm_for_each_vcpu(c, vcpu, kvm) { @@ -569,12 +568,6 @@ int vgic_v3_map_resources(struct kvm *kvm) return -EBUSY; } - ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3); - if (ret) { - kvm_err("Unable to register VGICv3 dist MMIO regions\n"); - return ret; - } - if (kvm_vgic_global_state.has_gicv4_1) vgic_v4_configure_vsgis(kvm); From 9cf2f840c439b6b23bd99f584f2917ca425ae406 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Thu, 18 May 2023 11:09:16 +0100 Subject: [PATCH 040/303] KVM: arm64: vgic: Wrap vgic_its_create() with config_lock vgic_its_create() changes the vgic state without holding the config_lock, which triggers a lockdep warning in vgic_v4_init(): [ 358.667941] WARNING: CPU: 3 PID: 178 at arch/arm64/kvm/vgic/vgic-v4.c:245 vgic_v4_init+0x15c/0x7a8 ... 
[ 358.707410] vgic_v4_init+0x15c/0x7a8 [ 358.708550] vgic_its_create+0x37c/0x4a4 [ 358.709640] kvm_vm_ioctl+0x1518/0x2d80 [ 358.710688] __arm64_sys_ioctl+0x7ac/0x1ba8 [ 358.711960] invoke_syscall.constprop.0+0x70/0x1e0 [ 358.713245] do_el0_svc+0xe4/0x2d4 [ 358.714289] el0_svc+0x44/0x8c [ 358.715329] el0t_64_sync_handler+0xf4/0x120 [ 358.716615] el0t_64_sync+0x190/0x194 Wrap the whole of vgic_its_create() with config_lock since, in addition to calling vgic_v4_init(), it also modifies the global kvm->arch.vgic state. Fixes: f00327731131 ("KVM: arm64: Use config_lock to protect vgic state") Signed-off-by: Jean-Philippe Brucker Reviewed-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230518100914.2837292-3-jean-philippe@linaro.org --- arch/arm64/kvm/vgic/vgic-its.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 750e51e3779a3..5fe2365a629f2 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -1936,6 +1936,7 @@ void vgic_lpi_translation_cache_destroy(struct kvm *kvm) static int vgic_its_create(struct kvm_device *dev, u32 type) { + int ret; struct vgic_its *its; if (type != KVM_DEV_TYPE_ARM_VGIC_ITS) @@ -1945,9 +1946,12 @@ static int vgic_its_create(struct kvm_device *dev, u32 type) if (!its) return -ENOMEM; + mutex_lock(&dev->kvm->arch.config_lock); + if (vgic_initialized(dev->kvm)) { - int ret = vgic_v4_init(dev->kvm); + ret = vgic_v4_init(dev->kvm); if (ret < 0) { + mutex_unlock(&dev->kvm->arch.config_lock); kfree(its); return ret; } @@ -1960,12 +1964,10 @@ static int vgic_its_create(struct kvm_device *dev, u32 type) /* Yep, even more trickery for lock ordering... */ #ifdef CONFIG_LOCKDEP - mutex_lock(&dev->kvm->arch.config_lock); mutex_lock(&its->cmd_lock); mutex_lock(&its->its_lock); mutex_unlock(&its->its_lock); mutex_unlock(&its->cmd_lock); - mutex_unlock(&dev->kvm->arch.config_lock); #endif its->vgic_its_base = VGIC_ADDR_UNDEF; @@ -1986,7 +1988,11 @@ static int vgic_its_create(struct kvm_device *dev, u32 type) dev->private = its; - return vgic_its_set_abi(its, NR_ITS_ABIS - 1); + ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1); + + mutex_unlock(&dev->kvm->arch.config_lock); + + return ret; } static void vgic_its_destroy(struct kvm_device *kvm_dev) From c38b8400aef99d63be2b1ff131bb993465dcafe1 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Thu, 18 May 2023 11:09:17 +0100 Subject: [PATCH 041/303] KVM: arm64: vgic: Fix locking comment It is now config_lock that must be held, not kvm lock. Replace the comment with a lockdep annotation. 
Fixes: f00327731131 ("KVM: arm64: Use config_lock to protect vgic state") Signed-off-by: Jean-Philippe Brucker Reviewed-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230518100914.2837292-4-jean-philippe@linaro.org --- arch/arm64/kvm/vgic/vgic-v4.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index 3bb0034780605..c1c28fe680ba3 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -184,13 +184,14 @@ static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu) } } -/* Must be called with the kvm lock held */ void vgic_v4_configure_vsgis(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; unsigned long i; + lockdep_assert_held(&kvm->arch.config_lock); + kvm_arm_halt_guest(kvm); kvm_for_each_vcpu(i, vcpu, kvm) { From 62548732260976ca88fcb17ef98ab661e7ce7504 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Thu, 18 May 2023 11:09:18 +0100 Subject: [PATCH 042/303] KVM: arm64: vgic: Fix a comment It is host userspace, not the guest, that issues KVM_DEV_ARM_VGIC_GRP_CTRL Signed-off-by: Jean-Philippe Brucker Reviewed-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230518100914.2837292-5-jean-philippe@linaro.org --- arch/arm64/kvm/vgic/vgic-init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index c199ba2f192ef..6eafc2c45cfcf 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -406,7 +406,7 @@ void kvm_vgic_destroy(struct kvm *kvm) /** * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest - * is a GICv2. A GICv3 must be explicitly initialized by the guest using the + * is a GICv2. A GICv3 must be explicitly initialized by userspace using the * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group. * @kvm: kvm struct pointer */ From 09cce60bddd6461a93a5bf434265a47827d1bc6f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 18 May 2023 10:58:44 +0100 Subject: [PATCH 043/303] KVM: arm64: Prevent unconditional donation of unmapped regions from the host Since host stage-2 mappings are created lazily, we cannot rely solely on the pte in order to recover the target physical address when checking a host-initiated memory transition as this permits donation of unmapped regions corresponding to MMIO or "no-map" memory. Instead of inspecting the pte, move the addr_is_allowed_memory() check into the host callback function where it is passed the physical address directly from the walker. 
Cc: Quentin Perret Fixes: e82edcc75c4e ("KVM: arm64: Implement do_share() helper for sharing memory") Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230518095844.1178-1-will@kernel.org --- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 2e9ec4a2a4a32..a8813b2129966 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -575,7 +575,7 @@ struct pkvm_mem_donation { struct check_walk_data { enum pkvm_page_state desired; - enum pkvm_page_state (*get_page_state)(kvm_pte_t pte); + enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr); }; static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx, @@ -583,10 +583,7 @@ static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx, { struct check_walk_data *d = ctx->arg; - if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old))) - return -EINVAL; - - return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM; + return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM; } static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size, @@ -601,8 +598,11 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size, return kvm_pgtable_walk(pgt, addr, size, &walker); } -static enum pkvm_page_state host_get_page_state(kvm_pte_t pte) +static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr) { + if (!addr_is_allowed_memory(addr)) + return PKVM_NOPAGE; + if (!kvm_pte_valid(pte) && pte) return PKVM_NOPAGE; @@ -709,7 +709,7 @@ static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx return host_stage2_set_owner_locked(addr, size, host_id); } -static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte) +static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr) { if (!kvm_pte_valid(pte)) return PKVM_NOPAGE; From 4d43acb145c363626d76f49febb4240c488cd1cf Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 15 May 2023 13:32:10 +0300 Subject: [PATCH 044/303] dmaengine: at_xdmac: fix potential Oops in at_xdmac_prep_interleaved() There are two place if the at_xdmac_interleaved_queue_desc() fails which could lead to a NULL dereference where "first" is NULL and we call list_add_tail(&first->desc_node, ...). In the first caller, the return is not checked so add a check for that. In the next caller, the return is checked but if it fails on the first iteration through the loop then it will lead to a NULL pointer dereference. Fixes: 4e5385784e69 ("dmaengine: at_xdmac: handle numf > 1") Fixes: 62b5cb757f1d ("dmaengine: at_xdmac: fix memory leak in interleaved mode") Signed-off-by: Dan Carpenter Reviewed-by: Tudor Ambarus Link: https://lore.kernel.org/r/21282b66-9860-410a-83df-39c17fcf2f1b@kili.mountain Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 7da6d9b6098e7..c3b37168b21f1 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1102,6 +1102,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, NULL, src_addr, dst_addr, xt, xt->sgl); + if (!first) + return NULL; /* Length of the block is (BLEN+1) microblocks. 
*/ for (i = 0; i < xt->numf - 1; i++) @@ -1132,8 +1134,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, src_addr, dst_addr, xt, chunk); if (!desc) { - list_splice_tail_init(&first->descs_list, - &atchan->free_descs_list); + if (first) + list_splice_tail_init(&first->descs_list, + &atchan->free_descs_list); return NULL; } From 349e3c0cf239cc01d58a1e6c749e171de014cd6a Mon Sep 17 00:00:00 2001 From: Kalesh AP Date: Thu, 18 May 2023 01:10:59 -0700 Subject: [PATCH 045/303] RDMA/bnxt_re: Fix a possible memory leak Inside bnxt_qplib_create_cq(), when the check for NULL DPI fails, driver returns directly without freeing the memory allocated inside bnxt_qplib_alloc_init_hwq() routine. Fixed this by moving the check for NULL DPI before invoking bnxt_qplib_alloc_init_hwq(). Fixes: 1ac5a4047975 ("RDMA/bnxt_re: Add bnxt_re RoCE driver") Link: https://lore.kernel.org/r/1684397461-23082-2-git-send-email-selvin.xavier@broadcom.com Reviewed-by: Kashyap Desai Signed-off-by: Kalesh AP Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index f139d4cd17128..8974f6235cfaa 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -2056,6 +2056,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) u32 pg_sz_lvl; int rc; + if (!cq->dpi) { + dev_err(&rcfw->pdev->dev, + "FP: CREATE_CQ failed due to NULL DPI\n"); + return -EINVAL; + } + hwq_attr.res = res; hwq_attr.depth = cq->max_wqe; hwq_attr.stride = sizeof(struct cq_base); @@ -2069,11 +2075,6 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) CMDQ_BASE_OPCODE_CREATE_CQ, sizeof(req)); - if (!cq->dpi) { - dev_err(&rcfw->pdev->dev, - "FP: CREATE_CQ failed due to NULL DPI\n"); - return -EINVAL; - } req.dpi = cpu_to_le32(cq->dpi->dpi); req.cq_handle = cpu_to_le64(cq->cq_handle); req.cq_size = cpu_to_le32(cq->hwq.max_elements); From 0fa0d520e2a878cb4c94c4dc84395905d3f14f54 Mon Sep 17 00:00:00 2001 From: Kalesh AP Date: Thu, 18 May 2023 01:11:00 -0700 Subject: [PATCH 046/303] RDMA/bnxt_re: Fix return value of bnxt_re_process_raw_qp_pkt_rx bnxt_re_process_raw_qp_pkt_rx() always return 0 and ignores the return value of bnxt_re_post_send_shadow_qp(). 
Fixes: 1ac5a4047975 ("RDMA/bnxt_re: Add bnxt_re RoCE driver") Link: https://lore.kernel.org/r/1684397461-23082-3-git-send-email-selvin.xavier@broadcom.com Reviewed-by: Hongguang Gao Reviewed-by: Ajit Khaparde Signed-off-by: Kalesh AP Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index e86afecfbe46e..b1c36412025f3 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -3341,9 +3341,7 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp, udwr.remote_qkey = gsi_sqp->qplib_qp.qkey; /* post data received in the send queue */ - rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr); - - return 0; + return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr); } static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, From dd5fb04857d7346c9ea4901ec3ebe5b90b0e8868 Mon Sep 17 00:00:00 2001 From: Kalesh AP Date: Thu, 18 May 2023 01:11:01 -0700 Subject: [PATCH 047/303] RDMA/bnxt_re: Do not enable congestion control on VFs Congestion control needs to be enabled only on the PFs. FW fails the command if issued on VFs. Avoid sending the command on VFs. Fixes: f13bcef04ba0 ("RDMA/bnxt_re: Enable congestion control by default") Link: https://lore.kernel.org/r/1684397461-23082-4-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Kalesh AP Reviewed-by: Selvin Thyparampil Xavier Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/bnxt_re/main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index b9e2f89337e85..e34eccd178d0f 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -1336,6 +1336,10 @@ static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable) { struct bnxt_qplib_cc_param cc_param = {}; + /* Do not enable congestion control on VFs */ + if (rdev->is_virtfn) + return; + /* Currently enabling only for GenP5 adapters */ if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) return; From 626d312028bec44209d0ecd5beaa9b1aa8945f7d Mon Sep 17 00:00:00 2001 From: Masahiro Honda Date: Thu, 18 May 2023 20:08:16 +0900 Subject: [PATCH 048/303] iio: adc: ad_sigma_delta: Fix IRQ issue by setting IRQ_DISABLE_UNLAZY flag The Sigma-Delta ADCs supported by this driver can use SDO as an interrupt line to indicate the completion of a conversion. However, some devices cannot properly detect the completion of a conversion by an interrupt. This is for the reason mentioned in the following commit. commit e9849777d0e2 ("genirq: Add flag to force mask in disable_irq[_nosync]()") A read operation is performed by an extra interrupt before the completion of a conversion. At this time, the value read from the ADC data register is the same as the previous conversion result. This patch fixes the issue by setting IRQ_DISABLE_UNLAZY flag. 
Fixes: 0c6ef985a1fd ("iio: adc: ad7791: fix IRQ flags") Fixes: 1a913270e57a ("iio: adc: ad7793: Fix IRQ flag") Fixes: e081102f3077 ("iio: adc: ad7780: Fix IRQ flag") Fixes: 89a86da5cb8e ("iio: adc: ad7192: Add IRQ flag") Fixes: 79ef91493f54 ("iio: adc: ad7124: Set IRQ type to falling") Signed-off-by: Masahiro Honda Link: https://lore.kernel.org/r/20230518110816.248-1-honda@mechatrax.com Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/adc/ad_sigma_delta.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index d8570f620785a..7e21928707437 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -584,6 +584,10 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de init_completion(&sigma_delta->completion); sigma_delta->irq_dis = true; + + /* the IRQ core clears IRQ_DISABLE_UNLAZY flag when freeing an IRQ */ + irq_set_status_flags(sigma_delta->spi->irq, IRQ_DISABLE_UNLAZY); + ret = devm_request_irq(dev, sigma_delta->spi->irq, ad_sd_data_rdy_trig_poll, sigma_delta->info->irq_flags | IRQF_NO_AUTOEN, From bbaae0c79ebd49f61ad942a8bf9e12bfc7f821bb Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Maneyrol Date: Tue, 9 May 2023 15:22:02 +0000 Subject: [PATCH 049/303] iio: imu: inv_icm42600: fix timestamp reset Timestamp reset is not done in the correct place. It must be done before enabling buffer. The reason is that interrupt timestamping is always happening when the chip is on, even if the corresponding sensor is off. When the sensor restarts, timestamp is wrong if you don't do a reset first. Fixes: ec74ae9fd37c ("iio: imu: inv_icm42600: add accurate timestamping") Signed-off-by: Jean-Baptiste Maneyrol Cc: Link: https://lore.kernel.org/r/20230509152202.245444-1-inv.git-commit@tdk.com Signed-off-by: Jonathan Cameron --- drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c index 99576b2c171f4..32d7f83642303 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c @@ -275,9 +275,14 @@ static int inv_icm42600_buffer_preenable(struct iio_dev *indio_dev) { struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev); struct device *dev = regmap_get_device(st->map); + struct inv_icm42600_timestamp *ts = iio_priv(indio_dev); pm_runtime_get_sync(dev); + mutex_lock(&st->lock); + inv_icm42600_timestamp_reset(ts); + mutex_unlock(&st->lock); + return 0; } @@ -375,7 +380,6 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev) struct device *dev = regmap_get_device(st->map); unsigned int sensor; unsigned int *watermark; - struct inv_icm42600_timestamp *ts; struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT; unsigned int sleep_temp = 0; unsigned int sleep_sensor = 0; @@ -385,11 +389,9 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev) if (indio_dev == st->indio_gyro) { sensor = INV_ICM42600_SENSOR_GYRO; watermark = &st->fifo.watermark.gyro; - ts = iio_priv(st->indio_gyro); } else if (indio_dev == st->indio_accel) { sensor = INV_ICM42600_SENSOR_ACCEL; watermark = &st->fifo.watermark.accel; - ts = iio_priv(st->indio_accel); } else { return -EINVAL; } @@ -417,8 +419,6 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev) if (!st->fifo.on) ret = 
inv_icm42600_set_temp_conf(st, false, &sleep_temp); - inv_icm42600_timestamp_reset(ts); - out_unlock: mutex_unlock(&st->lock); From e332003bb216a9f91e08004b9e2de0745f321290 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 30 Mar 2023 09:58:17 -0700 Subject: [PATCH 050/303] iommu: Make IPMMU_VMSA dependencies more strict On riscv64, linux-next-20233030 (and for several days earlier), there is a kconfig warning: WARNING: unmet direct dependencies detected for IOMMU_IO_PGTABLE_LPAE Depends on [n]: IOMMU_SUPPORT [=y] && (ARM || ARM64 || COMPILE_TEST [=n]) && !GENERIC_ATOMIC64 [=n] Selected by [y]: - IPMMU_VMSA [=y] && IOMMU_SUPPORT [=y] && (ARCH_RENESAS [=y] || COMPILE_TEST [=n]) && !GENERIC_ATOMIC64 [=n] and build errors: riscv64-linux-ld: drivers/iommu/io-pgtable-arm.o: in function `.L140': io-pgtable-arm.c:(.init.text+0x1e8): undefined reference to `alloc_io_pgtable_ops' riscv64-linux-ld: drivers/iommu/io-pgtable-arm.o: in function `.L168': io-pgtable-arm.c:(.init.text+0xab0): undefined reference to `free_io_pgtable_ops' riscv64-linux-ld: drivers/iommu/ipmmu-vmsa.o: in function `.L140': ipmmu-vmsa.c:(.text+0xbc4): undefined reference to `free_io_pgtable_ops' riscv64-linux-ld: drivers/iommu/ipmmu-vmsa.o: in function `.L0 ': ipmmu-vmsa.c:(.text+0x145e): undefined reference to `alloc_io_pgtable_ops' Add ARM || ARM64 || COMPILE_TEST dependencies to IPMMU_VMSA to prevent these issues, i.e., so that ARCH_RENESAS on RISC-V is not allowed. This makes the ARCH dependencies become: depends on (ARCH_RENESAS && (ARM || ARM64)) || COMPILE_TEST but that can be a bit hard to read. Fixes: 8292493c22c8 ("riscv: Kconfig.socs: Add ARCH_RENESAS kconfig option") Signed-off-by: Randy Dunlap Suggested-by: Geert Uytterhoeven Cc: Joerg Roedel Cc: Will Deacon Cc: Robin Murphy Cc: iommu@lists.linux.dev Cc: Conor Dooley Cc: Lad Prabhakar Reviewed-by: Robin Murphy Link: https://lore.kernel.org/r/20230330165817.21920-1-rdunlap@infradead.org Signed-off-by: Joerg Roedel --- drivers/iommu/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 6de900776e24a..4d800601e8ecd 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -282,6 +282,7 @@ config EXYNOS_IOMMU_DEBUG config IPMMU_VMSA bool "Renesas VMSA-compatible IPMMU" depends on ARCH_RENESAS || COMPILE_TEST + depends on ARM || ARM64 || COMPILE_TEST depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE select IOMMU_API select IOMMU_IO_PGTABLE_LPAE From ec014683c564fb74fc68e8f5e84691d3b3839d24 Mon Sep 17 00:00:00 2001 From: Chao Wang Date: Mon, 17 Apr 2023 03:04:21 +0000 Subject: [PATCH 051/303] iommu/rockchip: Fix unwind goto issue Smatch complains that drivers/iommu/rockchip-iommu.c:1306 rk_iommu_probe() warn: missing unwind goto? The rk_iommu_probe function, after obtaining the irq value through platform_get_irq, directly returns an error if the returned value is negative, without releasing any resources. Fix this by adding a new error handling label "err_pm_disable" and use a goto statement to redirect to the error handling process. In order to preserve the original semantics, set err to the value of irq. 
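Condensed from the hunk below (only names already used in the driver appear here), the resulting unwind order is:

    int irq = platform_get_irq(pdev, i);

    if (irq < 0) {
            err = irq;            /* keep the errno from platform_get_irq() */
            goto err_pm_disable;  /* undo pm_runtime_enable() instead of leaking */
    }
    ...
  err_pm_disable:
    pm_runtime_disable(dev);
  err_remove_sysfs:
    iommu_device_sysfs_remove(&iommu->iommu);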
Fixes: 1aa55ca9b14a ("iommu/rockchip: Move irq request past pm_runtime_enable") Signed-off-by: Chao Wang Reviewed-by: Dongliang Mu Reviewed-by: Heiko Stuebner Link: https://lore.kernel.org/r/20230417030421.2777-1-D202280639@hust.edu.cn Signed-off-by: Joerg Roedel --- drivers/iommu/rockchip-iommu.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index ea5a3088bb7e8..4054030c32379 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1335,20 +1335,22 @@ static int rk_iommu_probe(struct platform_device *pdev) for (i = 0; i < iommu->num_irq; i++) { int irq = platform_get_irq(pdev, i); - if (irq < 0) - return irq; + if (irq < 0) { + err = irq; + goto err_pm_disable; + } err = devm_request_irq(iommu->dev, irq, rk_iommu_irq, IRQF_SHARED, dev_name(dev), iommu); - if (err) { - pm_runtime_disable(dev); - goto err_remove_sysfs; - } + if (err) + goto err_pm_disable; } dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask); return 0; +err_pm_disable: + pm_runtime_disable(dev); err_remove_sysfs: iommu_device_sysfs_remove(&iommu->iommu); err_put_group: From ed8a2f4ddef2eaaf864ab1efbbca9788187036ab Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Wed, 19 Apr 2023 21:11:53 +0100 Subject: [PATCH 052/303] iommu/amd: Don't block updates to GATag if guest mode is on On KVM GSI routing table updates, specially those where they have vIOMMUs with interrupt remapping enabled (to boot >255vcpus setups without relying on KVM_FEATURE_MSI_EXT_DEST_ID), a VMM may update the backing VF MSIs with a new VCPU affinity. On AMD with AVIC enabled, the new vcpu affinity info is updated via: avic_pi_update_irte() irq_set_vcpu_affinity() amd_ir_set_vcpu_affinity() amd_iommu_{de}activate_guest_mode() Where the IRTE[GATag] is updated with the new vcpu affinity. The GATag contains VM ID and VCPU ID, and is used by IOMMU hardware to signal KVM (via GALog) when interrupt cannot be delivered due to vCPU is in blocking state. The issue is that amd_iommu_activate_guest_mode() will essentially only change IRTE fields on transitions from non-guest-mode to guest-mode and otherwise returns *with no changes to IRTE* on already configured guest-mode interrupts. To the guest this means that the VF interrupts remain affined to the first vCPU they were first configured, and guest will be unable to issue VF interrupts and receive messages like this from spurious interrupts (e.g. from waking the wrong vCPU in GALog): [ 167.759472] __common_interrupt: 3.34 No irq handler for vector [ 230.680927] mlx5_core 0000:00:02.0: mlx5_cmd_eq_recover:247:(pid 3122): Recovered 1 EQEs on cmd_eq [ 230.681799] mlx5_core 0000:00:02.0: wait_func_handle_exec_timeout:1113:(pid 3122): cmd[0]: CREATE_CQ(0x400) recovered after timeout [ 230.683266] __common_interrupt: 3.34 No irq handler for vector Given the fact that amd_ir_set_vcpu_affinity() uses amd_iommu_activate_guest_mode() underneath it essentially means that VCPU affinity changes of IRTEs are nops. Fix it by dropping the check for guest-mode at amd_iommu_activate_guest_mode(). Same thing is applicable to amd_iommu_deactivate_guest_mode() although, even if the IRTE doesn't change underlying DestID on the host, the VFIO IRQ handler will still be able to poke at the right guest-vCPU. 
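The change itself is a relaxation of the entry check in amd_iommu_activate_guest_mode(); a sketch taken from the hunk below:

    /* before: an IRTE already in guest mode was returned untouched,
     * so a later vCPU affinity update never rewrote the GATag */
    if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
        !entry || entry->lo.fields_vapic.guest_mode)
            return 0;

    /* after: only the vAPIC-enabled and NULL-entry checks remain */
    if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry)
            return 0;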
Fixes: b9c6ff94e43a ("iommu/amd: Re-factor guest virtual APIC (de-)activation code") Signed-off-by: Joao Martins Reviewed-by: Suravee Suthikulpanit Link: https://lore.kernel.org/r/20230419201154.83880-2-joao.m.martins@oracle.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/iommu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 4a314647d1f79..72845541ef2eb 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3493,8 +3493,7 @@ int amd_iommu_activate_guest_mode(void *data) struct irte_ga *entry = (struct irte_ga *) ir_data->entry; u64 valid; - if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || - !entry || entry->lo.fields_vapic.guest_mode) + if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry) return 0; valid = entry->lo.fields_vapic.valid; From af47b0a24058e56e983881993752f88288ca6511 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Wed, 19 Apr 2023 21:11:54 +0100 Subject: [PATCH 053/303] iommu/amd: Handle GALog overflows GALog exists to propagate interrupts into all vCPUs in the system when interrupts are marked as non running (e.g. when vCPUs aren't running). A GALog overflow happens when there's in no space in the log to record the GATag of the interrupt. So when the GALOverflow condition happens, the GALog queue is processed and the GALog is restarted, as the IOMMU manual indicates in section "2.7.4 Guest Virtual APIC Log Restart Procedure": | * Wait until MMIO Offset 2020h[GALogRun]=0b so that all request | entries are completed as circumstances allow. GALogRun must be 0b to | modify the guest virtual APIC log registers safely. | * Write MMIO Offset 0018h[GALogEn]=0b. | * As necessary, change the following values (e.g., to relocate or | resize the guest virtual APIC event log): | - the Guest Virtual APIC Log Base Address Register | [MMIO Offset 00E0h], | - the Guest Virtual APIC Log Head Pointer Register | [MMIO Offset 2040h][GALogHead], and | - the Guest Virtual APIC Log Tail Pointer Register | [MMIO Offset 2048h][GALogTail]. | * Write MMIO Offset 2020h[GALOverflow] = 1b to clear the bit (W1C). | * Write MMIO Offset 0018h[GALogEn] = 1b, and either set | MMIO Offset 0018h[GAIntEn] to enable the GA log interrupt or clear | the bit to disable it. Failing to handle the GALog overflow means that none of the VFs (in any guest) will work with IOMMU AVIC forcing the user to power cycle the host. When handling the event it resumes the GALog without resizing much like how it is done in the event handler overflow. The [MMIO Offset 2020h][GALOverflow] bit might be set in status register without the [MMIO Offset 2020h][GAInt] bit, so when deciding to poll for GA events (to clear space in the galog), also check the overflow bit. 
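A condensed sketch of the restart helper added below, mirroring the quoted procedure (bail out while GALogRun is still set, toggle GALogEn/GAIntEn, W1C the overflow bit, then re-enable); all names come from the hunks themselves:

    status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
    if (status & MMIO_STATUS_GALOG_RUN_MASK)
            return;                         /* hardware still draining the log */

    iommu_feature_disable(iommu, CONTROL_GALOG_EN);
    iommu_feature_disable(iommu, CONTROL_GAINT_EN);

    writel(MMIO_STATUS_GALOG_OVERFLOW_MASK,         /* write-1-to-clear */
           iommu->mmio_base + MMIO_STATUS_OFFSET);

    iommu_feature_enable(iommu, CONTROL_GAINT_EN);
    iommu_feature_enable(iommu, CONTROL_GALOG_EN);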
[suravee: Check for GAOverflow without GAInt, toggle CONTROL_GAINT_EN] Co-developed-by: Suravee Suthikulpanit Signed-off-by: Suravee Suthikulpanit Signed-off-by: Joao Martins Reviewed-by: Vasant Hegde Link: https://lore.kernel.org/r/20230419201154.83880-3-joao.m.martins@oracle.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/amd_iommu.h | 1 + drivers/iommu/amd/init.c | 24 ++++++++++++++++++++++++ drivers/iommu/amd/iommu.c | 9 ++++++++- 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index e98f20a9bdd82..e7ee2577f98d8 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -15,6 +15,7 @@ extern irqreturn_t amd_iommu_int_thread(int irq, void *data); extern irqreturn_t amd_iommu_int_handler(int irq, void *data); extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid); extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu); +extern void amd_iommu_restart_ga_log(struct amd_iommu *iommu); extern int amd_iommu_init_devices(void); extern void amd_iommu_uninit_devices(void); extern void amd_iommu_init_notifier(void); diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 329a406cc37de..c2d80a4e5fb06 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -758,6 +758,30 @@ void amd_iommu_restart_event_logging(struct amd_iommu *iommu) iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); } +/* + * This function restarts event logging in case the IOMMU experienced + * an GA log overflow. + */ +void amd_iommu_restart_ga_log(struct amd_iommu *iommu) +{ + u32 status; + + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + if (status & MMIO_STATUS_GALOG_RUN_MASK) + return; + + pr_info_ratelimited("IOMMU GA Log restarting\n"); + + iommu_feature_disable(iommu, CONTROL_GALOG_EN); + iommu_feature_disable(iommu, CONTROL_GAINT_EN); + + writel(MMIO_STATUS_GALOG_OVERFLOW_MASK, + iommu->mmio_base + MMIO_STATUS_OFFSET); + + iommu_feature_enable(iommu, CONTROL_GAINT_EN); + iommu_feature_enable(iommu, CONTROL_GALOG_EN); +} + /* * This function resets the command buffer if the IOMMU stopped fetching * commands from it. 
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 72845541ef2eb..8c08d82733267 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -845,6 +845,7 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \ MMIO_STATUS_EVT_INT_MASK | \ MMIO_STATUS_PPR_INT_MASK | \ + MMIO_STATUS_GALOG_OVERFLOW_MASK | \ MMIO_STATUS_GALOG_INT_MASK) irqreturn_t amd_iommu_int_thread(int irq, void *data) @@ -868,10 +869,16 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) } #ifdef CONFIG_IRQ_REMAP - if (status & MMIO_STATUS_GALOG_INT_MASK) { + if (status & (MMIO_STATUS_GALOG_INT_MASK | + MMIO_STATUS_GALOG_OVERFLOW_MASK)) { pr_devel("Processing IOMMU GA Log\n"); iommu_poll_ga_log(iommu); } + + if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) { + pr_info_ratelimited("IOMMU GA Log overflow\n"); + amd_iommu_restart_ga_log(iommu); + } #endif if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) { From 8ec4e2befef10c7679cd59251956a428e783c0b5 Mon Sep 17 00:00:00 2001 From: Jerry Snitselaar Date: Thu, 20 Apr 2023 12:20:13 -0700 Subject: [PATCH 054/303] iommu/amd: Fix up merge conflict resolution Merge commit e17c6debd4b2 ("Merge branches 'arm/mediatek', 'arm/msm', 'arm/renesas', 'arm/rockchip', 'arm/smmu', 'x86/vt-d' and 'x86/amd' into next") added amd_iommu_init_devices, amd_iommu_uninit_devices, and amd_iommu_init_notifier back to drivers/iommu/amd/amd_iommu.h. The only references to them are here, so clean them up. Fixes: e17c6debd4b2 ("Merge branches 'arm/mediatek', 'arm/msm', 'arm/renesas', 'arm/rockchip', 'arm/smmu', 'x86/vt-d' and 'x86/amd' into next") Cc: Joerg Roedel Cc: Suravee Suthikulpanit Cc: Will Deacon Cc: Robin Murphy Signed-off-by: Jerry Snitselaar Reviewed-by: Vasant Hegde Link: https://lore.kernel.org/r/20230420192013.733331-1-jsnitsel@redhat.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/amd_iommu.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index e7ee2577f98d8..9beeceb9d825d 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -16,9 +16,6 @@ extern irqreturn_t amd_iommu_int_handler(int irq, void *data); extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid); extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu); extern void amd_iommu_restart_ga_log(struct amd_iommu *iommu); -extern int amd_iommu_init_devices(void); -extern void amd_iommu_uninit_devices(void); -extern void amd_iommu_init_notifier(void); extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid); #ifdef CONFIG_AMD_IOMMU_DEBUGFS From 29f54745f24547a84b18582e054df9bea1a7bf3e Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 25 Apr 2023 16:04:15 -0300 Subject: [PATCH 055/303] iommu/amd: Add missing domain type checks Drivers are supposed to list the domain types they support in their domain_alloc() ops so when we add new domain types, like BLOCKING or SVA, they don't start breaking. This ended up providing an empty UNMANAGED domain when the core code asked for a BLOCKING domain, which happens to be the fallback for drivers that don't support it, but this is completely wrong for SVA. Check for the DMA types AMD supports and reject every other kind. 
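Sketch of the resulting type filter in protection_domain_alloc() (full hunk below): anything other than the identity, unmanaged and DMA domain types is rejected up front.

    if (type == IOMMU_DOMAIN_IDENTITY) {
            ...
    } else if (type == IOMMU_DOMAIN_UNMANAGED) {
            pgtable = AMD_IOMMU_V1;
    } else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) {
            pgtable = amd_iommu_pgtable;
    } else {
            return NULL;    /* e.g. BLOCKING or SVA: not handled here */
    }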
Fixes: 136467962e49 ("iommu: Add IOMMU SVA domain support") Signed-off-by: Jason Gunthorpe Reviewed-by: Vasant Hegde Reviewed-by: Kevin Tian Link: https://lore.kernel.org/r/0-v1-2ac37b893728+da-amd_check_types_jgg@nvidia.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/iommu.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 8c08d82733267..3797e35df0357 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2074,7 +2074,7 @@ static struct protection_domain *protection_domain_alloc(unsigned int type) { struct io_pgtable_ops *pgtbl_ops; struct protection_domain *domain; - int pgtable = amd_iommu_pgtable; + int pgtable; int mode = DEFAULT_PGTABLE_LEVEL; int ret; @@ -2091,6 +2091,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type) mode = PAGE_MODE_NONE; } else if (type == IOMMU_DOMAIN_UNMANAGED) { pgtable = AMD_IOMMU_V1; + } else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) { + pgtable = amd_iommu_pgtable; + } else { + return NULL; } switch (pgtable) { From 2212fc2acf3f6ee690ea36506fb882a19d1bfcab Mon Sep 17 00:00:00 2001 From: Jon Pan-Doh Date: Wed, 26 Apr 2023 13:32:56 -0700 Subject: [PATCH 056/303] iommu/amd: Fix domain flush size when syncing iotlb When running on an AMD vIOMMU, we observed multiple invalidations (of decreasing power of 2 aligned sizes) when unmapping a single page. Domain flush takes gather bounds (end-start) as size param. However, gather->end is defined as the last inclusive address (start + size - 1). This leads to an off by 1 error. With this patch, verified that 1 invalidation occurs when unmapping a single page. Fixes: a270be1b3fdf ("iommu/amd: Use only natural aligned flushes in a VM") Cc: stable@vger.kernel.org # >= 5.15 Signed-off-by: Jon Pan-Doh Tested-by: Sudheer Dantuluri Suggested-by: Gary Zibrat Reviewed-by: Vasant Hegde Acked-by: Nadav Amit Link: https://lore.kernel.org/r/20230426203256.237116-1-pandoh@google.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 3797e35df0357..0f3ac4b489d67 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2398,7 +2398,7 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain, unsigned long flags; spin_lock_irqsave(&dom->lock, flags); - domain_flush_pages(dom, gather->start, gather->end - gather->start, 1); + domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1); amd_iommu_domain_flush_complete(dom); spin_unlock_irqrestore(&dom->lock, flags); } From 13247018d68f21e7132924b9853f7e2c423588b6 Mon Sep 17 00:00:00 2001 From: Maurizio Lombardi Date: Mon, 8 May 2023 18:22:17 +0200 Subject: [PATCH 057/303] scsi: target: iscsi: Fix hang in the iSCSI login code If the initiator suddenly stops sending data during a login while keeping the TCP connection open, the login_work won't be scheduled and will never release the login semaphore; concurrent login operations will therefore get stuck and fail. The bug is due to the inability of the login timeout code to properly handle this particular case. Fix the problem by replacing the old per-NP login timer with a new per-connection timer. The timer is started when an initiator connects to the target; if it expires, it sends a SIGINT signal to the thread pointed at by the conn->login_kworker pointer. 
conn->login_kworker is set by calling the iscsit_set_login_timer_kworker() helper, initially it will point to the np thread; When the login operation's control is in the process of being passed from the NP-thread to login_work, the conn->login_worker pointer is set to NULL. Finally, login_kworker will be changed to point to the worker thread executing the login_work job. If conn->login_kworker is NULL when the timer expires, it means that the login operation hasn't been completed yet but login_work isn't running, in this case the timer will mark the login process as failed and will schedule login_work so the latter will be forced to free the resources it holds. Signed-off-by: Maurizio Lombardi Link: https://lore.kernel.org/r/20230508162219.1731964-2-mlombard@redhat.com Reviewed-by: Mike Christie Signed-off-by: Martin K. Petersen --- drivers/target/iscsi/iscsi_target.c | 2 - drivers/target/iscsi/iscsi_target_login.c | 63 ++------------------ drivers/target/iscsi/iscsi_target_nego.c | 70 +++++++++++++---------- drivers/target/iscsi/iscsi_target_util.c | 51 +++++++++++++++++ drivers/target/iscsi/iscsi_target_util.h | 4 ++ include/target/iscsi/iscsi_target_core.h | 6 +- 6 files changed, 103 insertions(+), 93 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 834cce50f9b00..b516c2893420b 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -364,8 +364,6 @@ struct iscsi_np *iscsit_add_np( init_completion(&np->np_restart_comp); INIT_LIST_HEAD(&np->np_list); - timer_setup(&np->np_login_timer, iscsi_handle_login_thread_timeout, 0); - ret = iscsi_target_setup_login_socket(np, sockaddr); if (ret != 0) { kfree(np); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 274bdd7845ca9..90b870f234f03 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -811,59 +811,6 @@ void iscsi_post_login_handler( iscsit_dec_conn_usage_count(conn); } -void iscsi_handle_login_thread_timeout(struct timer_list *t) -{ - struct iscsi_np *np = from_timer(np, t, np_login_timer); - - spin_lock_bh(&np->np_thread_lock); - pr_err("iSCSI Login timeout on Network Portal %pISpc\n", - &np->np_sockaddr); - - if (np->np_login_timer_flags & ISCSI_TF_STOP) { - spin_unlock_bh(&np->np_thread_lock); - return; - } - - if (np->np_thread) - send_sig(SIGINT, np->np_thread, 1); - - np->np_login_timer_flags &= ~ISCSI_TF_RUNNING; - spin_unlock_bh(&np->np_thread_lock); -} - -static void iscsi_start_login_thread_timer(struct iscsi_np *np) -{ - /* - * This used the TA_LOGIN_TIMEOUT constant because at this - * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout - */ - spin_lock_bh(&np->np_thread_lock); - np->np_login_timer_flags &= ~ISCSI_TF_STOP; - np->np_login_timer_flags |= ISCSI_TF_RUNNING; - mod_timer(&np->np_login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ); - - pr_debug("Added timeout timer to iSCSI login request for" - " %u seconds.\n", TA_LOGIN_TIMEOUT); - spin_unlock_bh(&np->np_thread_lock); -} - -static void iscsi_stop_login_thread_timer(struct iscsi_np *np) -{ - spin_lock_bh(&np->np_thread_lock); - if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) { - spin_unlock_bh(&np->np_thread_lock); - return; - } - np->np_login_timer_flags |= ISCSI_TF_STOP; - spin_unlock_bh(&np->np_thread_lock); - - del_timer_sync(&np->np_login_timer); - - spin_lock_bh(&np->np_thread_lock); - np->np_login_timer_flags &= ~ISCSI_TF_RUNNING; - 
spin_unlock_bh(&np->np_thread_lock); -} - int iscsit_setup_np( struct iscsi_np *np, struct sockaddr_storage *sockaddr) @@ -1123,10 +1070,13 @@ static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np) spin_lock_init(&conn->nopin_timer_lock); spin_lock_init(&conn->response_queue_lock); spin_lock_init(&conn->state_lock); + spin_lock_init(&conn->login_worker_lock); + spin_lock_init(&conn->login_timer_lock); timer_setup(&conn->nopin_response_timer, iscsit_handle_nopin_response_timeout, 0); timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0); + timer_setup(&conn->login_timer, iscsit_login_timeout, 0); if (iscsit_conn_set_transport(conn, np->np_transport) < 0) goto free_conn; @@ -1304,7 +1254,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) goto new_sess_out; } - iscsi_start_login_thread_timer(np); + iscsit_start_login_timer(conn, current); pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n"); conn->conn_state = TARG_CONN_STATE_XPT_UP; @@ -1417,8 +1367,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) if (ret < 0) goto new_sess_out; - iscsi_stop_login_thread_timer(np); - if (ret == 1) { tpg_np = conn->tpg_np; @@ -1434,7 +1382,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) new_sess_out: new_sess = true; old_sess_out: - iscsi_stop_login_thread_timer(np); + iscsit_stop_login_timer(conn); tpg_np = conn->tpg_np; iscsi_target_login_sess_out(conn, zero_tsih, new_sess); new_sess = false; @@ -1448,7 +1396,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) return 1; exit: - iscsi_stop_login_thread_timer(np); spin_lock_bh(&np->np_thread_lock); np->np_thread_state = ISCSI_NP_THREAD_EXIT; spin_unlock_bh(&np->np_thread_lock); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 24040c118e49d..e3a5644a70b36 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -535,25 +535,6 @@ static void iscsi_target_login_drop(struct iscsit_conn *conn, struct iscsi_login iscsi_target_login_sess_out(conn, zero_tsih, true); } -struct conn_timeout { - struct timer_list timer; - struct iscsit_conn *conn; -}; - -static void iscsi_target_login_timeout(struct timer_list *t) -{ - struct conn_timeout *timeout = from_timer(timeout, t, timer); - struct iscsit_conn *conn = timeout->conn; - - pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n"); - - if (conn->login_kworker) { - pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n", - conn->login_kworker->comm, conn->login_kworker->pid); - send_sig(SIGINT, conn->login_kworker, 1); - } -} - static void iscsi_target_do_login_rx(struct work_struct *work) { struct iscsit_conn *conn = container_of(work, @@ -562,12 +543,15 @@ static void iscsi_target_do_login_rx(struct work_struct *work) struct iscsi_np *np = login->np; struct iscsi_portal_group *tpg = conn->tpg; struct iscsi_tpg_np *tpg_np = conn->tpg_np; - struct conn_timeout timeout; int rc, zero_tsih = login->zero_tsih; bool state; pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", conn, current->comm, current->pid); + + spin_lock(&conn->login_worker_lock); + set_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags); + spin_unlock(&conn->login_worker_lock); /* * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready() * before initial PDU processing in iscsi_target_start_negotiation() @@ -597,19 +581,16 @@ static void iscsi_target_do_login_rx(struct work_struct *work) goto err; } - conn->login_kworker = current; 
allow_signal(SIGINT); - - timeout.conn = conn; - timer_setup_on_stack(&timeout.timer, iscsi_target_login_timeout, 0); - mod_timer(&timeout.timer, jiffies + TA_LOGIN_TIMEOUT * HZ); - pr_debug("Starting login timer for %s/%d\n", current->comm, current->pid); + rc = iscsit_set_login_timer_kworker(conn, current); + if (rc < 0) { + /* The login timer has already expired */ + pr_debug("iscsi_target_do_login_rx, login failed\n"); + goto err; + } rc = conn->conn_transport->iscsit_get_login_rx(conn, login); - del_timer_sync(&timeout.timer); - destroy_timer_on_stack(&timeout.timer); flush_signals(current); - conn->login_kworker = NULL; if (rc < 0) goto err; @@ -646,7 +627,17 @@ static void iscsi_target_do_login_rx(struct work_struct *work) if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_WRITE_ACTIVE)) goto err; + + /* + * Set the login timer thread pointer to NULL to prevent the + * login process from getting stuck if the initiator + * stops sending data. + */ + rc = iscsit_set_login_timer_kworker(conn, NULL); + if (rc < 0) + goto err; } else if (rc == 1) { + iscsit_stop_login_timer(conn); cancel_delayed_work(&conn->login_work); iscsi_target_nego_release(conn); iscsi_post_login_handler(np, conn, zero_tsih); @@ -656,6 +647,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work) err: iscsi_target_restore_sock_callbacks(conn); + iscsit_stop_login_timer(conn); cancel_delayed_work(&conn->login_work); iscsi_target_login_drop(conn, login); iscsit_deaccess_np(np, tpg, tpg_np); @@ -1368,14 +1360,30 @@ int iscsi_target_start_negotiation( * and perform connection cleanup now. */ ret = iscsi_target_do_login(conn, login); - if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) - ret = -1; + if (!ret) { + spin_lock(&conn->login_worker_lock); + + if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) + ret = -1; + else if (!test_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags)) { + if (iscsit_set_login_timer_kworker(conn, NULL) < 0) { + /* + * The timeout has expired already. + * Schedule login_work to perform the cleanup. 
+ */ + schedule_delayed_work(&conn->login_work, 0); + } + } + + spin_unlock(&conn->login_worker_lock); + } if (ret < 0) { iscsi_target_restore_sock_callbacks(conn); iscsi_remove_failed_auth_entry(conn); } if (ret != 0) { + iscsit_stop_login_timer(conn); cancel_delayed_work_sync(&conn->login_work); iscsi_target_nego_release(conn); } diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 26dc8ed3045b6..b14835fcb0334 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -1040,6 +1040,57 @@ void iscsit_stop_nopin_timer(struct iscsit_conn *conn) spin_unlock_bh(&conn->nopin_timer_lock); } +void iscsit_login_timeout(struct timer_list *t) +{ + struct iscsit_conn *conn = from_timer(conn, t, login_timer); + struct iscsi_login *login = conn->login; + + pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n"); + + spin_lock_bh(&conn->login_timer_lock); + login->login_failed = 1; + + if (conn->login_kworker) { + pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n", + conn->login_kworker->comm, conn->login_kworker->pid); + send_sig(SIGINT, conn->login_kworker, 1); + } else { + schedule_delayed_work(&conn->login_work, 0); + } + spin_unlock_bh(&conn->login_timer_lock); +} + +void iscsit_start_login_timer(struct iscsit_conn *conn, struct task_struct *kthr) +{ + pr_debug("Login timer started\n"); + + conn->login_kworker = kthr; + mod_timer(&conn->login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ); +} + +int iscsit_set_login_timer_kworker(struct iscsit_conn *conn, struct task_struct *kthr) +{ + struct iscsi_login *login = conn->login; + int ret = 0; + + spin_lock_bh(&conn->login_timer_lock); + if (login->login_failed) { + /* The timer has already expired */ + ret = -1; + } else { + conn->login_kworker = kthr; + } + spin_unlock_bh(&conn->login_timer_lock); + + return ret; +} + +void iscsit_stop_login_timer(struct iscsit_conn *conn) +{ + pr_debug("Login timer stopped\n"); + timer_delete_sync(&conn->login_timer); +} + int iscsit_send_tx_data( struct iscsit_cmd *cmd, struct iscsit_conn *conn, diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 33ea799a08508..24b8e577575a6 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -56,6 +56,10 @@ extern void iscsit_handle_nopin_timeout(struct timer_list *t); extern void __iscsit_start_nopin_timer(struct iscsit_conn *); extern void iscsit_start_nopin_timer(struct iscsit_conn *); extern void iscsit_stop_nopin_timer(struct iscsit_conn *); +extern void iscsit_login_timeout(struct timer_list *t); +extern void iscsit_start_login_timer(struct iscsit_conn *, struct task_struct *kthr); +extern void iscsit_stop_login_timer(struct iscsit_conn *); +extern int iscsit_set_login_timer_kworker(struct iscsit_conn *, struct task_struct *kthr); extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int); extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *); extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8); diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 229118156a1f6..42f4a4c0c1003 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -562,12 +562,14 @@ struct iscsit_conn { #define LOGIN_FLAGS_READ_ACTIVE 2 #define LOGIN_FLAGS_WRITE_ACTIVE 3 #define LOGIN_FLAGS_CLOSED 4 +#define LOGIN_FLAGS_WORKER_RUNNING 5 unsigned long 
login_flags; struct delayed_work login_work; struct iscsi_login *login; struct timer_list nopin_timer; struct timer_list nopin_response_timer; struct timer_list transport_timer; + struct timer_list login_timer; struct task_struct *login_kworker; /* Spinlock used for add/deleting cmd's from conn_cmd_list */ spinlock_t cmd_lock; @@ -576,6 +578,8 @@ struct iscsit_conn { spinlock_t nopin_timer_lock; spinlock_t response_queue_lock; spinlock_t state_lock; + spinlock_t login_timer_lock; + spinlock_t login_worker_lock; /* libcrypto RX and TX contexts for crc32c */ struct ahash_request *conn_rx_hash; struct ahash_request *conn_tx_hash; @@ -792,7 +796,6 @@ struct iscsi_np { enum np_thread_state_table np_thread_state; bool enabled; atomic_t np_reset_count; - enum iscsi_timer_flags_table np_login_timer_flags; u32 np_exports; enum np_flags_table np_flags; spinlock_t np_thread_lock; @@ -800,7 +803,6 @@ struct iscsi_np { struct socket *np_socket; struct sockaddr_storage np_sockaddr; struct task_struct *np_thread; - struct timer_list np_login_timer; void *np_context; struct iscsit_transport *np_transport; struct list_head np_list; From 98a8c2bf938a5973716f280da618077a3d255976 Mon Sep 17 00:00:00 2001 From: Maurizio Lombardi Date: Mon, 8 May 2023 18:22:18 +0200 Subject: [PATCH 058/303] scsi: target: iscsi: Remove unused transport_timer Signed-off-by: Maurizio Lombardi Link: https://lore.kernel.org/r/20230508162219.1731964-3-mlombard@redhat.com Reviewed-by: Mike Christie Signed-off-by: Martin K. Petersen --- include/target/iscsi/iscsi_target_core.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 42f4a4c0c1003..4c15420e8965d 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -568,7 +568,6 @@ struct iscsit_conn { struct iscsi_login *login; struct timer_list nopin_timer; struct timer_list nopin_response_timer; - struct timer_list transport_timer; struct timer_list login_timer; struct task_struct *login_kworker; /* Spinlock used for add/deleting cmd's from conn_cmd_list */ From 2a737d3b8c792400118d6cf94958f559de9c5e59 Mon Sep 17 00:00:00 2001 From: Maurizio Lombardi Date: Mon, 8 May 2023 18:22:19 +0200 Subject: [PATCH 059/303] scsi: target: iscsi: Prevent login threads from racing between each other The tpg->np_login_sem is a semaphore that is used to serialize the login process when multiple login threads run concurrently against the same target portal group. The iscsi_target_locate_portal() function finds the tpg, calls iscsit_access_np() against the np_login_sem semaphore and saves the tpg pointer in conn->tpg; If iscsi_target_locate_portal() fails, the caller will check for the conn->tpg pointer and, if it's not NULL, then it will assume that iscsi_target_locate_portal() called iscsit_access_np() on the semaphore. Make sure that conn->tpg gets initialized only if iscsit_access_np() was successful, otherwise iscsit_deaccess_np() may end up being called against a semaphore we never took, allowing more than one thread to access the same tpg. Signed-off-by: Maurizio Lombardi Link: https://lore.kernel.org/r/20230508162219.1731964-4-mlombard@redhat.com Reviewed-by: Mike Christie Signed-off-by: Martin K. 
Petersen --- drivers/target/iscsi/iscsi_target_nego.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index e3a5644a70b36..fa3fb5f4e6bc4 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -1122,6 +1122,7 @@ int iscsi_target_locate_portal( iscsi_target_set_sock_callbacks(conn); login->np = np; + conn->tpg = NULL; login_req = (struct iscsi_login_req *) login->req; payload_length = ntoh24(login_req->dlength); @@ -1189,7 +1190,6 @@ int iscsi_target_locate_portal( */ sessiontype = strncmp(s_buf, DISCOVERY, 9); if (!sessiontype) { - conn->tpg = iscsit_global->discovery_tpg; if (!login->leading_connection) goto get_target; @@ -1206,9 +1206,11 @@ int iscsi_target_locate_portal( * Serialize access across the discovery struct iscsi_portal_group to * process login attempt. */ + conn->tpg = iscsit_global->discovery_tpg; if (iscsit_access_np(np, conn->tpg) < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE); + conn->tpg = NULL; ret = -1; goto out; } From d54820b22e404b06b2b65877ff802cc7b31688bc Mon Sep 17 00:00:00 2001 From: Gleb Chesnokov Date: Wed, 17 May 2023 11:22:35 +0300 Subject: [PATCH 060/303] scsi: qla2xxx: Fix NULL pointer dereference in target mode When target mode is enabled, the pci_irq_get_affinity() function may return a NULL value in qla_mapq_init_qp_cpu_map() due to the qla24xx_enable_msix() code that handles IRQ settings for target mode. This leads to a crash due to a NULL pointer dereference. This patch fixes the issue by adding a check for the NULL value returned by pci_irq_get_affinity() and introducing a 'cpu_mapped' boolean flag to the qla_qpair structure, ensuring that the qpair's CPU affinity is updated when it has not been mapped to a CPU. Fixes: 1d201c81d4cc ("scsi: qla2xxx: Select qpair depending on which CPU post_cmd() gets called") Signed-off-by: Gleb Chesnokov Link: https://lore.kernel.org/r/56b416f2-4e0f-b6cf-d6d5-b7c372e3c6a2@scst.dev Reviewed-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_def.h | 1 + drivers/scsi/qla2xxx/qla_init.c | 3 +++ drivers/scsi/qla2xxx/qla_inline.h | 3 +++ drivers/scsi/qla2xxx/qla_isr.c | 3 +++ 4 files changed, 10 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index df5e5b7fdcfe7..84aa3571be6d4 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -3796,6 +3796,7 @@ struct qla_qpair { uint64_t retry_term_jiff; struct qla_tgt_counters tgt_counters; uint16_t cpuid; + bool cpu_mapped; struct qla_fw_resources fwres ____cacheline_aligned; struct qla_buf_pool buf_pool; u32 cmd_cnt; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index ec0423ec66817..1a955c3ff3d6c 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -9426,6 +9426,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, qpair->rsp->req = qpair->req; qpair->rsp->qpair = qpair; + if (!qpair->cpu_mapped) + qla_cpu_update(qpair, raw_smp_processor_id()); + if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) qpair->difdix_supported = 1; diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index cce6e425c1214..7b42558a8839a 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -539,11 +539,14 @@ qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha, if (!ha->qp_cpu_map) return; mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0); + if (!mask) + return; qpair->cpuid = cpumask_first(mask); for_each_cpu(cpu, mask) { ha->qp_cpu_map[cpu] = qpair; } msix->cpuid = qpair->cpuid; + qpair->cpu_mapped = true; } static inline void diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 71feda2cdb630..245e3a5d81fd3 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3770,6 +3770,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) { rsp->qpair->rcv_intr = 1; + + if (!rsp->qpair->cpu_mapped) + qla_cpu_update(rsp->qpair, raw_smp_processor_id()); } #define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \ From 11c439a19466e7feaccdbce148a75372fddaf4e9 Mon Sep 17 00:00:00 2001 From: Vasant Hegde Date: Thu, 18 May 2023 05:43:51 +0000 Subject: [PATCH 061/303] iommu/amd/pgtbl_v2: Fix domain max address IOMMU v2 page table supports 4 level (47 bit) or 5 level (56 bit) virtual address space. Current code assumes it can support 64bit IOVA address space. If IOVA allocator allocates virtual address > 47/56 bit (depending on page table level) then it will do wrong mapping and cause invalid translation. Hence adjust aperture size to use max address supported by the page table. 
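The helper added below clamps the aperture to what the configured page table can actually translate; as a worked example, a 4-level v2 table gives an aperture end of 2^47 - 1 and a 5-level table 2^56 - 1, while v1 keeps the full 64-bit space:

    static inline u64 dma_max_address(void)
    {
            if (amd_iommu_pgtable == AMD_IOMMU_V1)
                    return ~0ULL;

            /* V2 with 4/5 level page table */
            return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
    }

    ...
    domain->domain.geometry.aperture_end = dma_max_address();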
Reported-by: Jerry Snitselaar Fixes: aaac38f61487 ("iommu/amd: Initial support for AMD IOMMU v2 page table") Cc: # v6.0+ Cc: Suravee Suthikulpanit Signed-off-by: Vasant Hegde Reviewed-by: Jerry Snitselaar Link: https://lore.kernel.org/r/20230518054351.9626-1-vasant.hegde@amd.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/iommu.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 0f3ac4b489d67..dc1ec68497754 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2129,6 +2129,15 @@ static struct protection_domain *protection_domain_alloc(unsigned int type) return NULL; } +static inline u64 dma_max_address(void) +{ + if (amd_iommu_pgtable == AMD_IOMMU_V1) + return ~0ULL; + + /* V2 with 4/5 level page table */ + return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1); +} + static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) { struct protection_domain *domain; @@ -2145,7 +2154,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) return NULL; domain->domain.geometry.aperture_start = 0; - domain->domain.geometry.aperture_end = ~0ULL; + domain->domain.geometry.aperture_end = dma_max_address(); domain->domain.geometry.force_aperture = true; return &domain->domain; From d9eef346b601afb0bd74b49e0db06f6a5cebd030 Mon Sep 17 00:00:00 2001 From: Jason Gerecke Date: Fri, 14 Apr 2023 11:22:10 -0700 Subject: [PATCH 062/303] HID: wacom: Check for string overflow from strscpy calls The strscpy function is able to return an error code when a copy would overflow the size of the destination. The copy is stopped and the buffer terminated before overflow actually occurs so it is safe to continue execution, but we should still produce a warning should this occur. 
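The pattern used throughout the hunks below is simply to test the return value; strscpy() still NUL-terminates the (truncated) destination, so execution continues and only a warning is logged:

    if (strscpy(name, product_name, sizeof(name)) < 0)
            hid_warn(wacom->hdev,
                     "String overflow while assembling device name");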
Signed-off-by: Jason Gerecke Reviewed-by: Ping Cheng Reviewed-by: Peter Hutterer Signed-off-by: Jiri Kosina --- drivers/hid/wacom_sys.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 8214896adadad..7192970d199a0 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -2224,7 +2224,9 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix) } else if (strstr(product_name, "Wacom") || strstr(product_name, "wacom") || strstr(product_name, "WACOM")) { - strscpy(name, product_name, sizeof(name)); + if (strscpy(name, product_name, sizeof(name)) < 0) { + hid_warn(wacom->hdev, "String overflow while assembling device name"); + } } else { snprintf(name, sizeof(name), "Wacom %s", product_name); } @@ -2242,7 +2244,9 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix) if (name[strlen(name)-1] == ' ') name[strlen(name)-1] = '\0'; } else { - strscpy(name, features->name, sizeof(name)); + if (strscpy(name, features->name, sizeof(name)) < 0) { + hid_warn(wacom->hdev, "String overflow while assembling device name"); + } } snprintf(wacom_wac->name, sizeof(wacom_wac->name), "%s%s", @@ -2500,8 +2504,10 @@ static void wacom_wireless_work(struct work_struct *work) goto fail; } - strscpy(wacom_wac->name, wacom_wac1->name, - sizeof(wacom_wac->name)); + if (strscpy(wacom_wac->name, wacom_wac1->name, + sizeof(wacom_wac->name)) < 0) { + hid_warn(wacom->hdev, "String overflow while assembling device name"); + } } return; From bd249b91977b768ea02bf84d04625d2690ad2b98 Mon Sep 17 00:00:00 2001 From: Nikita Zhandarovich Date: Mon, 17 Apr 2023 09:01:48 -0700 Subject: [PATCH 063/303] HID: wacom: avoid integer overflow in wacom_intuos_inout() If high bit is set to 1 in ((data[3] & 0x0f << 28), after all arithmetic operations and integer promotions are done, high bits in wacom->serial[idx] will be filled with 1s as well. Avoid this, albeit unlikely, issue by specifying left operand's __u64 type for the right operand. Found by Linux Verification Center (linuxtesting.org) with static analysis tool SVACE. Fixes: 3bea733ab212 ("USB: wacom tablet driver reorganization") Signed-off-by: Nikita Zhandarovich Reviewed-by: Ping Cheng Cc: stable@vger.kernel.org Signed-off-by: Jiri Kosina --- drivers/hid/wacom_wac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index dc0f7d9a992c1..2ccf838371343 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -831,7 +831,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) /* Enter report */ if ((data[1] & 0xfc) == 0xc0) { /* serial number of the tool */ - wacom->serial[idx] = ((data[3] & 0x0f) << 28) + + wacom->serial[idx] = ((__u64)(data[3] & 0x0f) << 28) + (data[4] << 20) + (data[5] << 12) + (data[6] << 4) + (data[7] >> 4); From ed84c4517a5bc536e8572a01dfa11bc22a280d06 Mon Sep 17 00:00:00 2001 From: Sung-Chi Li Date: Mon, 24 Apr 2023 10:37:36 +0800 Subject: [PATCH 064/303] HID: google: add jewel USB id Add 1 additional hammer-like device. 
Signed-off-by: Sung-Chi Li Signed-off-by: Jiri Kosina --- drivers/hid/hid-google-hammer.c | 2 ++ drivers/hid/hid-ids.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index 7ae5f27df54dd..c6bdb9c4ef3e0 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -586,6 +586,8 @@ static const struct hid_device_id hammer_devices[] = { USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_JEWEL) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index d79e946acdcbe..5d29abac2300e 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -529,6 +529,7 @@ #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044 #define USB_DEVICE_ID_GOOGLE_DON 0x5050 #define USB_DEVICE_ID_GOOGLE_EEL 0x5057 +#define USB_DEVICE_ID_GOOGLE_JEWEL 0x5061 #define USB_VENDOR_ID_GOTOP 0x08f2 #define USB_DEVICE_ID_SUPER_Q2 0x007f From 16a9c24f24fbe4564284eb575b18cc20586b9270 Mon Sep 17 00:00:00 2001 From: Denis Arefev Date: Thu, 27 Apr 2023 14:47:45 +0300 Subject: [PATCH 065/303] HID: wacom: Add error check to wacom_parse_and_register() Added a variable check and transition in case of an error Found by Linux Verification Center (linuxtesting.org) with SVACE. Signed-off-by: Denis Arefev Reviewed-by: Ping Cheng Signed-off-by: Jiri Kosina --- drivers/hid/wacom_sys.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 7192970d199a0..76e5353aca0c7 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -2414,8 +2414,13 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) goto fail_quirks; } - if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) + if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) { error = hid_hw_open(hdev); + if (error) { + hid_err(hdev, "hw open failed\n"); + goto fail_quirks; + } + } wacom_set_shared_values(wacom_wac); devres_close_group(&hdev->dev, wacom); From d53d70084d27f56bcdf5074328f2c9ec861be596 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 17 May 2023 12:26:44 -0400 Subject: [PATCH 066/303] nfsd: make a copy of struct iattr before calling notify_change notify_change can modify the iattr structure. In particular it can end up setting ATTR_MODE when ATTR_KILL_SUID is already set, causing a BUG() if the same iattr is passed to notify_change more than once. Make a copy of the struct iattr before calling notify_change. Reported-by: Zhi Li Link: https://bugzilla.redhat.com/show_bug.cgi?id=2207969 Tested-by: Zhi Li Fixes: 34b91dda7124 ("NFSD: Make nfsd4_setattr() wait before returning NFS4ERR_DELAY") Signed-off-by: Jeff Layton Signed-off-by: Chuck Lever --- fs/nfsd/vfs.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index bb9d471721623..db67f8e19344a 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -536,7 +536,15 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, inode_lock(inode); for (retries = 1;;) { - host_err = __nfsd_setattr(dentry, iap); + struct iattr attrs; + + /* + * notify_change() can alter its iattr argument, making + * @iap unsuitable for submission multiple times. 
Make a + * copy for every loop iteration. + */ + attrs = *iap; + host_err = __nfsd_setattr(dentry, &attrs); if (host_err != -EAGAIN || !retries--) break; if (!nfsd_wait_for_delegreturn(rqstp, inode)) From ee7751b564a90f337330efc1221df40647d68756 Mon Sep 17 00:00:00 2001 From: Beau Belgrave Date: Fri, 5 May 2023 13:58:55 -0700 Subject: [PATCH 067/303] tracing/user_events: Use long vs int for atomic bit ops Each event stores a int to track which bit to set/clear when enablement changes. On big endian 64-bit configurations, it's possible this could cause memory corruption when it's used for atomic bit operations. Use unsigned long for enablement values to ensure any possible corruption cannot occur. Downcast to int after mask for the bit target. Link: https://lore.kernel.org/all/6f758683-4e5e-41c3-9b05-9efc703e827c@kili.mountain/ Link: https://lore.kernel.org/linux-trace-kernel/20230505205855.6407-1-beaub@linux.microsoft.com Fixes: dcb8177c1395 ("tracing/user_events: Add ioctl for disabling addresses") Reported-by: Dan Carpenter Signed-off-by: Beau Belgrave Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace_events_user.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index b1ecd76776427..e37c7f168c447 100644 --- a/kernel/trace/trace_events_user.c +++ b/kernel/trace/trace_events_user.c @@ -101,7 +101,7 @@ struct user_event_enabler { unsigned long addr; /* Track enable bit, flags, etc. Aligned for bitops. */ - unsigned int values; + unsigned long values; }; /* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */ @@ -116,7 +116,9 @@ struct user_event_enabler { /* Only duplicate the bit value */ #define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK -#define ENABLE_BITOPS(e) ((unsigned long *)&(e)->values) +#define ENABLE_BITOPS(e) (&(e)->values) + +#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK)) /* Used for asynchronous faulting in of pages */ struct user_event_enabler_fault { @@ -423,9 +425,9 @@ static int user_event_enabler_write(struct user_event_mm *mm, /* Update bit atomically, user tracers must be atomic as well */ if (enabler->event && enabler->event->status) - set_bit(enabler->values & ENABLE_VAL_BIT_MASK, ptr); + set_bit(ENABLE_BIT(enabler), ptr); else - clear_bit(enabler->values & ENABLE_VAL_BIT_MASK, ptr); + clear_bit(ENABLE_BIT(enabler), ptr); kunmap_local(kaddr); unpin_user_pages_dirty_lock(&page, 1, true); @@ -440,8 +442,7 @@ static bool user_event_enabler_exists(struct user_event_mm *mm, struct user_event_enabler *next; list_for_each_entry_safe(enabler, next, &mm->enablers, link) { - if (enabler->addr == uaddr && - (enabler->values & ENABLE_VAL_BIT_MASK) == bit) + if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit) return true; } @@ -2272,7 +2273,7 @@ static long user_events_ioctl_unreg(unsigned long uarg) list_for_each_entry_safe(enabler, next, &mm->enablers, link) if (enabler->addr == reg.disable_addr && - (enabler->values & ENABLE_VAL_BIT_MASK) == reg.disable_bit) { + ENABLE_BIT(enabler) == reg.disable_bit) { set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler)); if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler))) From 632478a05821bc1c9b55c3a1dd0fb1be7bfa1acc Mon Sep 17 00:00:00 2001 From: Daniel Bristot de Oliveira Date: Thu, 11 May 2023 18:32:01 +0200 Subject: [PATCH 068/303] tracing/timerlat: Always wakeup the timerlat thread While testing rtla timerlat auto analysis, I reach a condition where the interface was 
not receiving tracing data. I was able to manually reproduce the problem with these steps: # echo 0 > tracing_on # disable trace # echo 1 > osnoise/stop_tracing_us # stop trace if timerlat irq > 1 us # echo timerlat > current_tracer # enable timerlat tracer # sleep 1 # wait... that is the time when rtla # apply configs like prio or cgroup # echo 1 > tracing_on # start tracing # cat trace # tracer: timerlat # # _-----=> irqs-off # / _----=> need-resched # | / _---=> hardirq/softirq # || / _--=> preempt-depth # ||| / _-=> migrate-disable # |||| / delay # ||||| ACTIVATION # TASK-PID CPU# ||||| TIMESTAMP ID CONTEXT LATENCY # | | | ||||| | | | | NOTHING! Then, trying to enable tracing again with echo 1 > tracing_on resulted in no change: the trace was still not tracing. This problem happens because the timerlat IRQ hits the stop tracing condition while tracing is off, and do not wake up the timerlat thread, so the timerlat threads are kept sleeping forever, resulting in no trace, even after re-enabling the tracer. Avoid this condition by always waking up the threads, even after stopping tracing, allowing the tracer to return to its normal operating after a new tracing on. Link: https://lore.kernel.org/linux-trace-kernel/1ed8f830638b20a39d535d27d908e319a9a3c4e2.1683822622.git.bristot@kernel.org Cc: Juri Lelli Cc: stable@vger.kernel.org Fixes: a955d7eac177 ("trace: Add timerlat tracer") Signed-off-by: Daniel Bristot de Oliveira Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace_osnoise.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index efbbec2caff83..e97e3fa5cbed0 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -1652,6 +1652,8 @@ static enum hrtimer_restart timerlat_irq(struct hrtimer *timer) osnoise_stop_tracing(); notify_new_max_latency(diff); + wake_up_process(tlat->kthread); + return HRTIMER_NORESTART; } } From 3e0fea09b17fa2255f6cb0108bbffd4a505f8925 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 19 May 2023 16:07:38 -0700 Subject: [PATCH 069/303] tracing/user_events: Split up mm alloc and attach When a new mm is being created in a fork() path it currently is allocated and then attached in one go. This leaves the mm exposed out to the tracing register callbacks while any parent enabler locations are copied in. This should not happen. Split up mm alloc and attach as unique operations. When duplicating enablers, first alloc, then duplicate, and only upon success, attach. This prevents any timing window outside of the event_reg mutex for enablement walking. This allows for dropping RCU requirement for enablement walking in later patches. 
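As a rough sketch, the resulting ordering in the fork() path looks like this (condensed from the change below; error handling, RCU details and the actual enabler-copy loop are elided):

	mm = user_event_mm_alloc(t);		/* allocated, not yet visible */
	if (!mm)
		return;
	/* duplicate the parent's enablers; no register callback can see mm yet */
	...
	user_event_mm_attach(mm, t);		/* publish only once fully set up */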
Link: https://lkml.kernel.org/r/20230519230741.669-2-beaub@linux.microsoft.com Link: https://lore.kernel.org/linux-trace-kernel/CAHk-=whTBvXJuoi_kACo3qi5WZUmRrhyA-_=rRFsycTytmB6qw@mail.gmail.com/ Signed-off-by: Linus Torvalds [ change log written by Beau Belgrave ] Signed-off-by: Beau Belgrave Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace_events_user.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index e37c7f168c447..599aab46a94b8 100644 --- a/kernel/trace/trace_events_user.c +++ b/kernel/trace/trace_events_user.c @@ -539,10 +539,9 @@ static struct user_event_mm *user_event_mm_get_all(struct user_event *user) return found; } -static struct user_event_mm *user_event_mm_create(struct task_struct *t) +static struct user_event_mm *user_event_mm_alloc(struct task_struct *t) { struct user_event_mm *user_mm; - unsigned long flags; user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT); @@ -554,12 +553,6 @@ static struct user_event_mm *user_event_mm_create(struct task_struct *t) refcount_set(&user_mm->refcnt, 1); refcount_set(&user_mm->tasks, 1); - spin_lock_irqsave(&user_event_mms_lock, flags); - list_add_rcu(&user_mm->link, &user_event_mms); - spin_unlock_irqrestore(&user_event_mms_lock, flags); - - t->user_event_mm = user_mm; - /* * The lifetime of the memory descriptor can slightly outlast * the task lifetime if a ref to the user_event_mm is taken @@ -573,6 +566,17 @@ static struct user_event_mm *user_event_mm_create(struct task_struct *t) return user_mm; } +static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t) +{ + unsigned long flags; + + spin_lock_irqsave(&user_event_mms_lock, flags); + list_add_rcu(&user_mm->link, &user_event_mms); + spin_unlock_irqrestore(&user_event_mms_lock, flags); + + t->user_event_mm = user_mm; +} + static struct user_event_mm *current_user_event_mm(void) { struct user_event_mm *user_mm = current->user_event_mm; @@ -580,10 +584,12 @@ static struct user_event_mm *current_user_event_mm(void) if (user_mm) goto inc; - user_mm = user_event_mm_create(current); + user_mm = user_event_mm_alloc(current); if (!user_mm) goto error; + + user_event_mm_attach(user_mm, current); inc: refcount_inc(&user_mm->refcnt); error: @@ -671,7 +677,7 @@ void user_event_mm_remove(struct task_struct *t) void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm) { - struct user_event_mm *mm = user_event_mm_create(t); + struct user_event_mm *mm = user_event_mm_alloc(t); struct user_event_enabler *enabler; if (!mm) @@ -685,10 +691,11 @@ void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm) rcu_read_unlock(); + user_event_mm_attach(mm, t); return; error: rcu_read_unlock(); - user_event_mm_remove(t); + user_event_mm_destroy(mm); } static bool current_user_event_enabler_exists(unsigned long uaddr, From aaecdaf922835ed9a8ce56cdd9a8d40fe630257a Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 19 May 2023 16:07:39 -0700 Subject: [PATCH 070/303] tracing/user_events: Remove RCU lock while pinning pages pin_user_pages_remote() can reschedule which means we cannot hold any RCU lock while using it. Now that enablers are not exposed out to the tracing register callbacks during fork(), there is clearly no need to require the RCU lock as event_mutex is enough to protect changes. Remove unneeded RCU usages when pinning pages and walking enablers with event_mutex held. 
Cleanup a misleading "safe" list walk that is not needed. During fork() duplication, remove unneeded RCU list add, since the list is not exposed yet. Link: https://lkml.kernel.org/r/20230519230741.669-3-beaub@linux.microsoft.com Link: https://lore.kernel.org/linux-trace-kernel/CAHk-=wiiBfT4zNS29jA0XEsy8EmbqTH1hAPdRJCDAJMD8Gxt5A@mail.gmail.com/ Fixes: 7235759084a4 ("tracing/user_events: Use remote writes for event enablement") Signed-off-by: Linus Torvalds [ change log written by Beau Belgrave ] Signed-off-by: Beau Belgrave Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace_events_user.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index 599aab46a94b8..d34a59630e70c 100644 --- a/kernel/trace/trace_events_user.c +++ b/kernel/trace/trace_events_user.c @@ -439,9 +439,8 @@ static bool user_event_enabler_exists(struct user_event_mm *mm, unsigned long uaddr, unsigned char bit) { struct user_event_enabler *enabler; - struct user_event_enabler *next; - list_for_each_entry_safe(enabler, next, &mm->enablers, link) { + list_for_each_entry(enabler, &mm->enablers, link) { if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit) return true; } @@ -456,19 +455,19 @@ static void user_event_enabler_update(struct user_event *user) struct user_event_mm *next; int attempt; + lockdep_assert_held(&event_mutex); + while (mm) { next = mm->next; mmap_read_lock(mm->mm); - rcu_read_lock(); - list_for_each_entry_rcu(enabler, &mm->enablers, link) { + list_for_each_entry(enabler, &mm->enablers, link) { if (enabler->event == user) { attempt = 0; user_event_enabler_write(mm, enabler, true, &attempt); } } - rcu_read_unlock(); mmap_read_unlock(mm->mm); user_event_mm_put(mm); mm = next; @@ -496,7 +495,9 @@ static bool user_event_enabler_dup(struct user_event_enabler *orig, enabler->values = orig->values & ENABLE_VAL_DUP_MASK; refcount_inc(&enabler->event->refcnt); - list_add_rcu(&enabler->link, &mm->enablers); + + /* Enablers not exposed yet, RCU not required */ + list_add(&enabler->link, &mm->enablers); return true; } From dcbd1ac2668b5fa02069ea96d581ca3f70a7543c Mon Sep 17 00:00:00 2001 From: Beau Belgrave Date: Fri, 19 May 2023 16:07:40 -0700 Subject: [PATCH 071/303] tracing/user_events: Rename link fields for clarity Currently most list_head fields of various structs within user_events are simply named link. This causes folks to keep additional context in their head when working with the code, which can be confusing. Instead of using link, describe what the actual link is, for example: list_del_rcu(&mm->link); Changes into: list_del_rcu(&mm->mms_link); The reader now is given a hint the link is to the mms global list instead of having to remember or spot check within the code. 
Link: https://lkml.kernel.org/r/20230519230741.669-4-beaub@linux.microsoft.com Link: https://lore.kernel.org/linux-trace-kernel/CAHk-=wicngggxVpbnrYHjRTwGE0WYscPRM+L2HO2BF8ia1EXgQ@mail.gmail.com/ Suggested-by: Linus Torvalds Signed-off-by: Beau Belgrave Signed-off-by: Steven Rostedt (Google) --- include/linux/user_events.h | 2 +- kernel/trace/trace_events_user.c | 40 ++++++++++++++++++-------------- 2 files changed, 23 insertions(+), 19 deletions(-) diff --git a/include/linux/user_events.h b/include/linux/user_events.h index 2847f5a18a861..17d452b389dec 100644 --- a/include/linux/user_events.h +++ b/include/linux/user_events.h @@ -17,7 +17,7 @@ #ifdef CONFIG_USER_EVENTS struct user_event_mm { - struct list_head link; + struct list_head mms_link; struct list_head enablers; struct mm_struct *mm; struct user_event_mm *next; diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index d34a59630e70c..238c7a0615fad 100644 --- a/kernel/trace/trace_events_user.c +++ b/kernel/trace/trace_events_user.c @@ -96,7 +96,7 @@ struct user_event { * these to track enablement sites that are tied to an event. */ struct user_event_enabler { - struct list_head link; + struct list_head mm_enablers_link; struct user_event *event; unsigned long addr; @@ -155,7 +155,7 @@ struct user_event_file_info { #define VALIDATOR_REL (1 << 1) struct user_event_validator { - struct list_head link; + struct list_head user_event_link; int offset; int flags; }; @@ -261,7 +261,7 @@ static struct user_event_group static void user_event_enabler_destroy(struct user_event_enabler *enabler) { - list_del_rcu(&enabler->link); + list_del_rcu(&enabler->mm_enablers_link); /* No longer tracking the event via the enabler */ refcount_dec(&enabler->event->refcnt); @@ -440,7 +440,7 @@ static bool user_event_enabler_exists(struct user_event_mm *mm, { struct user_event_enabler *enabler; - list_for_each_entry(enabler, &mm->enablers, link) { + list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) { if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit) return true; } @@ -461,7 +461,7 @@ static void user_event_enabler_update(struct user_event *user) next = mm->next; mmap_read_lock(mm->mm); - list_for_each_entry(enabler, &mm->enablers, link) { + list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) { if (enabler->event == user) { attempt = 0; user_event_enabler_write(mm, enabler, true, &attempt); @@ -497,7 +497,7 @@ static bool user_event_enabler_dup(struct user_event_enabler *orig, refcount_inc(&enabler->event->refcnt); /* Enablers not exposed yet, RCU not required */ - list_add(&enabler->link, &mm->enablers); + list_add(&enabler->mm_enablers_link, &mm->enablers); return true; } @@ -527,13 +527,15 @@ static struct user_event_mm *user_event_mm_get_all(struct user_event *user) */ rcu_read_lock(); - list_for_each_entry_rcu(mm, &user_event_mms, link) - list_for_each_entry_rcu(enabler, &mm->enablers, link) + list_for_each_entry_rcu(mm, &user_event_mms, mms_link) { + list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) { if (enabler->event == user) { mm->next = found; found = user_event_mm_get(mm); break; } + } + } rcu_read_unlock(); @@ -572,7 +574,7 @@ static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_stru unsigned long flags; spin_lock_irqsave(&user_event_mms_lock, flags); - list_add_rcu(&user_mm->link, &user_event_mms); + list_add_rcu(&user_mm->mms_link, &user_event_mms); spin_unlock_irqrestore(&user_event_mms_lock, flags); t->user_event_mm = user_mm; @@ -601,7 +603,7 @@ 
static void user_event_mm_destroy(struct user_event_mm *mm) { struct user_event_enabler *enabler, *next; - list_for_each_entry_safe(enabler, next, &mm->enablers, link) + list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) user_event_enabler_destroy(enabler); mmdrop(mm->mm); @@ -638,7 +640,7 @@ void user_event_mm_remove(struct task_struct *t) /* Remove the mm from the list, so it can no longer be enabled */ spin_lock_irqsave(&user_event_mms_lock, flags); - list_del_rcu(&mm->link); + list_del_rcu(&mm->mms_link); spin_unlock_irqrestore(&user_event_mms_lock, flags); /* @@ -686,9 +688,10 @@ void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm) rcu_read_lock(); - list_for_each_entry_rcu(enabler, &old_mm->enablers, link) + list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) { if (!user_event_enabler_dup(enabler, mm)) goto error; + } rcu_read_unlock(); @@ -757,7 +760,7 @@ static struct user_event_enabler */ if (!*write_result) { refcount_inc(&enabler->event->refcnt); - list_add_rcu(&enabler->link, &user_mm->enablers); + list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers); } mutex_unlock(&event_mutex); @@ -913,8 +916,8 @@ static void user_event_destroy_validators(struct user_event *user) struct user_event_validator *validator, *next; struct list_head *head = &user->validators; - list_for_each_entry_safe(validator, next, head, link) { - list_del(&validator->link); + list_for_each_entry_safe(validator, next, head, user_event_link) { + list_del(&validator->user_event_link); kfree(validator); } } @@ -968,7 +971,7 @@ static int user_event_add_field(struct user_event *user, const char *type, validator->offset = offset; /* Want sequential access when validating */ - list_add_tail(&validator->link, &user->validators); + list_add_tail(&validator->user_event_link, &user->validators); add_field: field->type = type; @@ -1358,7 +1361,7 @@ static int user_event_validate(struct user_event *user, void *data, int len) void *pos, *end = data + len; u32 loc, offset, size; - list_for_each_entry(validator, head, link) { + list_for_each_entry(validator, head, user_event_link) { pos = data + validator->offset; /* Already done min_size check, no bounds check here */ @@ -2279,7 +2282,7 @@ static long user_events_ioctl_unreg(unsigned long uarg) */ mutex_lock(&event_mutex); - list_for_each_entry_safe(enabler, next, &mm->enablers, link) + list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) { if (enabler->addr == reg.disable_addr && ENABLE_BIT(enabler) == reg.disable_bit) { set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler)); @@ -2290,6 +2293,7 @@ static long user_events_ioctl_unreg(unsigned long uarg) /* Removed at least one */ ret = 0; } + } mutex_unlock(&event_mutex); From ff9e1632d69e596d8ca256deb07433a8f3565038 Mon Sep 17 00:00:00 2001 From: Beau Belgrave Date: Fri, 19 May 2023 16:07:41 -0700 Subject: [PATCH 072/303] tracing/user_events: Document user_event_mm one-shot list usage During 6.4 development it became clear that the one-shot list used by the user_event_mm's next field was confusing to others. It is not clear how this list is protected or what the next field usage is for unless you are familiar with the code. Add comments into the user_event_mm struct indicating lock requirement and usage. Also document how and why this approach was used via comments in both user_event_enabler_update() and user_event_mm_get_all() and the rules to properly use it. 
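For reference, the pattern the new comments describe is roughly the following (condensed from user_event_enabler_update(); not a literal excerpt):

	lockdep_assert_held(&event_mutex);

	/* refcounted one-shot snapshot, linked via mm->next */
	mm = user_event_mm_get_all(user);

	while (mm) {
		next = mm->next;	/* only valid while event_mutex is held */
		/* may sleep: mmap_read_lock(), pin_user_pages_remote() */
		...
		user_event_mm_put(mm);	/* drop the reference taken by _get_all() */
		mm = next;
	}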
Link: https://lkml.kernel.org/r/20230519230741.669-5-beaub@linux.microsoft.com Link: https://lore.kernel.org/linux-trace-kernel/CAHk-=wicngggxVpbnrYHjRTwGE0WYscPRM+L2HO2BF8ia1EXgQ@mail.gmail.com/ Suggested-by: Linus Torvalds Signed-off-by: Beau Belgrave Signed-off-by: Steven Rostedt (Google) --- include/linux/user_events.h | 1 + kernel/trace/trace_events_user.c | 23 ++++++++++++++++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/include/linux/user_events.h b/include/linux/user_events.h index 17d452b389dec..8afa8c3a09730 100644 --- a/include/linux/user_events.h +++ b/include/linux/user_events.h @@ -20,6 +20,7 @@ struct user_event_mm { struct list_head mms_link; struct list_head enablers; struct mm_struct *mm; + /* Used for one-shot lists, protected by event_mutex */ struct user_event_mm *next; refcount_t refcnt; refcount_t tasks; diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index 238c7a0615fad..dbb14705d0d36 100644 --- a/kernel/trace/trace_events_user.c +++ b/kernel/trace/trace_events_user.c @@ -451,12 +451,25 @@ static bool user_event_enabler_exists(struct user_event_mm *mm, static void user_event_enabler_update(struct user_event *user) { struct user_event_enabler *enabler; - struct user_event_mm *mm = user_event_mm_get_all(user); struct user_event_mm *next; + struct user_event_mm *mm; int attempt; lockdep_assert_held(&event_mutex); + /* + * We need to build a one-shot list of all the mms that have an + * enabler for the user_event passed in. This list is only valid + * while holding the event_mutex. The only reason for this is due + * to the global mm list being RCU protected and we use methods + * which can wait (mmap_read_lock and pin_user_pages_remote). + * + * NOTE: user_event_mm_get_all() increments the ref count of each + * mm that is added to the list to prevent removal timing windows. + * We must always put each mm after they are used, which may wait. + */ + mm = user_event_mm_get_all(user); + while (mm) { next = mm->next; mmap_read_lock(mm->mm); @@ -515,6 +528,14 @@ static struct user_event_mm *user_event_mm_get_all(struct user_event *user) struct user_event_enabler *enabler; struct user_event_mm *mm; + /* + * We use the mm->next field to build a one-shot list from the global + * RCU protected list. To build this list the event_mutex must be held. + * This lets us build a list without requiring allocs that could fail + * when user based events are most wanted for diagnostics. + */ + lockdep_assert_held(&event_mutex); + /* * We do not want to block fork/exec while enablements are being * updated, so we use RCU to walk the current tasks that have used From e30fbc618e97b38dbb49f1d44dcd0778d3f23b8c Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Tue, 23 May 2023 22:11:08 -0400 Subject: [PATCH 073/303] tracing/histograms: Allow variables to have some modifiers Modifiers are used to change the behavior of keys. For instance, they can grouped into buckets, converted to syscall names (from the syscall identifier), show task->comm of the current pid, be an array of longs that represent a stacktrace, and more. It was found that nothing stopped a value from taking a modifier. As values are simple counters. If this happened, it would call code that was not expecting a modifier and crash the kernel. This was fixed by having the ___create_val_field() function test if a modifier was present and fail if one was. This fixed the crash. Now there's a problem with variables. 
Variables are used to pass fields from one event to another. Variables are allowed to have some modifiers, as the processing may need to happen at the time of the event (like stacktraces and comm names of the current pid). The issue is that it too uses __create_val_field(). Now that fails on modifiers, variables can no longer use them (this is a regression). As not all modifiers are for variables, have them use a separate check. Link: https://lore.kernel.org/linux-trace-kernel/20230523221108.064a5d82@rorschach.local.home Cc: stable@vger.kernel.org Cc: Masami Hiramatsu Cc: Tom Zanussi Cc: Mark Rutland Fixes: e0213434fe3e4 ("tracing: Do not let histogram values have some modifiers") Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace_events_hist.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 486cca3c2b754..543cb7dc84ad4 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -4238,13 +4238,19 @@ static int __create_val_field(struct hist_trigger_data *hist_data, goto out; } - /* Some types cannot be a value */ - if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT | - HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 | - HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET | - HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) { - hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str)); - ret = -EINVAL; + /* values and variables should not have some modifiers */ + if (hist_field->flags & HIST_FIELD_FL_VAR) { + /* Variable */ + if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT | + HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2)) + goto err; + } else { + /* Value */ + if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT | + HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 | + HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET | + HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) + goto err; } hist_data->fields[val_idx] = hist_field; @@ -4256,6 +4262,9 @@ static int __create_val_field(struct hist_trigger_data *hist_data, ret = -EINVAL; out: return ret; + err: + hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str)); + return -EINVAL; } static int create_val_field(struct hist_trigger_data *hist_data, From 4b512860bdbdddcf41467ebd394f27cb8dfb528c Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Tue, 23 May 2023 23:09:13 -0400 Subject: [PATCH 074/303] tracing: Rename stacktrace field to common_stacktrace The histogram and synthetic events can use a pseudo event called "stacktrace" that will create a stacktrace at the time of the event and use it just like it was a normal field. We have other pseudo events such as "common_cpu" and "common_timestamp". To stay consistent with that, convert "stacktrace" to "common_stacktrace". As this was used in older kernels, to keep backward compatibility, this will act just like "common_cpu" did with "cpu". That is, "cpu" will be the same as "common_cpu" unless the event has a "cpu" field. In which case, the event's field is used. The same is true with "stacktrace". Also update the documentation to reflect this change. 
Link: https://lore.kernel.org/linux-trace-kernel/20230523230913.6860e28d@rorschach.local.home Cc: Masami Hiramatsu Cc: Tom Zanussi Cc: Mark Rutland Signed-off-by: Steven Rostedt (Google) --- Documentation/trace/histogram.rst | 64 +++++++++++++++---------------- include/linux/trace_events.h | 1 + kernel/trace/trace.c | 2 +- kernel/trace/trace_events.c | 2 + kernel/trace/trace_events_hist.c | 16 +++++--- 5 files changed, 46 insertions(+), 39 deletions(-) diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst index 479c9eac63352..3c9b263de9c24 100644 --- a/Documentation/trace/histogram.rst +++ b/Documentation/trace/histogram.rst @@ -35,7 +35,7 @@ Documentation written by Tom Zanussi in place of an explicit value field - this is simply a count of event hits. If 'values' isn't specified, an implicit 'hitcount' value will be automatically created and used as the only value. - Keys can be any field, or the special string 'stacktrace', which + Keys can be any field, or the special string 'common_stacktrace', which will use the event's kernel stacktrace as the key. The keywords 'keys' or 'key' can be used to specify keys, and the keywords 'values', 'vals', or 'val' can be used to specify values. Compound @@ -54,7 +54,7 @@ Documentation written by Tom Zanussi 'compatible' if the fields named in the trigger share the same number and type of fields and those fields also have the same names. Note that any two events always share the compatible 'hitcount' and - 'stacktrace' fields and can therefore be combined using those + 'common_stacktrace' fields and can therefore be combined using those fields, however pointless that may be. 'hist' triggers add a 'hist' file to each event's subdirectory. @@ -547,9 +547,9 @@ Extended error information the hist trigger display symbolic call_sites, we can have the hist trigger additionally display the complete set of kernel stack traces that led to each call_site. To do that, we simply use the special - value 'stacktrace' for the key parameter:: + value 'common_stacktrace' for the key parameter:: - # echo 'hist:keys=stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \ + # echo 'hist:keys=common_stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \ /sys/kernel/tracing/events/kmem/kmalloc/trigger The above trigger will use the kernel stack trace in effect when an @@ -561,9 +561,9 @@ Extended error information every callpath to a kmalloc for a kernel compile):: # cat /sys/kernel/tracing/events/kmem/kmalloc/hist - # trigger info: hist:keys=stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active] + # trigger info: hist:keys=common_stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active] - { stacktrace: + { common_stacktrace: __kmalloc_track_caller+0x10b/0x1a0 kmemdup+0x20/0x50 hidraw_report_event+0x8a/0x120 [hid] @@ -581,7 +581,7 @@ Extended error information cpu_startup_entry+0x315/0x3e0 rest_init+0x7c/0x80 } hitcount: 3 bytes_req: 21 bytes_alloc: 24 - { stacktrace: + { common_stacktrace: __kmalloc_track_caller+0x10b/0x1a0 kmemdup+0x20/0x50 hidraw_report_event+0x8a/0x120 [hid] @@ -596,7 +596,7 @@ Extended error information do_IRQ+0x5a/0xf0 ret_from_intr+0x0/0x30 } hitcount: 3 bytes_req: 21 bytes_alloc: 24 - { stacktrace: + { common_stacktrace: kmem_cache_alloc_trace+0xeb/0x150 aa_alloc_task_context+0x27/0x40 apparmor_cred_prepare+0x1f/0x50 @@ -608,7 +608,7 @@ Extended error information . . . 
- { stacktrace: + { common_stacktrace: __kmalloc+0x11b/0x1b0 i915_gem_execbuffer2+0x6c/0x2c0 [i915] drm_ioctl+0x349/0x670 [drm] @@ -616,7 +616,7 @@ Extended error information SyS_ioctl+0x81/0xa0 system_call_fastpath+0x12/0x6a } hitcount: 17726 bytes_req: 13944120 bytes_alloc: 19593808 - { stacktrace: + { common_stacktrace: __kmalloc+0x11b/0x1b0 load_elf_phdrs+0x76/0xa0 load_elf_binary+0x102/0x1650 @@ -625,7 +625,7 @@ Extended error information SyS_execve+0x3a/0x50 return_from_execve+0x0/0x23 } hitcount: 33348 bytes_req: 17152128 bytes_alloc: 20226048 - { stacktrace: + { common_stacktrace: kmem_cache_alloc_trace+0xeb/0x150 apparmor_file_alloc_security+0x27/0x40 security_file_alloc+0x16/0x20 @@ -636,7 +636,7 @@ Extended error information SyS_open+0x1e/0x20 system_call_fastpath+0x12/0x6a } hitcount: 4766422 bytes_req: 9532844 bytes_alloc: 38131376 - { stacktrace: + { common_stacktrace: __kmalloc+0x11b/0x1b0 seq_buf_alloc+0x1b/0x50 seq_read+0x2cc/0x370 @@ -1026,7 +1026,7 @@ Extended error information First we set up an initially paused stacktrace trigger on the netif_receive_skb event:: - # echo 'hist:key=stacktrace:vals=len:pause' > \ + # echo 'hist:key=common_stacktrace:vals=len:pause' > \ /sys/kernel/tracing/events/net/netif_receive_skb/trigger Next, we set up an 'enable_hist' trigger on the sched_process_exec @@ -1060,9 +1060,9 @@ Extended error information $ wget https://www.kernel.org/pub/linux/kernel/v3.x/patch-3.19.xz # cat /sys/kernel/tracing/events/net/netif_receive_skb/hist - # trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused] + # trigger info: hist:keys=common_stacktrace:vals=len:sort=hitcount:size=2048 [paused] - { stacktrace: + { common_stacktrace: __netif_receive_skb_core+0x46d/0x990 __netif_receive_skb+0x18/0x60 netif_receive_skb_internal+0x23/0x90 @@ -1079,7 +1079,7 @@ Extended error information kthread+0xd2/0xf0 ret_from_fork+0x42/0x70 } hitcount: 85 len: 28884 - { stacktrace: + { common_stacktrace: __netif_receive_skb_core+0x46d/0x990 __netif_receive_skb+0x18/0x60 netif_receive_skb_internal+0x23/0x90 @@ -1097,7 +1097,7 @@ Extended error information irq_thread+0x11f/0x150 kthread+0xd2/0xf0 } hitcount: 98 len: 664329 - { stacktrace: + { common_stacktrace: __netif_receive_skb_core+0x46d/0x990 __netif_receive_skb+0x18/0x60 process_backlog+0xa8/0x150 @@ -1115,7 +1115,7 @@ Extended error information inet_sendmsg+0x64/0xa0 sock_sendmsg+0x3d/0x50 } hitcount: 115 len: 13030 - { stacktrace: + { common_stacktrace: __netif_receive_skb_core+0x46d/0x990 __netif_receive_skb+0x18/0x60 netif_receive_skb_internal+0x23/0x90 @@ -1142,14 +1142,14 @@ Extended error information into the histogram. In order to avoid having to set everything up again, we can just clear the histogram first:: - # echo 'hist:key=stacktrace:vals=len:clear' >> \ + # echo 'hist:key=common_stacktrace:vals=len:clear' >> \ /sys/kernel/tracing/events/net/netif_receive_skb/trigger Just to verify that it is in fact cleared, here's what we now see in the hist file:: # cat /sys/kernel/tracing/events/net/netif_receive_skb/hist - # trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused] + # trigger info: hist:keys=common_stacktrace:vals=len:sort=hitcount:size=2048 [paused] Totals: Hits: 0 @@ -1485,12 +1485,12 @@ Extended error information And here's an example that shows how to combine histogram data from any two events even if they don't share any 'compatible' fields - other than 'hitcount' and 'stacktrace'. These commands create a + other than 'hitcount' and 'common_stacktrace'. 
These commands create a couple of triggers named 'bar' using those fields:: - # echo 'hist:name=bar:key=stacktrace:val=hitcount' > \ + # echo 'hist:name=bar:key=common_stacktrace:val=hitcount' > \ /sys/kernel/tracing/events/sched/sched_process_fork/trigger - # echo 'hist:name=bar:key=stacktrace:val=hitcount' > \ + # echo 'hist:name=bar:key=common_stacktrace:val=hitcount' > \ /sys/kernel/tracing/events/net/netif_rx/trigger And displaying the output of either shows some interesting if @@ -1501,16 +1501,16 @@ Extended error information # event histogram # - # trigger info: hist:name=bar:keys=stacktrace:vals=hitcount:sort=hitcount:size=2048 [active] + # trigger info: hist:name=bar:keys=common_stacktrace:vals=hitcount:sort=hitcount:size=2048 [active] # - { stacktrace: + { common_stacktrace: kernel_clone+0x18e/0x330 kernel_thread+0x29/0x30 kthreadd+0x154/0x1b0 ret_from_fork+0x3f/0x70 } hitcount: 1 - { stacktrace: + { common_stacktrace: netif_rx_internal+0xb2/0xd0 netif_rx_ni+0x20/0x70 dev_loopback_xmit+0xaa/0xd0 @@ -1528,7 +1528,7 @@ Extended error information call_cpuidle+0x3b/0x60 cpu_startup_entry+0x22d/0x310 } hitcount: 1 - { stacktrace: + { common_stacktrace: netif_rx_internal+0xb2/0xd0 netif_rx_ni+0x20/0x70 dev_loopback_xmit+0xaa/0xd0 @@ -1543,7 +1543,7 @@ Extended error information SyS_sendto+0xe/0x10 entry_SYSCALL_64_fastpath+0x12/0x6a } hitcount: 2 - { stacktrace: + { common_stacktrace: netif_rx_internal+0xb2/0xd0 netif_rx+0x1c/0x60 loopback_xmit+0x6c/0xb0 @@ -1561,7 +1561,7 @@ Extended error information sock_sendmsg+0x38/0x50 ___sys_sendmsg+0x14e/0x270 } hitcount: 76 - { stacktrace: + { common_stacktrace: netif_rx_internal+0xb2/0xd0 netif_rx+0x1c/0x60 loopback_xmit+0x6c/0xb0 @@ -1579,7 +1579,7 @@ Extended error information sock_sendmsg+0x38/0x50 ___sys_sendmsg+0x269/0x270 } hitcount: 77 - { stacktrace: + { common_stacktrace: netif_rx_internal+0xb2/0xd0 netif_rx+0x1c/0x60 loopback_xmit+0x6c/0xb0 @@ -1597,7 +1597,7 @@ Extended error information sock_sendmsg+0x38/0x50 SYSC_sendto+0xef/0x170 } hitcount: 88 - { stacktrace: + { common_stacktrace: kernel_clone+0x18e/0x330 SyS_clone+0x19/0x20 entry_SYSCALL_64_fastpath+0x12/0x6a @@ -1949,7 +1949,7 @@ uninterruptible state:: # cd /sys/kernel/tracing # echo 's:block_lat pid_t pid; u64 delta; unsigned long[] stack;' > dynamic_events - # echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=stacktrace if prev_state == 2' >> events/sched/sched_switch/trigger + # echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=common_stacktrace if prev_state == 2' >> events/sched/sched_switch/trigger # echo 'hist:keys=prev_pid:delta=common_timestamp.usecs-$ts,s=$st:onmax($delta).trace(block_lat,prev_pid,$delta,$s)' >> events/sched/sched_switch/trigger # echo 1 > events/synthetic/block_lat/enable # cat trace diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 0e373222a6df8..7c4a0b72334eb 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -806,6 +806,7 @@ enum { FILTER_TRACE_FN, FILTER_COMM, FILTER_CPU, + FILTER_STACKTRACE, }; extern int trace_event_raw_init(struct trace_event_call *call); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ebc59781456a2..81801dc317849 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -5752,7 +5752,7 @@ static const char readme_msg[] = "\t table using the key(s) and value(s) named, and the value of a\n" "\t sum called 'hitcount' is incremented. Keys and values\n" "\t correspond to fields in the event's format description. 
Keys\n" - "\t can be any field, or the special string 'stacktrace'.\n" + "\t can be any field, or the special string 'common_stacktrace'.\n" "\t Compound keys consisting of up to two fields can be specified\n" "\t by the 'keys' keyword. Values must correspond to numeric\n" "\t fields. Sort keys consisting of up to two fields can be\n" diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 654ffa40457aa..57e539d479890 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -194,6 +194,8 @@ static int trace_define_generic_fields(void) __generic_field(int, common_cpu, FILTER_CPU); __generic_field(char *, COMM, FILTER_COMM); __generic_field(char *, comm, FILTER_COMM); + __generic_field(char *, stacktrace, FILTER_STACKTRACE); + __generic_field(char *, STACKTRACE, FILTER_STACKTRACE); return ret; } diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 543cb7dc84ad4..b97d3ad832f1a 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1364,7 +1364,7 @@ static const char *hist_field_name(struct hist_field *field, if (field->field) field_name = field->field->name; else - field_name = "stacktrace"; + field_name = "common_stacktrace"; } else if (field->flags & HIST_FIELD_FL_HITCOUNT) field_name = "hitcount"; @@ -2367,7 +2367,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, hist_data->enable_timestamps = true; if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS) hist_data->attrs->ts_in_usecs = true; - } else if (strcmp(field_name, "stacktrace") == 0) { + } else if (strcmp(field_name, "common_stacktrace") == 0) { *flags |= HIST_FIELD_FL_STACKTRACE; } else if (strcmp(field_name, "common_cpu") == 0) *flags |= HIST_FIELD_FL_CPU; @@ -2378,11 +2378,15 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, if (!field || !field->size) { /* * For backward compatibility, if field_name - * was "cpu", then we treat this the same as - * common_cpu. This also works for "CPU". + * was "cpu" or "stacktrace", then we treat this + * the same as common_cpu and common_stacktrace + * respectively. This also works for "CPU", and + * "STACKTRACE". */ if (field && field->filter_type == FILTER_CPU) { *flags |= HIST_FIELD_FL_CPU; + } else if (field && field->filter_type == FILTER_STACKTRACE) { + *flags |= HIST_FIELD_FL_STACKTRACE; } else { hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name)); @@ -5394,7 +5398,7 @@ static void hist_trigger_print_key(struct seq_file *m, if (key_field->field) seq_printf(m, "%s.stacktrace", key_field->field->name); else - seq_puts(m, "stacktrace:\n"); + seq_puts(m, "common_stacktrace:\n"); hist_trigger_stacktrace_print(m, key + key_field->offset, HIST_STACKTRACE_DEPTH); @@ -5977,7 +5981,7 @@ static int event_hist_trigger_print(struct seq_file *m, if (field->field) seq_printf(m, "%s.stacktrace", field->field->name); else - seq_puts(m, "stacktrace"); + seq_puts(m, "common_stacktrace"); } else hist_field_print(m, field); } From f1aab363438cd1c55a3c5420db2a17c0e29ef625 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Tue, 23 May 2023 22:54:29 -0400 Subject: [PATCH 075/303] tracing/selftests: Update synthetic event selftest to use common_stacktrace With the rename of the stacktrace field to common_stacktrace, update the selftests to reflect this change. Copy the current selftest to test the backward compatibility "stacktrace" keyword. Also the "requires" of that test was incorrect, so it would never actually ran before. 
That is fixed now. Link: https://lore.kernel.org/linux-trace-kernel/20230523225402.55951f2f@rorschach.local.home Cc: Masami Hiramatsu Cc: Tom Zanussi Cc: Mark Rutland Cc: Shuah Khan Cc: Shuah Khan Signed-off-by: Steven Rostedt (Google) --- .../trigger-synthetic-event-stack-legacy.tc | 24 +++++++++++++++++++ .../trigger-synthetic-event-stack.tc | 5 ++-- 2 files changed, 26 insertions(+), 3 deletions(-) create mode 100644 tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack-legacy.tc diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack-legacy.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack-legacy.tc new file mode 100644 index 0000000000000..d0cd91a930699 --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack-legacy.tc @@ -0,0 +1,24 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 +# description: event trigger - test inter-event histogram trigger trace action with dynamic string param (legacy stack) +# requires: set_event synthetic_events events/sched/sched_process_exec/hist "long[] stack' >> synthetic_events":README + +fail() { #msg + echo $1 + exit_fail +} + +echo "Test create synthetic event with stack" + +# Test the old stacktrace keyword (for backward compatibility) +echo 's:wake_lat pid_t pid; u64 delta; unsigned long[] stack;' > dynamic_events +echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=stacktrace if prev_state == 1||prev_state == 2' >> events/sched/sched_switch/trigger +echo 'hist:keys=prev_pid:delta=common_timestamp.usecs-$ts,s=$st:onmax($delta).trace(wake_lat,prev_pid,$delta,$s)' >> events/sched/sched_switch/trigger +echo 1 > events/synthetic/wake_lat/enable +sleep 1 + +if ! 
grep -q "=>.*sched" trace; then + fail "Failed to create synthetic event with stack" +fi + +exit 0 diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack.tc index 755dbe94ccf4a..8f1cc9a86a060 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack.tc @@ -1,7 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: event trigger - test inter-event histogram trigger trace action with dynamic string param -# requires: set_event synthetic_events events/sched/sched_process_exec/hist "long[]' >> synthetic_events":README +# requires: set_event synthetic_events events/sched/sched_process_exec/hist "can be any field, or the special string 'common_stacktrace'":README fail() { #msg echo $1 @@ -10,9 +10,8 @@ fail() { #msg echo "Test create synthetic event with stack" - echo 's:wake_lat pid_t pid; u64 delta; unsigned long[] stack;' > dynamic_events -echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=stacktrace if prev_state == 1||prev_state == 2' >> events/sched/sched_switch/trigger +echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=common_stacktrace if prev_state == 1||prev_state == 2' >> events/sched/sched_switch/trigger echo 'hist:keys=prev_pid:delta=common_timestamp.usecs-$ts,s=$st:onmax($delta).trace(wake_lat,prev_pid,$delta,$s)' >> events/sched/sched_switch/trigger echo 1 > events/synthetic/wake_lat/enable sleep 1 From a1a5f2c887252dec161c1e12e04303ca9ba56fa9 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 23 May 2023 21:53:10 -0700 Subject: [PATCH 076/303] dmaengine: pl330: rename _start to prevent build error "_start" is used in several arches and proably should be reserved for ARCH usage. Using it in a driver for a private symbol can cause a build error when it conflicts with ARCH usage of the same symbol. Therefore rename pl330's "_start" to "pl330_start_thread" so that there is no conflict and no build error. 
drivers/dma/pl330.c:1053:13: error: '_start' redeclared as different kind of symbol 1053 | static bool _start(struct pl330_thread *thrd) | ^~~~~~ In file included from ../include/linux/interrupt.h:21, from ../drivers/dma/pl330.c:18: arch/riscv/include/asm/sections.h:11:13: note: previous declaration of '_start' with type 'char[]' 11 | extern char _start[]; | ^~~~~~ Fixes: b7d861d93945 ("DMA: PL330: Merge PL330 driver into drivers/dma/") Fixes: ae43b3289186 ("ARM: 8202/1: dmaengine: pl330: Add runtime Power Management support v12") Signed-off-by: Randy Dunlap Cc: Jaswinder Singh Cc: Boojin Kim Cc: Krzysztof Kozlowski Cc: Russell King Cc: Vinod Koul Cc: dmaengine@vger.kernel.org Cc: linux-riscv@lists.infradead.org Link: https://lore.kernel.org/r/20230524045310.27923-1-rdunlap@infradead.org Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 0d9257fbdfb0d..b4731fe6bbc14 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1050,7 +1050,7 @@ static bool _trigger(struct pl330_thread *thrd) return true; } -static bool _start(struct pl330_thread *thrd) +static bool pl330_start_thread(struct pl330_thread *thrd) { switch (_state(thrd)) { case PL330_STATE_FAULT_COMPLETING: @@ -1702,7 +1702,7 @@ static int pl330_update(struct pl330_dmac *pl330) thrd->req_running = -1; /* Get going again ASAP */ - _start(thrd); + pl330_start_thread(thrd); /* For now, just make a list of callbacks to be done */ list_add_tail(&descdone->rqd, &pl330->req_done); @@ -2089,7 +2089,7 @@ static void pl330_tasklet(struct tasklet_struct *t) } else { /* Make sure the PL330 Channel thread is active */ spin_lock(&pch->thread->dmac->lock); - _start(pch->thread); + pl330_start_thread(pch->thread); spin_unlock(&pch->thread->dmac->lock); } @@ -2107,7 +2107,7 @@ static void pl330_tasklet(struct tasklet_struct *t) if (power_down) { pch->active = true; spin_lock(&pch->thread->dmac->lock); - _start(pch->thread); + pl330_start_thread(pch->thread); spin_unlock(&pch->thread->dmac->lock); power_down = false; } From 2a6c7e8cc74e58ba94b8c897035a8ef7f7349f76 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Tue, 23 May 2023 19:20:37 +0200 Subject: [PATCH 077/303] dmaengine: at_hdmac: Repair bitfield macros for peripheral ID handling The MSB part of the peripheral IDs need to go into the ATC_SRC_PER_MSB and ATC_DST_PER_MSB fields. Not the LSB part. This fixes a severe regression for TSE-850 devices (compatible axentia,tse850v3) where output to the audio I2S codec (the main purpose of the device) simply do not work. 
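As a concrete illustration (using a made-up peripheral ID of 0x25, i.e. 0b100101): the new

	FIELD_PREP(ATC_SRC_PER_MSB, FIELD_GET(ATC_PER_MSB, id)) | FIELD_PREP(ATC_SRC_PER, id)

programs 0b10 into the MSB field and 0b0101 into ATC_SRC_PER. The old macros fed the whole ID to FIELD_PREP() for the MSB field as well, and because FIELD_PREP() masks the value to the field width, the MSB field ended up holding the low bits of the ID (0b01 here) rather than bits [5:4].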
Fixes: d8840a7edcf0 ("dmaengine: at_hdmac: Use bitfield access macros") Cc: stable@vger.kernel.org Signed-off-by: Peter Rosin Reviewed-by: Tudor Ambarus Link: https://lore.kernel.org/r/01e5dae1-d4b0-cf31-516b-423b11b077f1@axentia.se Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 8858470246e15..6362013b90df1 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -153,8 +153,6 @@ #define ATC_AUTO BIT(31) /* Auto multiple buffer tx enable */ /* Bitfields in CFG */ -#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */ - #define ATC_SRC_PER GENMASK(3, 0) /* Channel src rq associated with periph handshaking ifc h */ #define ATC_DST_PER GENMASK(7, 4) /* Channel dst rq associated with periph handshaking ifc h */ #define ATC_SRC_REP BIT(8) /* Source Replay Mod */ @@ -181,10 +179,15 @@ #define ATC_DPIP_HOLE GENMASK(15, 0) #define ATC_DPIP_BOUNDARY GENMASK(25, 16) -#define ATC_SRC_PER_ID(id) (FIELD_PREP(ATC_SRC_PER_MSB, (id)) | \ - FIELD_PREP(ATC_SRC_PER, (id))) -#define ATC_DST_PER_ID(id) (FIELD_PREP(ATC_DST_PER_MSB, (id)) | \ - FIELD_PREP(ATC_DST_PER, (id))) +#define ATC_PER_MSB GENMASK(5, 4) /* Extract MSBs of a handshaking identifier */ +#define ATC_SRC_PER_ID(id) \ + ({ typeof(id) _id = (id); \ + FIELD_PREP(ATC_SRC_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \ + FIELD_PREP(ATC_SRC_PER, _id); }) +#define ATC_DST_PER_ID(id) \ + ({ typeof(id) _id = (id); \ + FIELD_PREP(ATC_DST_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \ + FIELD_PREP(ATC_DST_PER, _id); }) From e14fd2af7a1d621c167dad761f729135a7a76ff4 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Tue, 23 May 2023 19:20:47 +0200 Subject: [PATCH 078/303] dmaengine: at_hdmac: Extend the Flow Controller bitfield to three bits Some chips have two bits (e.g SAMA5D3), and some have three (e.g. SAM9G45). A field width of three is compatible as long as valid values are used for the different chips. There is no current use of any value needing three bits, so the fixed bug is relatively benign. 
Fixes: d8840a7edcf0 ("dmaengine: at_hdmac: Use bitfield access macros") Cc: stable@vger.kernel.org Reviewed-by: Tudor Ambarus Signed-off-by: Peter Rosin Link: https://lore.kernel.org/r/e2c898ba-c3a3-5dd3-384b-0585661c79f2@axentia.se Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 6362013b90df1..ee3a219e3a897 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -132,7 +132,7 @@ #define ATC_DST_PIP BIT(12) /* Destination Picture-in-Picture enabled */ #define ATC_SRC_DSCR_DIS BIT(16) /* Src Descriptor fetch disable */ #define ATC_DST_DSCR_DIS BIT(20) /* Dst Descriptor fetch disable */ -#define ATC_FC GENMASK(22, 21) /* Choose Flow Controller */ +#define ATC_FC GENMASK(23, 21) /* Choose Flow Controller */ #define ATC_FC_MEM2MEM 0x0 /* Mem-to-Mem (DMA) */ #define ATC_FC_MEM2PER 0x1 /* Mem-to-Periph (DMA) */ #define ATC_FC_PER2MEM 0x2 /* Periph-to-Mem (DMA) */ From 6ab39f99927eed605728b02d512438d828183c97 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 16 May 2023 20:14:19 +0200 Subject: [PATCH 079/303] crypto: x86/aria - Use 16 byte alignment for GFNI constant vectors The GFNI routines in the AVX version of the ARIA implementation now use explicit VMOVDQA instructions to load the constant input vectors, which means they must be 16 byte aligned. So ensure that this is the case, by dropping the section split and the incorrect .align 8 directive, and emitting the constants into the 16-byte aligned section instead. Note that the AVX2 version of this code deviates from this pattern, and does not require a similar fix, given that it loads these contants as 8-byte memory operands, for which AVX2 permits any alignment. Cc: Taehee Yoo Fixes: 8b84475318641c2b ("crypto: x86/aria-avx - Do not use avx2 instructions") Reported-by: syzbot+a6abcf08bad8b18fd198@syzkaller.appspotmail.com Tested-by: syzbot+a6abcf08bad8b18fd198@syzkaller.appspotmail.com Signed-off-by: Ard Biesheuvel Signed-off-by: Herbert Xu --- arch/x86/crypto/aria-aesni-avx-asm_64.S | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/crypto/aria-aesni-avx-asm_64.S b/arch/x86/crypto/aria-aesni-avx-asm_64.S index 7c1abc513f346..9556dacd98415 100644 --- a/arch/x86/crypto/aria-aesni-avx-asm_64.S +++ b/arch/x86/crypto/aria-aesni-avx-asm_64.S @@ -773,8 +773,6 @@ .octa 0x3F893781E95FE1576CDA64D2BA0CB204 #ifdef CONFIG_AS_GFNI -.section .rodata.cst8, "aM", @progbits, 8 -.align 8 /* AES affine: */ #define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0) .Ltf_aff_bitmatrix: From 95856d1f3c223c015780fffb8373a827fc4efd2e Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Tue, 23 May 2023 16:47:47 +0100 Subject: [PATCH 080/303] regmap: sdw: check for invalid multi-register writes config SoundWire code as it is only supports Bulk register writes and it does not support multi-register writes. Any drivers that set can_multi_write and use regmap_multi_reg_write() will easily endup with programming the hardware incorrectly without any errors. So, add this check in bus code to be able to validate the drivers config. 
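For illustration, a hypothetical codec driver config along these lines would previously be accepted and could silently misprogram the device, and is now rejected at regmap initialisation with -ENOTSUPP ("foo" and the register/value sizes are made up for the example):

	static const struct regmap_config foo_sdw_regmap_config = {
		.reg_bits = 32,
		.val_bits = 8,
		.can_multi_write = true,	/* not supported over SoundWire */
	};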
Fixes: 522272047dc6 ("regmap: sdw: Remove 8-bit value size restriction") Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20230523154747.5429-1-srinivas.kandagatla@linaro.org Signed-off-by: Mark Brown --- drivers/base/regmap/regmap-sdw.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c index 09899ae99fc19..159c0b740b001 100644 --- a/drivers/base/regmap/regmap-sdw.c +++ b/drivers/base/regmap/regmap-sdw.c @@ -59,6 +59,10 @@ static int regmap_sdw_config_check(const struct regmap_config *config) if (config->pad_bits != 0) return -ENOTSUPP; + /* Only bulk writes are supported not multi-register writes */ + if (config->can_multi_write) + return -ENOTSUPP; + return 0; } From 0cc6578048e0980d254aee345130cced4912f723 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 23 May 2023 23:18:19 +0100 Subject: [PATCH 081/303] regmap: maple: Drop the RCU read lock while syncing registers Unfortunately the maple tree requires us to explicitly lock it so we need to take the RCU read lock while iterating. When syncing this means that we end up trying to write out register values while holding the RCU read lock which triggers lockdep issues since that is an atomic context but most buses can't be used in atomic context. Pause the iteration and drop the lock for each register we check to avoid this. Reported-by: Pierre-Louis Bossart Tested-by: Pierre-Louis Bossart Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20230523-regcache-maple-sync-lock-v1-1-530e4d68dfab@kernel.org Signed-off-by: Mark Brown --- drivers/base/regmap/regcache-maple.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c index 9b1b559107ef4..c2e3a0f6c2183 100644 --- a/drivers/base/regmap/regcache-maple.c +++ b/drivers/base/regmap/regcache-maple.c @@ -203,15 +203,18 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min, mas_for_each(&mas, entry, max) { for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) { + mas_pause(&mas); + rcu_read_unlock(); ret = regcache_sync_val(map, r, entry[r - mas.index]); if (ret != 0) goto out; + rcu_read_lock(); } } -out: rcu_read_unlock(); +out: map->cache_bypass = false; return ret; From a99d21cefd351c8aaa20b83a3c942340e5789d45 Mon Sep 17 00:00:00 2001 From: Deren Wu Date: Sat, 13 May 2023 22:48:15 +0800 Subject: [PATCH 082/303] mmc: vub300: fix invalid response handling We may get an empty response with zero length at the beginning of the driver start and get following UBSAN error. Since there is no content(SDRT_NONE) for the response, just return and skip the response handling to avoid this problem. Test pass : SDIO wifi throughput test with this patch [ 126.980684] UBSAN: array-index-out-of-bounds in drivers/mmc/host/vub300.c:1719:12 [ 126.980709] index -1 is out of range for type 'u32 [4]' [ 126.980729] CPU: 4 PID: 9 Comm: kworker/u16:0 Tainted: G E 6.3.0-rc4-mtk-local-202304272142 #1 [ 126.980754] Hardware name: Intel(R) Client Systems NUC8i7BEH/NUC8BEB, BIOS BECFL357.86A.0081.2020.0504.1834 05/04/2020 [ 126.980770] Workqueue: kvub300c vub300_cmndwork_thread [vub300] [ 126.980833] Call Trace: [ 126.980845] [ 126.980860] dump_stack_lvl+0x48/0x70 [ 126.980895] dump_stack+0x10/0x20 [ 126.980916] ubsan_epilogue+0x9/0x40 [ 126.980944] __ubsan_handle_out_of_bounds+0x70/0x90 [ 126.980979] vub300_cmndwork_thread+0x58e7/0x5e10 [vub300] [ 126.981018] ? _raw_spin_unlock+0x18/0x40 [ 126.981042] ? 
finish_task_switch+0x175/0x6f0 [ 126.981070] ? __switch_to+0x42e/0xda0 [ 126.981089] ? __switch_to_asm+0x3a/0x80 [ 126.981129] ? __pfx_vub300_cmndwork_thread+0x10/0x10 [vub300] [ 126.981174] ? __kasan_check_read+0x11/0x20 [ 126.981204] process_one_work+0x7ee/0x13d0 [ 126.981246] worker_thread+0x53c/0x1240 [ 126.981291] kthread+0x2b8/0x370 [ 126.981312] ? __pfx_worker_thread+0x10/0x10 [ 126.981336] ? __pfx_kthread+0x10/0x10 [ 126.981359] ret_from_fork+0x29/0x50 [ 126.981400] Fixes: 88095e7b473a ("mmc: Add new VUB300 USB-to-SD/SDIO/MMC driver") Signed-off-by: Deren Wu Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/048cd6972c50c33c2e8f81d5228fed928519918b.1683987673.git.deren.wu@mediatek.com Signed-off-by: Ulf Hansson --- drivers/mmc/host/vub300.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index e4c4bfac3763b..9ec593d52f0fa 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -1713,6 +1713,9 @@ static void construct_request_response(struct vub300_mmc_host *vub300, int bytes = 3 & less_cmd; int words = less_cmd >> 2; u8 *r = vub300->resp.response.command_response; + + if (!resp_len) + return; if (bytes == 3) { cmd->resp[words] = (r[1 + (words << 2)] << 24) | (r[2 + (words << 2)] << 16) From 0b5d5c436a5c572a45f976cfd34a6741e143e5d9 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Sat, 13 May 2023 21:23:52 +0200 Subject: [PATCH 083/303] mmc: pwrseq: sd8787: Fix WILC CHIP_EN and RESETN toggling order Chapter "5.3 Power-Up/Down Sequence" of WILC1000 [1] and WILC3000 [2] states that CHIP_EN must be pulled HIGH first, RESETN second. Fix the order of these signals in the driver. Use the mmc_pwrseq_ops as driver data as the delay between signals is specific to SDIO card type anyway. 
[1] https://ww1.microchip.com/downloads/aemDocuments/documents/WSG/ProductDocuments/DataSheets/ATWILC1000-MR110XB-IEEE-802.11-b-g-n-Link-Controller-Module-DS70005326E.pdf [2] https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ProductDocuments/DataSheets/IEEE-802.11-b-g-n-Link-Controller-Module-with-Integrated-Bluetooth-5.0-DS70005327B.pdf Fixes: b2832b96fcf5 ("mmc: pwrseq: sd8787: add support for wilc1000") Signed-off-by: Marek Vasut Reviewed-by: Claudiu Beznea Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230513192352.479627-1-marex@denx.de Signed-off-by: Ulf Hansson --- drivers/mmc/core/pwrseq_sd8787.c | 34 ++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/drivers/mmc/core/pwrseq_sd8787.c b/drivers/mmc/core/pwrseq_sd8787.c index 2e120ad83020f..0c5f5e371e1f8 100644 --- a/drivers/mmc/core/pwrseq_sd8787.c +++ b/drivers/mmc/core/pwrseq_sd8787.c @@ -28,7 +28,6 @@ struct mmc_pwrseq_sd8787 { struct mmc_pwrseq pwrseq; struct gpio_desc *reset_gpio; struct gpio_desc *pwrdn_gpio; - u32 reset_pwrdwn_delay_ms; }; #define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq) @@ -39,7 +38,7 @@ static void mmc_pwrseq_sd8787_pre_power_on(struct mmc_host *host) gpiod_set_value_cansleep(pwrseq->reset_gpio, 1); - msleep(pwrseq->reset_pwrdwn_delay_ms); + msleep(300); gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1); } @@ -51,17 +50,37 @@ static void mmc_pwrseq_sd8787_power_off(struct mmc_host *host) gpiod_set_value_cansleep(pwrseq->reset_gpio, 0); } +static void mmc_pwrseq_wilc1000_pre_power_on(struct mmc_host *host) +{ + struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq); + + /* The pwrdn_gpio is really CHIP_EN, reset_gpio is RESETN */ + gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1); + msleep(5); + gpiod_set_value_cansleep(pwrseq->reset_gpio, 1); +} + +static void mmc_pwrseq_wilc1000_power_off(struct mmc_host *host) +{ + struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq); + + gpiod_set_value_cansleep(pwrseq->reset_gpio, 0); + gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 0); +} + static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = { .pre_power_on = mmc_pwrseq_sd8787_pre_power_on, .power_off = mmc_pwrseq_sd8787_power_off, }; -static const u32 sd8787_delay_ms = 300; -static const u32 wilc1000_delay_ms = 5; +static const struct mmc_pwrseq_ops mmc_pwrseq_wilc1000_ops = { + .pre_power_on = mmc_pwrseq_wilc1000_pre_power_on, + .power_off = mmc_pwrseq_wilc1000_power_off, +}; static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = { - { .compatible = "mmc-pwrseq-sd8787", .data = &sd8787_delay_ms }, - { .compatible = "mmc-pwrseq-wilc1000", .data = &wilc1000_delay_ms }, + { .compatible = "mmc-pwrseq-sd8787", .data = &mmc_pwrseq_sd8787_ops }, + { .compatible = "mmc-pwrseq-wilc1000", .data = &mmc_pwrseq_wilc1000_ops }, {/* sentinel */}, }; MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match); @@ -77,7 +96,6 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev) return -ENOMEM; match = of_match_node(mmc_pwrseq_sd8787_of_match, pdev->dev.of_node); - pwrseq->reset_pwrdwn_delay_ms = *(u32 *)match->data; pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW); if (IS_ERR(pwrseq->pwrdn_gpio)) @@ -88,7 +106,7 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev) return PTR_ERR(pwrseq->reset_gpio); pwrseq->pwrseq.dev = dev; - pwrseq->pwrseq.ops = &mmc_pwrseq_sd8787_ops; + pwrseq->pwrseq.ops = match->data; pwrseq->pwrseq.owner = THIS_MODULE; 
platform_set_drvdata(pdev, pwrseq); From 8d0f019e4c4f2ee2de81efd9bf1c27e9fb3c0460 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 15 May 2023 21:46:00 +0100 Subject: [PATCH 084/303] arm64: Add missing Set/Way CMO encodings Add the missing Set/Way CMOs that apply to tagged memory. Signed-off-by: Marc Zyngier Reviewed-by: Cornelia Huck Reviewed-by: Steven Price Reviewed-by: Oliver Upton Link: https://lore.kernel.org/r/20230515204601.1270428-2-maz@kernel.org --- arch/arm64/include/asm/sysreg.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index e72d9aaab6b1b..eefd712f24303 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -115,8 +115,14 @@ #define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31) #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2) +#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4) +#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6) #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2) +#define SYS_DC_CGSW sys_insn(1, 0, 7, 10, 4) +#define SYS_DC_CGDSW sys_insn(1, 0, 7, 10, 6) #define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2) +#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4) +#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6) /* * Automatically generated definitions for system registers, the From d282fa3c5ccb7a0029c418f358143689553b6447 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 15 May 2023 21:46:01 +0100 Subject: [PATCH 085/303] KVM: arm64: Handle trap of tagged Set/Way CMOs We appear to have missed the Set/Way CMOs when adding MTE support. Not that we really expect anyone to use them, but you never know what stupidity some people can come up with... Treat these mostly like we deal with the classic S/W CMOs, only with an additional check that MTE really is enabled. 
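The ops being trapped here are the ones added by the previous patch; for reference, the tagged variants reuse the classic Set/Way encodings: CRm selects invalidate (6), clean (10) or clean+invalidate (14), while op2 distinguishes the data (2), tag-only (4) and tag-plus-data (6) forms. The table below is only an illustrative summary of those values (copied from the sysreg hunk above), not kernel code:

  /* All of these encodings use op0=1, op1=0, CRn=7; only CRm and op2 vary. */
  struct sw_cmo { const char *name; unsigned int crm; unsigned int op2; };

  static const struct sw_cmo sw_cmos[] = {
          { "DC ISW",   6, 2 }, { "DC IGSW",   6, 4 }, { "DC IGDSW",   6, 6 },
          { "DC CSW",  10, 2 }, { "DC CGSW",  10, 4 }, { "DC CGDSW",  10, 6 },
          { "DC CISW", 14, 2 }, { "DC CIGSW", 14, 4 }, { "DC CIGDSW", 14, 6 },
  };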
Signed-off-by: Marc Zyngier Reviewed-by: Cornelia Huck Reviewed-by: Steven Price Reviewed-by: Oliver Upton Link: https://lore.kernel.org/r/20230515204601.1270428-3-maz@kernel.org --- arch/arm64/kvm/sys_regs.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 71b12094d6137..753aa74181497 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -211,6 +211,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, return true; } +static bool access_dcgsw(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (!kvm_has_mte(vcpu->kvm)) { + kvm_inject_undefined(vcpu); + return false; + } + + /* Treat MTE S/W ops as we treat the classic ones: with contempt */ + return access_dcsw(vcpu, p, r); +} + static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift) { switch (r->aarch32_map) { @@ -1756,8 +1769,14 @@ static bool access_spsr(struct kvm_vcpu *vcpu, */ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_DC_ISW), access_dcsw }, + { SYS_DESC(SYS_DC_IGSW), access_dcgsw }, + { SYS_DESC(SYS_DC_IGDSW), access_dcgsw }, { SYS_DESC(SYS_DC_CSW), access_dcsw }, + { SYS_DESC(SYS_DC_CGSW), access_dcgsw }, + { SYS_DESC(SYS_DC_CGDSW), access_dcgsw }, { SYS_DESC(SYS_DC_CISW), access_dcsw }, + { SYS_DESC(SYS_DC_CIGSW), access_dcgsw }, + { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw }, DBG_BCR_BVR_WCR_WVR_EL1(0), DBG_BCR_BVR_WCR_WVR_EL1(1), From a9f0e3d5a089d0844abb679a5e99f15010d53e25 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 22 May 2023 11:32:58 +0100 Subject: [PATCH 086/303] KVM: arm64: Reload PTE after invoking walker callback on preorder traversal The preorder callback on the kvm_pgtable_stage2_map() path can replace a table with a block, then recursively free the detached table. The higher-level walking logic stashes the old page table entry and then walks the freed table, invoking the leaf callback and potentially freeing pgtable pages prematurely. In normal operation, the call to tear down the detached stage-2 is indirected and uses an RCU callback to trigger the freeing. RCU is not available to pKVM, which is where this bug is triggered. Change the behavior of the walker to reload the page table entry after invoking the walker callback on preorder traversal, as it does for leaf entries. Tested on Pixel 6. Fixes: 5c359cca1faf ("KVM: arm64: Tear down unlinked stage-2 subtree after break-before-make") Suggested-by: Oliver Upton Signed-off-by: Fuad Tabba Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230522103258.402272-1-tabba@google.com --- arch/arm64/include/asm/kvm_pgtable.h | 6 +++--- arch/arm64/kvm/hyp/pgtable.c | 14 +++++++++++++- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index dc3c072e862f1..93bd0975b15f5 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -632,9 +632,9 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size); * * The walker will walk the page-table entries corresponding to the input * address range specified, visiting entries according to the walker flags. - * Invalid entries are treated as leaf entries. Leaf entries are reloaded - * after invoking the walker callback, allowing the walker to descend into - * a newly installed table. + * Invalid entries are treated as leaf entries. 
The visited page table entry is + * reloaded after invoking the walker callback, allowing the walker to descend + * into a newly installed table. * * Returning a negative error code from the walker callback function will * terminate the walk immediately with the same error code. diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 5282cb9ca4cff..e1eacffbc41f4 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -209,14 +209,26 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data, .flags = flags, }; int ret = 0; + bool reload = false; kvm_pteref_t childp; bool table = kvm_pte_table(ctx.old, level); - if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) + if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) { ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE); + reload = true; + } if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) { ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF); + reload = true; + } + + /* + * Reload the page table after invoking the walker callback for leaf + * entries or after pre-order traversal, to allow the walker to descend + * into a newly installed or replaced table. + */ + if (reload) { ctx.old = READ_ONCE(*ptep); table = kvm_pte_table(ctx.old, level); } From 33d418da6f476b15e4510e0a590062583f63cd36 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Fri, 19 May 2023 15:13:11 +0200 Subject: [PATCH 087/303] riscv: Fix unused variable warning when BUILTIN_DTB is set MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit ef69d2559fe9 ("riscv: Move early dtb mapping into the fixmap region") wrongly moved the #ifndef CONFIG_BUILTIN_DTB surrounding the pa variable definition in create_fdt_early_page_table(), so move it back to its right place to quiet the following warning: ../arch/riscv/mm/init.c: In function ‘create_fdt_early_page_table’: ../arch/riscv/mm/init.c:925:12: warning: unused variable ‘pa’ [-Wunused-variable] 925 | uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1); Fixes: ef69d2559fe9 ("riscv: Move early dtb mapping into the fixmap region") Signed-off-by: Alexandre Ghiti Reviewed-by: Conor Dooley Link: https://lore.kernel.org/r/20230519131311.391960-1-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 747e5b1ef02d3..c6bb966e41237 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -922,9 +922,9 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early) static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va, uintptr_t dtb_pa) { +#ifndef CONFIG_BUILTIN_DTB uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1); -#ifndef CONFIG_BUILTIN_DTB /* Make sure the fdt fixmap address is always aligned on PMD size */ BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE)); From 8557dc27126949c702bd3aafe8a7e0b7e4fcb44c Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Wed, 24 May 2023 09:41:18 +0800 Subject: [PATCH 088/303] md/raid5: fix miscalculation of 'end_sector' in raid5_read_one_chunk() 'end_sector' is compared to 'rdev->recovery_offset', which is offset to rdev, however, commit e82ed3a4fbb5 ("md/raid6: refactor raid5_read_one_chunk") changes the calculation of 'end_sector' to offset to the array. Fix this miscalculation. 
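A small, self-contained sketch of the arithmetic may help (plain C types, made-up numbers; not kernel code): raid5_compute_sector() translates the array-relative start of the bio into a device-relative sector, so the end sector compared against 'rdev->recovery_offset' must be derived from that translated start, not from bio_end_sector():

  typedef unsigned long long sector_t;

  /* Illustrative only: an 8-sector bio at array sector 1000 that
   * raid5_compute_sector() maps to sector 200 on the chosen rdev. */
  static int chunk_is_recovered(sector_t recovery_offset)
  {
          sector_t len = 8;                        /* bio_sectors(raid_bio) */
          sector_t array_start = 1000;             /* raid_bio->bi_iter.bi_sector */
          sector_t dev_start = 200;                /* raid5_compute_sector() result */

          sector_t wrong_end = array_start + len;  /* bio_end_sector(): array-relative */
          sector_t right_end = dev_start + len;    /* sector + bio_sectors(): rdev-relative */

          (void)wrong_end;
          /* recovery_offset is rdev-relative, so only right_end compares sanely */
          return right_end <= recovery_offset;
  }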
Fixes: e82ed3a4fbb5 ("md/raid6: refactor raid5_read_one_chunk") Cc: stable@vger.kernel.org # v5.12+ Signed-off-by: Yu Kuai Reviewed-by: Christoph Hellwig Signed-off-by: Song Liu Link: https://lore.kernel.org/r/20230524014118.3172781-1-yukuai1@huaweicloud.com --- drivers/md/raid5.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4739ed891e756..9ea285fbc4a65 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5516,7 +5516,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0, &dd_idx, NULL); - end_sector = bio_end_sector(raid_bio); + end_sector = sector + bio_sectors(raid_bio); rcu_read_lock(); if (r5c_big_stripe_cached(conf, sector)) From e2ab5aa11f191b54514f063a5b5c29f3559f4ab7 Mon Sep 17 00:00:00 2001 From: Chris Mi Date: Wed, 1 Mar 2023 10:50:53 +0200 Subject: [PATCH 089/303] net/mlx5e: Extract remaining tunnel encap code to dedicated file Move set_encap_dests() and clean_encap_dests() to the tunnel encap dedicated file. And rename them to mlx5e_tc_tun_encap_dests_set() and mlx5e_tc_tun_encap_dests_unset(). No functional change in this patch. It is needed in the next patch. Signed-off-by: Chris Mi Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en/tc_tun_encap.c | 83 +++++++++++++++++ .../mellanox/mlx5/core/en/tc_tun_encap.h | 9 ++ .../net/ethernet/mellanox/mlx5/core/en_tc.c | 89 +------------------ 3 files changed, 94 insertions(+), 87 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 6a052c6cfc158..d516227b8fe88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -1016,6 +1016,89 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv, return err; } +int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr, + struct netlink_ext_ack *extack, + bool *vf_tun) +{ + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_esw_flow_attr *esw_attr; + struct net_device *encap_dev = NULL; + struct mlx5e_rep_priv *rpriv; + struct mlx5e_priv *out_priv; + int out_index; + int err = 0; + + if (!mlx5e_is_eswitch_flow(flow)) + return 0; + + parse_attr = attr->parse_attr; + esw_attr = attr->esw_attr; + *vf_tun = false; + + for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { + struct net_device *out_dev; + int mirred_ifindex; + + if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) + continue; + + mirred_ifindex = parse_attr->mirred_ifindex[out_index]; + out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex); + if (!out_dev) { + NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found"); + err = -ENODEV; + goto out; + } + err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index, + extack, &encap_dev); + dev_put(out_dev); + if (err) + goto out; + + if (esw_attr->dests[out_index].flags & + MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE && + !esw_attr->dest_int_port) + *vf_tun = true; + + out_priv = netdev_priv(encap_dev); + rpriv = out_priv->ppriv; + esw_attr->dests[out_index].rep = rpriv->rep; + esw_attr->dests[out_index].mdev = out_priv->mdev; + } + + if (*vf_tun && esw_attr->out_count > 1) { + NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported"); + err = -EOPNOTSUPP; + goto out; + } + +out: + return err; +} + +void 
mlx5e_tc_tun_encap_dests_unset(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr; + int out_index; + + if (!mlx5e_is_eswitch_flow(flow)) + return; + + esw_attr = attr->esw_attr; + + for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { + if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) + continue; + + mlx5e_detach_encap(flow->priv, flow, attr, out_index); + kfree(attr->parse_attr->tun_info[out_index]); + } +} + static int cmp_route_info(struct mlx5e_route_key *a, struct mlx5e_route_key *b) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h index 8ad273dde40ee..5d7d67687cbcd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h @@ -30,6 +30,15 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv, void mlx5e_detach_decap_route(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow); +int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr, + struct netlink_ext_ack *extack, + bool *vf_tun); +void mlx5e_tc_tun_encap_dests_unset(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr); + struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info); int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index e95414ef1f040..8935156f8f4e5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1699,91 +1699,6 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); } -static int -set_encap_dests(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct mlx5_flow_attr *attr, - struct netlink_ext_ack *extack, - bool *vf_tun) -{ - struct mlx5e_tc_flow_parse_attr *parse_attr; - struct mlx5_esw_flow_attr *esw_attr; - struct net_device *encap_dev = NULL; - struct mlx5e_rep_priv *rpriv; - struct mlx5e_priv *out_priv; - int out_index; - int err = 0; - - if (!mlx5e_is_eswitch_flow(flow)) - return 0; - - parse_attr = attr->parse_attr; - esw_attr = attr->esw_attr; - *vf_tun = false; - - for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { - struct net_device *out_dev; - int mirred_ifindex; - - if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) - continue; - - mirred_ifindex = parse_attr->mirred_ifindex[out_index]; - out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex); - if (!out_dev) { - NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found"); - err = -ENODEV; - goto out; - } - err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index, - extack, &encap_dev); - dev_put(out_dev); - if (err) - goto out; - - if (esw_attr->dests[out_index].flags & - MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE && - !esw_attr->dest_int_port) - *vf_tun = true; - - out_priv = netdev_priv(encap_dev); - rpriv = out_priv->ppriv; - esw_attr->dests[out_index].rep = rpriv->rep; - esw_attr->dests[out_index].mdev = out_priv->mdev; - } - - if (*vf_tun && esw_attr->out_count > 1) { - NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported"); - err = -EOPNOTSUPP; - goto out; - } - -out: - return err; -} - -static void 
-clean_encap_dests(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct mlx5_flow_attr *attr) -{ - struct mlx5_esw_flow_attr *esw_attr; - int out_index; - - if (!mlx5e_is_eswitch_flow(flow)) - return; - - esw_attr = attr->esw_attr; - - for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { - if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) - continue; - - mlx5e_detach_encap(priv, flow, attr, out_index); - kfree(attr->parse_attr->tun_info[out_index]); - } -} - static int verify_attr_actions(u32 actions, struct netlink_ext_ack *extack) { @@ -1820,7 +1735,7 @@ post_process_attr(struct mlx5e_tc_flow *flow, if (err) goto err_out; - err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun); + err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun); if (err) goto err_out; @@ -4324,7 +4239,7 @@ mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *a if (attr->post_act_handle) mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle); - clean_encap_dests(flow->priv, flow, attr); + mlx5e_tc_tun_encap_dests_unset(flow->priv, flow, attr); if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) mlx5_fc_destroy(counter_dev, attr->counter); From 37c3b9fa7ccf5caad6d87ba4d42bf00be46be1cf Mon Sep 17 00:00:00 2001 From: Chris Mi Date: Tue, 21 Feb 2023 04:41:41 +0200 Subject: [PATCH 090/303] net/mlx5e: Prevent encap offload when neigh update is running The cited commit adds a compeletion to remove dependency on rtnl lock. But it causes a deadlock for multiple encapsulations: crash> bt ffff8aece8a64000 PID: 1514557 TASK: ffff8aece8a64000 CPU: 3 COMMAND: "tc" #0 [ffffa6d14183f368] __schedule at ffffffffb8ba7f45 #1 [ffffa6d14183f3f8] schedule at ffffffffb8ba8418 #2 [ffffa6d14183f418] schedule_preempt_disabled at ffffffffb8ba8898 #3 [ffffa6d14183f428] __mutex_lock at ffffffffb8baa7f8 #4 [ffffa6d14183f4d0] mutex_lock_nested at ffffffffb8baabeb #5 [ffffa6d14183f4e0] mlx5e_attach_encap at ffffffffc0f48c17 [mlx5_core] #6 [ffffa6d14183f628] mlx5e_tc_add_fdb_flow at ffffffffc0f39680 [mlx5_core] #7 [ffffa6d14183f688] __mlx5e_add_fdb_flow at ffffffffc0f3b636 [mlx5_core] #8 [ffffa6d14183f6f0] mlx5e_tc_add_flow at ffffffffc0f3bcdf [mlx5_core] #9 [ffffa6d14183f728] mlx5e_configure_flower at ffffffffc0f3c1d1 [mlx5_core] #10 [ffffa6d14183f790] mlx5e_rep_setup_tc_cls_flower at ffffffffc0f3d529 [mlx5_core] #11 [ffffa6d14183f7a0] mlx5e_rep_setup_tc_cb at ffffffffc0f3d714 [mlx5_core] #12 [ffffa6d14183f7b0] tc_setup_cb_add at ffffffffb8931bb8 #13 [ffffa6d14183f810] fl_hw_replace_filter at ffffffffc0dae901 [cls_flower] #14 [ffffa6d14183f8d8] fl_change at ffffffffc0db5c57 [cls_flower] #15 [ffffa6d14183f970] tc_new_tfilter at ffffffffb8936047 #16 [ffffa6d14183fac8] rtnetlink_rcv_msg at ffffffffb88c7c31 #17 [ffffa6d14183fb50] netlink_rcv_skb at ffffffffb8942853 #18 [ffffa6d14183fbc0] rtnetlink_rcv at ffffffffb88c1835 #19 [ffffa6d14183fbd0] netlink_unicast at ffffffffb8941f27 #20 [ffffa6d14183fc18] netlink_sendmsg at ffffffffb8942245 #21 [ffffa6d14183fc98] sock_sendmsg at ffffffffb887d482 #22 [ffffa6d14183fcb8] ____sys_sendmsg at ffffffffb887d81a #23 [ffffa6d14183fd38] ___sys_sendmsg at ffffffffb88806e2 #24 [ffffa6d14183fe90] __sys_sendmsg at ffffffffb88807a2 #25 [ffffa6d14183ff28] __x64_sys_sendmsg at ffffffffb888080f #26 [ffffa6d14183ff38] do_syscall_64 at ffffffffb8b9b6a8 #27 [ffffa6d14183ff50] entry_SYSCALL_64_after_hwframe at ffffffffb8c0007c crash> bt 0xffff8aeb07544000 PID: 1110766 TASK: ffff8aeb07544000 CPU: 0 COMMAND: 
"kworker/u20:9" #0 [ffffa6d14e6b7bd8] __schedule at ffffffffb8ba7f45 #1 [ffffa6d14e6b7c68] schedule at ffffffffb8ba8418 #2 [ffffa6d14e6b7c88] schedule_timeout at ffffffffb8baef88 #3 [ffffa6d14e6b7d10] wait_for_completion at ffffffffb8ba968b #4 [ffffa6d14e6b7d60] mlx5e_take_all_encap_flows at ffffffffc0f47ec4 [mlx5_core] #5 [ffffa6d14e6b7da0] mlx5e_rep_update_flows at ffffffffc0f3e734 [mlx5_core] #6 [ffffa6d14e6b7df8] mlx5e_rep_neigh_update at ffffffffc0f400bb [mlx5_core] #7 [ffffa6d14e6b7e50] process_one_work at ffffffffb80acc9c #8 [ffffa6d14e6b7ed0] worker_thread at ffffffffb80ad012 #9 [ffffa6d14e6b7f10] kthread at ffffffffb80b615d #10 [ffffa6d14e6b7f50] ret_from_fork at ffffffffb8001b2f After the first encap is attached, flow will be added to encap entry's flows list. If neigh update is running at this time, the following encaps of the flow can't hold the encap_tbl_lock and sleep. If neigh update thread is waiting for that flow's init_done, deadlock happens. Fix it by holding lock outside of the for loop. If neigh update is running, prevent encap flows from offloading. Since the lock is held outside of the for loop, concurrent creation of encap entries is not allowed. So remove unnecessary wait_for_completion call for res_ready. Fixes: 95435ad7999b ("net/mlx5e: Only access fully initialized flows in neigh update") Signed-off-by: Chris Mi Reviewed-by: Roi Dayan Reviewed-by: Vlad Buslov Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en/tc_tun_encap.c | 37 ++++++++++--------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index d516227b8fe88..f0c3464f037f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -492,6 +492,19 @@ void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) mlx5e_encap_dealloc(priv, e); } +static void mlx5e_encap_put_locked(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + lockdep_assert_held(&esw->offloads.encap_tbl_lock); + + if (!refcount_dec_and_test(&e->refcnt)) + return; + list_del(&e->route_list); + hash_del_rcu(&e->encap_hlist); + mlx5e_encap_dealloc(priv, e); +} + static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -816,6 +829,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, uintptr_t hash_key; int err = 0; + lockdep_assert_held(&esw->offloads.encap_tbl_lock); + parse_attr = attr->parse_attr; tun_info = parse_attr->tun_info[out_index]; mpls_info = &parse_attr->mpls_info[out_index]; @@ -829,7 +844,6 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, hash_key = hash_encap_info(&key); - mutex_lock(&esw->offloads.encap_tbl_lock); e = mlx5e_encap_get(priv, &key, hash_key); /* must verify if encap is valid or not */ @@ -840,15 +854,6 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, goto out_err; } - mutex_unlock(&esw->offloads.encap_tbl_lock); - wait_for_completion(&e->res_ready); - - /* Protect against concurrent neigh update. 
*/ - mutex_lock(&esw->offloads.encap_tbl_lock); - if (e->compl_result < 0) { - err = -EREMOTEIO; - goto out_err; - } goto attach_flow; } @@ -877,15 +882,12 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, INIT_LIST_HEAD(&e->flows); hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); tbl_time_before = mlx5e_route_tbl_get_last_update(priv); - mutex_unlock(&esw->offloads.encap_tbl_lock); if (family == AF_INET) err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e); else if (family == AF_INET6) err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e); - /* Protect against concurrent neigh update. */ - mutex_lock(&esw->offloads.encap_tbl_lock); complete_all(&e->res_ready); if (err) { e->compl_result = err; @@ -920,18 +922,15 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv, } else { flow_flag_set(flow, SLOW); } - mutex_unlock(&esw->offloads.encap_tbl_lock); return err; out_err: - mutex_unlock(&esw->offloads.encap_tbl_lock); if (e) - mlx5e_encap_put(priv, e); + mlx5e_encap_put_locked(priv, e); return err; out_err_init: - mutex_unlock(&esw->offloads.encap_tbl_lock); kfree(tun_info); kfree(e); return err; @@ -1027,6 +1026,7 @@ int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv, struct net_device *encap_dev = NULL; struct mlx5e_rep_priv *rpriv; struct mlx5e_priv *out_priv; + struct mlx5_eswitch *esw; int out_index; int err = 0; @@ -1037,6 +1037,8 @@ int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv, esw_attr = attr->esw_attr; *vf_tun = false; + esw = priv->mdev->priv.eswitch; + mutex_lock(&esw->offloads.encap_tbl_lock); for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { struct net_device *out_dev; int mirred_ifindex; @@ -1075,6 +1077,7 @@ int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv, } out: + mutex_unlock(&esw->offloads.encap_tbl_lock); return err; } From 81fe2be062915e2a2fdc494c3cd90e946e946c25 Mon Sep 17 00:00:00 2001 From: Maher Sanalla Date: Mon, 1 May 2023 17:31:40 +0300 Subject: [PATCH 091/303] net/mlx5e: Consider internal buffers size in port buffer calculations Currently, when a user triggers a change in port buffer headroom (buffers 0-7), the driver checks that the requested headroom does not exceed the total port buffer size. However, this check does not take into account the internal buffers (buffers 8-9), which are also part of the total port buffer. This can result in treating invalid port buffer change requests as valid, causing unintended changes to the shared buffer. To address this, include the internal buffers size in the calculation of available port buffer space which ensures that port buffer requests do not exceed the correct limit. Furthermore, remove internal buffers (8-9) size from the total_size calculation as these buffers are reserved for internal use and are not exposed to the user. While at it, add verbosity to the debug prints in mlx5e_port_query_buffer() function to ease future debugging. 
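A simplified sketch of the corrected accounting (plain C, illustrative only; field names follow the patch below): the space available for a headroom change is the total port buffer minus the internal buffers 8-9 and the headroom currently in use, and a request is valid only if its growth beyond the current headroom fits in that spare space:

  #include <stdbool.h>

  struct port_buf_sizes {
          unsigned int port_buffer_size;       /* buffers 0-9 */
          unsigned int internal_buffers_size;  /* buffers 8-9 */
          unsigned int headroom_size;          /* buffers 0-7, currently used */
  };

  static bool headroom_request_fits(const struct port_buf_sizes *b,
                                    unsigned int total_requested)
  {
          unsigned int spare = b->port_buffer_size -
                               b->internal_buffers_size -
                               b->headroom_size;

          /* mirrors the new check in mlx5e_port_manual_buffer_config() */
          return total_requested <= b->headroom_size ||
                 total_requested - b->headroom_size <= spare;
  }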
Fixes: ecdf2dadee8e ("net/mlx5e: Receive buffer support for DCBX") Signed-off-by: Maher Sanalla Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en/port_buffer.c | 42 ++++++++++++------- .../mellanox/mlx5/core/en/port_buffer.h | 8 ++-- .../ethernet/mellanox/mlx5/core/en_dcbnl.c | 7 ++-- 3 files changed, 36 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index 7ac1ad9c46de0..0d78527451bca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -51,7 +51,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, if (err) goto out; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]); port_buffer->buffer[i].lossy = MLX5_GET(bufferx_reg, buffer, lossy); @@ -73,14 +73,24 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv, port_buffer->buffer[i].lossy); } - port_buffer->headroom_size = total_used; + port_buffer->internal_buffers_size = 0; + for (i = MLX5E_MAX_NETWORK_BUFFER; i < MLX5E_TOTAL_BUFFERS; i++) { + buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]); + port_buffer->internal_buffers_size += + MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz; + } + port_buffer->port_buffer_size = MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz; - port_buffer->spare_buffer_size = - port_buffer->port_buffer_size - total_used; - - mlx5e_dbg(HW, priv, "total buffer size=%d, spare buffer size=%d\n", - port_buffer->port_buffer_size, + port_buffer->headroom_size = total_used; + port_buffer->spare_buffer_size = port_buffer->port_buffer_size - + port_buffer->internal_buffers_size - + port_buffer->headroom_size; + + mlx5e_dbg(HW, priv, + "total buffer size=%u, headroom buffer size=%u, internal buffers size=%u, spare buffer size=%u\n", + port_buffer->port_buffer_size, port_buffer->headroom_size, + port_buffer->internal_buffers_size, port_buffer->spare_buffer_size); out: kfree(out); @@ -206,11 +216,11 @@ static int port_update_pool_cfg(struct mlx5_core_dev *mdev, if (!MLX5_CAP_GEN(mdev, sbcam_reg)) return 0; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) lossless_buff_count += ((port_buffer->buffer[i].size) && (!(port_buffer->buffer[i].lossy))); - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { p = select_sbcm_params(&port_buffer->buffer[i], lossless_buff_count); err = mlx5e_port_set_sbcm(mdev, 0, i, MLX5_INGRESS_DIR, @@ -293,7 +303,7 @@ static int port_set_buffer(struct mlx5e_priv *priv, if (err) goto out; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); u64 size = port_buffer->buffer[i].size; u64 xoff = port_buffer->buffer[i].xoff; @@ -351,7 +361,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, { int i; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { if (port_buffer->buffer[i].lossy) { port_buffer->buffer[i].xoff = 0; port_buffer->buffer[i].xon = 0; @@ -408,7 +418,7 @@ static int update_buffer_lossy(struct mlx5_core_dev *mdev, int err; int i; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { prio_count = 0; lossy_count = 0; @@ -515,7 +525,7 @@ int mlx5e_port_manual_buffer_config(struct 
mlx5e_priv *priv, if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) { update_prio2buffer = true; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n", __func__, i, prio2buffer[i]); @@ -530,7 +540,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, } if (change & MLX5E_PORT_BUFFER_SIZE) { - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]); if (!port_buffer.buffer[i].lossy && !buffer_size[i]) { mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n", @@ -544,7 +554,9 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used); - if (total_used > port_buffer.port_buffer_size) + if (total_used > port_buffer.headroom_size && + (total_used - port_buffer.headroom_size) > + port_buffer.spare_buffer_size) return -EINVAL; update_buffer = true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h index a6ef118de758f..f4a19ffbb641c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h @@ -35,7 +35,8 @@ #include "en.h" #include "port.h" -#define MLX5E_MAX_BUFFER 8 +#define MLX5E_MAX_NETWORK_BUFFER 8 +#define MLX5E_TOTAL_BUFFERS 10 #define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */ #define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \ @@ -60,8 +61,9 @@ struct mlx5e_bufferx_reg { struct mlx5e_port_buffer { u32 port_buffer_size; u32 spare_buffer_size; - u32 headroom_size; - struct mlx5e_bufferx_reg buffer[MLX5E_MAX_BUFFER]; + u32 headroom_size; /* Buffers 0-7 */ + u32 internal_buffers_size; /* Buffers 8-9 */ + struct mlx5e_bufferx_reg buffer[MLX5E_MAX_NETWORK_BUFFER]; }; int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 89de92d064836..ebee52a8361aa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -926,9 +926,10 @@ static int mlx5e_dcbnl_getbuffer(struct net_device *dev, if (err) return err; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size; - dcb_buffer->total_size = port_buffer.port_buffer_size; + dcb_buffer->total_size = port_buffer.port_buffer_size - + port_buffer.internal_buffers_size; return 0; } @@ -970,7 +971,7 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev, if (err) return err; - for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) { if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) { changed |= MLX5E_PORT_BUFFER_SIZE; buffer_size = dcb_buffer->buffer_size; From 623efc4cbd6115db36716e31037cb6d1f3ce6754 Mon Sep 17 00:00:00 2001 From: Maher Sanalla Date: Tue, 9 May 2023 17:56:01 +0300 Subject: [PATCH 092/303] net/mlx5e: Do not update SBCM when prio2buffer command is invalid The shared buffer pools configuration which are stored in the SBCM register are updated when the user changes the prio2buffer mapping. 
However, in case the user desired prio2buffer change is invalid, which can occur due to mapping a lossless priority to a not large enough buffer, the SBCM update should not be performed, as the user command is failed. Thus, Perform the SBCM update only after xoff threshold calculation is performed and the user prio2buffer mapping is validated. Fixes: a440030d8946 ("net/mlx5e: Update shared buffer along with device buffer changes") Signed-off-by: Maher Sanalla Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index 0d78527451bca..7e8e96cc5cd08 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -442,11 +442,11 @@ static int update_buffer_lossy(struct mlx5_core_dev *mdev, } if (changed) { - err = port_update_pool_cfg(mdev, port_buffer); + err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz); if (err) return err; - err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz); + err = port_update_pool_cfg(mdev, port_buffer); if (err) return err; From 824c8dc4a470040bf0e56ba716543839c2498d49 Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Mon, 24 Apr 2023 12:31:59 +0300 Subject: [PATCH 093/303] net/mlx5: Drain health before unregistering devlink mlx5 health mechanism is using devlink APIs, which are using devlink notify APIs. After the cited patch, using devlink notify APIs after devlink is unregistered triggers a WARN_ON(). Hence, drain health WQ before devlink is unregistered. Fixes: cf530217408e ("devlink: Notify users when objects are accessible") Signed-off-by: Shay Drory Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index a7eb65cd0bddd..2132a65106391 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1802,15 +1802,16 @@ static void remove_one(struct pci_dev *pdev) struct devlink *devlink = priv_to_devlink(dev); set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); - /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain - * fw_reset before unregistering the devlink. + /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using + * devlink notify APIs. + * Hence, we must drain them before unregistering the devlink. */ mlx5_drain_fw_reset(dev); + mlx5_drain_health_wq(dev); devlink_unregister(devlink); mlx5_sriov_disable(pdev); mlx5_thermal_uninit(dev); mlx5_crdump_disable(dev); - mlx5_drain_health_wq(dev); mlx5_uninit_one(dev); mlx5_pci_close(dev); mlx5_mdev_uninit(dev); From b4646da0573fae9dfa2b8f1f10936cb6eedd7230 Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Mon, 24 Apr 2023 12:46:06 +0300 Subject: [PATCH 094/303] net/mlx5: SF, Drain health before removing device There is no point in recovery during device removal. Also, if health work started need to wait for it to avoid races and NULL pointer access. Hence, drain health WQ before removing device. 
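The resulting removal order, with the rationale above as comments, looks roughly as follows (a condensed sketch of the hunk below, assuming the driver's internal headers; the function name is only for illustration):

  static void mlx5_sf_dev_remove_sketch(struct mlx5_sf_dev *sf_dev,
                                        struct devlink *devlink)
  {
          /* no recovery makes sense during removal; this also waits for any
           * health work already started, avoiding races and NULL derefs */
          mlx5_drain_health_wq(sf_dev->mdev);
          /* only now is it safe to pull devlink out from under health */
          devlink_unregister(devlink);
          mlx5_uninit_one(sf_dev->mdev);
          iounmap(sf_dev->mdev->iseg);
  }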
Fixes: 1958fc2f0712 ("net/mlx5: SF, Add auxiliary device driver") Signed-off-by: Shay Drory Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index e2f26d0bc6154..0692363cf80e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -63,6 +63,7 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev) struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); struct devlink *devlink = priv_to_devlink(sf_dev->mdev); + mlx5_drain_health_wq(sf_dev->mdev); devlink_unregister(devlink); mlx5_uninit_one(sf_dev->mdev); iounmap(sf_dev->mdev->iseg); From 341a80de2468f481b1f771683709b5649cbfe513 Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Sat, 29 Apr 2023 20:41:41 +0300 Subject: [PATCH 095/303] net/mlx5: fw_tracer, Fix event handling mlx5 driver needs to parse traces with event_id inside the range of first_string_trace and num_string_trace. However, mlx5 is parsing all events with event_id >= first_string_trace. Fix it by checking for the correct range. Fixes: c71ad41ccb0c ("net/mlx5: FW tracer, events handling") Signed-off-by: Shay Drory Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index f40497823e65f..7c0f2adbea000 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -490,7 +490,7 @@ static void poll_trace(struct mlx5_fw_tracer *tracer, (u64)timestamp_low; break; default: - if (tracer_event->event_id >= tracer->str_db.first_string_trace || + if (tracer_event->event_id >= tracer->str_db.first_string_trace && tracer_event->event_id <= tracer->str_db.first_string_trace + tracer->str_db.num_string_trace) { tracer_event->type = TRACER_EVENT_TYPE_STRING; From 1db1f21caebbb1b6e9b1e7657df613616be3fb49 Mon Sep 17 00:00:00 2001 From: Dragos Tatulea Date: Thu, 13 Apr 2023 15:48:30 +0300 Subject: [PATCH 096/303] net/mlx5e: Use query_special_contexts cmd only once per mdev Don't query the firmware so many times (num rqs * num wqes * wqe frags) because it slows down linearly the interface creation time when the product is larger. Do it only once per mdev and store the result in mlx5e_param. Due to helper function being called from different files, move it to an appropriate location. Rename the function with a proper prefix and add a small cleanup. This fix applies only for legacy rq. 
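For illustration, with tens of RQs, each holding hundreds of WQEs with several fragments, the old scheme could issue on the order of hundreds of thousands of query_special_contexts commands at interface creation (exact figures depend on the configuration). The intent of the change, condensed from the hunks below into an illustrative fragment, is to resolve the lkey once and reuse the cached big-endian value in the per-fragment loop:

  /* once per mdev, while building the channel parameters */
  params->terminate_lkey_be =
          mlx5_core_get_terminate_scatter_list_mkey(mdev);

  /* ... later, in the per-frag loop of mlx5e_alloc_rq(), just reuse it: */
  wqe->data[f].lkey = params->terminate_lkey_be;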
Fixes: 1b1e4868836a ("net/mlx5e: Use query_special_contexts for mkeys") Signed-off-by: Dragos Tatulea Reviewed-by: Or Har-Toov Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + .../net/ethernet/mellanox/mlx5/core/en_main.c | 24 +++---------------- drivers/net/ethernet/mellanox/mlx5/core/mr.c | 21 ++++++++++++++++ include/linux/mlx5/driver.h | 1 + 4 files changed, 26 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index b8987a404d752..8e999f2381948 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -327,6 +327,7 @@ struct mlx5e_params { unsigned int sw_mtu; int hard_mtu; bool ptp_rx; + __be32 terminate_lkey_be; }; static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2944691f06add..0235adcbc6090 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -727,26 +727,6 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq) mlx5e_rq_shampo_hd_free(rq); } -static __be32 mlx5e_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev) -{ - u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {}; - u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {}; - int res; - - if (!MLX5_CAP_GEN(dev, terminate_scatter_list_mkey)) - return MLX5_TERMINATE_SCATTER_LIST_LKEY; - - MLX5_SET(query_special_contexts_in, in, opcode, - MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); - res = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out); - if (res) - return MLX5_TERMINATE_SCATTER_LIST_LKEY; - - res = MLX5_GET(query_special_contexts_out, out, - terminate_scatter_list_mkey); - return cpu_to_be32(res); -} - static int mlx5e_alloc_rq(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, struct mlx5e_rq_param *rqp, @@ -908,7 +888,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, /* check if num_frags is not a pow of two */ if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) { wqe->data[f].byte_count = 0; - wqe->data[f].lkey = mlx5e_get_terminate_scatter_list_mkey(mdev); + wqe->data[f].lkey = params->terminate_lkey_be; wqe->data[f].addr = 0; } } @@ -5007,6 +4987,8 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 /* RQ */ mlx5e_build_rq_params(mdev, params); + params->terminate_lkey_be = mlx5_core_get_terminate_scatter_list_mkey(mdev); + params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); /* CQ moderation params */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 9d735c343a3b8..678f0be813752 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -32,6 +32,7 @@ #include #include +#include #include "mlx5_core.h" int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in, @@ -122,3 +123,23 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num) return mlx5_cmd_exec_in(dev, destroy_psv, in); } EXPORT_SYMBOL(mlx5_core_destroy_psv); + +__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev) +{ + u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {}; + u32 mkey; + + if (!MLX5_CAP_GEN(dev, 
terminate_scatter_list_mkey)) + return MLX5_TERMINATE_SCATTER_LIST_LKEY; + + MLX5_SET(query_special_contexts_in, in, opcode, + MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); + if (mlx5_cmd_exec_inout(dev, query_special_contexts, in, out)) + return MLX5_TERMINATE_SCATTER_LIST_LKEY; + + mkey = MLX5_GET(query_special_contexts_out, out, + terminate_scatter_list_mkey); + return cpu_to_be32(mkey); +} +EXPORT_SYMBOL(mlx5_core_get_terminate_scatter_list_mkey); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index a4c4f737f9c11..94d2be5848ae6 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1093,6 +1093,7 @@ void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, int npsvs, u32 *sig_index); int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); +__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev); void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *odp_caps); From 5d862ec631f3d3cc3b4f8cdb5b9fc5879663f1d3 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Mon, 22 May 2023 14:48:52 +0200 Subject: [PATCH 097/303] net/mlx5: Fix post parse infra to only parse every action once Caller of mlx5e_tc_act_post_parse() needs it to parse only the subset of actions starting after previous split and ending at the current action. However, that range is not provided as arguments and mlx5e_tc_act_post_parse() uses generic flow_action_for_each() that iterates over all flow actions. Not only this is redundant, it also causes a bug when mlx5e_tc_act->post_parse() callback is not idempotent since it will be called for every split. For example, ct action tc_act_post_parse_ct() callback obtains a reference to mlx5_ct_ft instance and calling it several times during parsing stage will cause reference counter imbalance. Fix the issue by providing a proper action range of the current split subset to mlx5e_tc_act_post_parse() and only calling mlx5e_tc_act->post_parse() for actions inside the subset range. 
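As a concrete illustration (made-up action list): for a flow whose actions are [act0, act1(split), act2, act3], the first post-parse pass should cover indices 0-1 and the second indices 2-3. Previously each pass iterated over all four actions, so act0/act1 were post-parsed once per split, which breaks non-idempotent callbacks such as the ct one. With the range guard added to mlx5e_tc_act_post_parse(), the caller only has to track where the last split ended; heavily simplified:

  /* caller side (parse_tc_actions), simplified fragment */
  int i_split = 0;

  /* at each split point, i.e. after action index i: */
  err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i,
                                attr, ns_type);
  i_split = i + 1;        /* the next pass starts after the split */

  /* after the loop, post-parse the remaining tail [i_split, i]: */
  err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i,
                                attr, ns_type);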
Fixes: 8300f225268b ("net/mlx5e: Create new flow attr for multi table actions") Signed-off-by: Vlad Buslov Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c | 7 ++++++- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 8 +++++--- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c index fc923a99b6a48..0380a04c3691c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c @@ -84,7 +84,7 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state, int mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state, - struct flow_action *flow_action, + struct flow_action *flow_action, int from, int to, struct mlx5_flow_attr *attr, enum mlx5_flow_namespace_type ns_type) { @@ -96,6 +96,11 @@ mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state, priv = parse_state->flow->priv; flow_action_for_each(i, act, flow_action) { + if (i < from) + continue; + else if (i > to) + break; + tc_act = mlx5e_tc_act_get(act->id, ns_type); if (!tc_act || !tc_act->post_parse) continue; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h index 0e6e1872ac62e..d6c12d0ea55ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h @@ -112,7 +112,7 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state, int mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state, - struct flow_action *flow_action, + struct flow_action *flow_action, int from, int to, struct mlx5_flow_attr *attr, enum mlx5_flow_namespace_type ns_type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 8935156f8f4e5..8a5a8703f0a31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3859,8 +3859,8 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, struct mlx5_flow_attr *prev_attr; struct flow_action_entry *act; struct mlx5e_tc_act *tc_act; + int err, i, i_split = 0; bool is_missable; - int err, i; ns_type = mlx5e_get_flow_namespace(flow); list_add(&attr->list, &flow->attrs); @@ -3901,7 +3901,8 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, i < flow_action->num_entries - 1)) { is_missable = tc_act->is_missable ? 
tc_act->is_missable(act) : false; - err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type); + err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr, + ns_type); if (err) goto out_free_post_acts; @@ -3911,6 +3912,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, goto out_free_post_acts; } + i_split = i + 1; list_add(&attr->list, &flow->attrs); } @@ -3925,7 +3927,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state, } } - err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type); + err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr, ns_type); if (err) goto out_free_post_acts; From bdf274750fca17b289404ef03453c4070725302c Mon Sep 17 00:00:00 2001 From: Dmytro Linkin Date: Wed, 13 Oct 2021 14:39:24 +0300 Subject: [PATCH 098/303] net/mlx5e: Don't attach netdev profile while handling internal error As part of switchdev mode disablement, driver changes port netdevice profile from uplink to nic. If this process is triggered by health recovery flow (PCI reset, for ex.) profile attach would fail because all fw commands aborted when internal error flag is set. As a result, nic netdevice profile is not attached and driver fails to rollback to uplink profile, which leave driver in broken state and cause crash later. To handle broken state do netdevice profile initialization only instead of full attachment and release mdev resources on driver suspend as expected. Actual netdevice attachment is done during driver load. Fixes: c4d7eb57687f ("net/mxl5e: Add change profile method") Signed-off-by: Dmytro Linkin Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_main.c | 35 ++++++++++++++++--- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 0235adcbc6090..a07bbe9a61bef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -5833,8 +5833,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv) } static int -mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev, - const struct mlx5e_profile *new_profile, void *new_ppriv) +mlx5e_netdev_init_profile(struct net_device *netdev, struct mlx5_core_dev *mdev, + const struct mlx5e_profile *new_profile, void *new_ppriv) { struct mlx5e_priv *priv = netdev_priv(netdev); int err; @@ -5850,6 +5850,25 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde err = new_profile->init(priv->mdev, priv->netdev); if (err) goto priv_cleanup; + + return 0; + +priv_cleanup: + mlx5e_priv_cleanup(priv); + return err; +} + +static int +mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev, + const struct mlx5e_profile *new_profile, void *new_ppriv) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + err = mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv); + if (err) + return err; + err = mlx5e_attach_netdev(priv); if (err) goto profile_cleanup; @@ -5857,7 +5876,6 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde profile_cleanup: new_profile->cleanup(priv); -priv_cleanup: mlx5e_priv_cleanup(priv); return err; } @@ -5876,6 +5894,12 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, priv->profile->cleanup(priv); mlx5e_priv_cleanup(priv); + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + mlx5e_netdev_init_profile(netdev, 
mdev, new_profile, new_ppriv); + set_bit(MLX5E_STATE_DESTROYING, &priv->state); + return -EIO; + } + err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv); if (err) { /* roll back to original profile */ netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err); @@ -5937,8 +5961,11 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; - if (!netif_device_present(netdev)) + if (!netif_device_present(netdev)) { + if (test_bit(MLX5E_STATE_DESTROYING, &priv->state)) + mlx5e_destroy_mdev_resources(mdev); return -ENODEV; + } mlx5e_detach_netdev(priv); mlx5e_destroy_mdev_resources(mdev); From c4c24fc30cc417ace332ceceaba4f70f81dcd521 Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Tue, 16 May 2023 02:28:02 +0000 Subject: [PATCH 099/303] net/mlx5e: Move Ethernet driver debugfs to profile init callback As priv->dfs_root is cleared, and therefore missed, when change eswitch mode, move the creation of the root debugfs to the init callback of mlx5e_nic_profile and mlx5e_uplink_rep_profile, and the destruction to the cleanup callback for symmeter. Fixes: 288eca60cc31 ("net/mlx5e: Add Ethernet driver debugfs") Signed-off-by: Jianbo Liu Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 10 +++++----- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 6 ++++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a07bbe9a61bef..a7c526ee50247 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -5261,12 +5261,16 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, mlx5e_timestamp_init(priv); + priv->dfs_root = debugfs_create_dir("nic", + mlx5_debugfs_get_dev_root(mdev)); + fs = mlx5e_fs_init(priv->profile, mdev, !test_bit(MLX5E_STATE_DESTROYING, &priv->state), priv->dfs_root); if (!fs) { err = -ENOMEM; mlx5_core_err(mdev, "FS initialization failed, %d\n", err); + debugfs_remove_recursive(priv->dfs_root); return err; } priv->fs = fs; @@ -5287,6 +5291,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) mlx5e_health_destroy_reporters(priv); mlx5e_ktls_cleanup(priv); mlx5e_fs_cleanup(priv->fs); + debugfs_remove_recursive(priv->dfs_root); priv->fs = NULL; } @@ -6011,9 +6016,6 @@ static int mlx5e_probe(struct auxiliary_device *adev, priv->profile = profile; priv->ppriv = NULL; - priv->dfs_root = debugfs_create_dir("nic", - mlx5_debugfs_get_dev_root(priv->mdev)); - err = profile->init(mdev, netdev); if (err) { mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err); @@ -6042,7 +6044,6 @@ static int mlx5e_probe(struct auxiliary_device *adev, err_profile_cleanup: profile->cleanup(priv); err_destroy_netdev: - debugfs_remove_recursive(priv->dfs_root); mlx5e_destroy_netdev(priv); err_devlink_port_unregister: mlx5e_devlink_port_unregister(mlx5e_dev); @@ -6062,7 +6063,6 @@ static void mlx5e_remove(struct auxiliary_device *adev) unregister_netdev(priv->netdev); mlx5e_suspend(adev, state); priv->profile->cleanup(priv); - debugfs_remove_recursive(priv->dfs_root); mlx5e_destroy_netdev(priv); mlx5e_devlink_port_unregister(mlx5e_dev); mlx5e_destroy_devlink(mlx5e_dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 1fc386eccaf8f..3e7041bd5705e 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -30,6 +30,7 @@ * SOFTWARE. */ +#include #include #include #include @@ -812,11 +813,15 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev, { struct mlx5e_priv *priv = netdev_priv(netdev); + priv->dfs_root = debugfs_create_dir("nic", + mlx5_debugfs_get_dev_root(mdev)); + priv->fs = mlx5e_fs_init(priv->profile, mdev, !test_bit(MLX5E_STATE_DESTROYING, &priv->state), priv->dfs_root); if (!priv->fs) { netdev_err(priv->netdev, "FS allocation failed\n"); + debugfs_remove_recursive(priv->dfs_root); return -ENOMEM; } @@ -829,6 +834,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev, static void mlx5e_cleanup_rep(struct mlx5e_priv *priv) { mlx5e_fs_cleanup(priv->fs); + debugfs_remove_recursive(priv->dfs_root); priv->fs = NULL; } From fe5c2d3aef9352ac36a6acbc2bff5f732211ce3b Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Wed, 17 May 2023 17:54:30 +0300 Subject: [PATCH 100/303] net/mlx5: DR, Add missing mutex init/destroy in pattern manager Add missing mutex init/destroy as caught by the lock's debug warning: DEBUG_LOCKS_WARN_ON(lock->magic != lock) Fixes: da5d0027d666 ("net/mlx5: DR, Add cache for modify header pattern") Signed-off-by: Yevgeny Kliteynik Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c index 13e06a6a6b22a..d6947fe13d560 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c @@ -213,6 +213,8 @@ struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn) } INIT_LIST_HEAD(&mgr->ptrn_list); + mutex_init(&mgr->modify_hdr_mutex); + return mgr; free_mgr: @@ -237,5 +239,6 @@ void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr) } mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool); + mutex_destroy(&mgr->modify_hdr_mutex); kfree(mgr); } From d5972f1000c1e6b5185ded0fc194f21984f26e14 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 19 May 2023 22:43:03 +0300 Subject: [PATCH 101/303] net/mlx5: Fix check for allocation failure in comp_irqs_request_pci() This function accidentally dereferences "cpus" instead of returning directly. 
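The pattern being restored is the standard kernel allocation-failure idiom; a minimal illustrative version (hypothetical wrapper name, simplified types) is:

  #include <linux/errno.h>
  #include <linux/slab.h>
  #include <linux/types.h>

  static int alloc_cpu_list(int ncomp_eqs, u16 **out)
  {
          u16 *cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);

          if (!cpus)
                  return -ENOMEM; /* bail out immediately; merely setting a
                                   * local ret and falling through would
                                   * dereference the NULL array below */

          *out = cpus;
          return 0;
  }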
Reported-by: kernel test robot Closes: https://lore.kernel.org/r/202305200354.KV3jU94w-lkp@intel.com/ Fixes: b48a0f72bc3e ("net/mlx5: Refactor completion irq request/release code") Signed-off-by: Dan Carpenter Reviewed-by: Simon Horman Reviewed-by: Leon Romanovsky Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index fe698c79616c6..3db4866d7880f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -824,7 +824,7 @@ static int comp_irqs_request_pci(struct mlx5_core_dev *dev) ncomp_eqs = table->num_comp_eqs; cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL); if (!cpus) - ret = -ENOMEM; + return -ENOMEM; i = 0; rcu_read_lock(); From c3acc46c602f1aa0449ea687057758e5224ae3da Mon Sep 17 00:00:00 2001 From: Bagas Sanjaya Date: Wed, 10 May 2023 10:54:12 +0700 Subject: [PATCH 102/303] Documentation: net/mlx5: Wrap vnic reporter devlink commands in code blocks Sphinx reports htmldocs warnings: Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst:287: WARNING: Unexpected indentation. Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst:288: WARNING: Block quote ends without a blank line; unexpected unindent. Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst:290: WARNING: Unexpected indentation. Fix above warnings by wrapping diagnostic devlink commands in "vnic reporter" section in code blocks to be consistent with other devlink command snippets. Fixes: b0bc615df488ab ("net/mlx5: Add vnic devlink health reporter to PFs/VFs") Fixes: cf14af140a5ad0 ("net/mlx5e: Add vnic devlink health reporter to representors") Reviewed-by: Leon Romanovsky Signed-off-by: Bagas Sanjaya Reviewed-by: Simon Horman Signed-off-by: Saeed Mahameed --- .../device_drivers/ethernet/mellanox/mlx5/devlink.rst | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst index 3a7a714cc08f0..0f0598caea145 100644 --- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst +++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst @@ -283,10 +283,14 @@ nic_receive_steering_discard: number of packets that completed RX flow steering but were discarded due to a mismatch in flow table. User commands examples: -- Diagnose PF/VF vnic counters + +- Diagnose PF/VF vnic counters:: + $ devlink health diagnose pci/0000:82:00.1 reporter vnic + - Diagnose representor vnic counters (performed by supplying devlink port of the - representor, which can be obtained via devlink port command) + representor, which can be obtained via devlink port command):: + $ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic NOTE: This command can run over all interfaces such as PF/VF and representor ports. From 565b8c60e90f7f2d4763f5979c7aaa43697ab950 Mon Sep 17 00:00:00 2001 From: Bagas Sanjaya Date: Wed, 10 May 2023 10:54:13 +0700 Subject: [PATCH 103/303] Documentation: net/mlx5: Use bullet and definition lists for vnic counters description "vnic reporter" section contains unformatted description for vnic counters, which is rendered as one long paragraph instead of list. Use bullet and definition lists to match other lists. 
Fixes: b0bc615df488ab ("net/mlx5: Add vnic devlink health reporter to PFs/VFs") Reviewed-by: Leon Romanovsky Signed-off-by: Bagas Sanjaya Reviewed-by: Simon Horman Reviewed-by: Daniel Machon Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/devlink.rst | 36 ++++++++++--------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst index 0f0598caea145..00687425d8b72 100644 --- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst +++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst @@ -265,22 +265,26 @@ It is responsible for querying the vnic diagnostic counters from fw and displayi them in realtime. Description of the vnic counters: -total_q_under_processor_handle: number of queues in an error state due to -an async error or errored command. -send_queue_priority_update_flow: number of QP/SQ priority/SL update -events. -cq_overrun: number of times CQ entered an error state due to an -overflow. -async_eq_overrun: number of times an EQ mapped to async events was -overrun. -comp_eq_overrun: number of times an EQ mapped to completion events was -overrun. -quota_exceeded_command: number of commands issued and failed due to quota -exceeded. -invalid_command: number of commands issued and failed dues to any reason -other than quota exceeded. -nic_receive_steering_discard: number of packets that completed RX flow -steering but were discarded due to a mismatch in flow table. + +- total_q_under_processor_handle + number of queues in an error state due to + an async error or errored command. +- send_queue_priority_update_flow + number of QP/SQ priority/SL update events. +- cq_overrun + number of times CQ entered an error state due to an overflow. +- async_eq_overrun + number of times an EQ mapped to async events was overrun. + comp_eq_overrun number of times an EQ mapped to completion events was + overrun. +- quota_exceeded_command + number of commands issued and failed due to quota exceeded. +- invalid_command + number of commands issued and failed dues to any reason other than quota + exceeded. +- nic_receive_steering_discard + number of packets that completed RX flow + steering but were discarded due to a mismatch in flow table. User commands examples: From 92059da65d84227fcc7cabbd96c0cca19880f4ff Mon Sep 17 00:00:00 2001 From: Bagas Sanjaya Date: Wed, 10 May 2023 10:54:14 +0700 Subject: [PATCH 104/303] Documentation: net/mlx5: Add blank line separator before numbered lists The doc forgets to add separator before numbered lists, which causes the lists to be appended to previous paragraph inline instead. Add the missing separator. 
Fixes: f2d51e579359b7 ("net/mlx5: Separate mlx5 driver documentation into multiple pages") Reviewed-by: Leon Romanovsky Signed-off-by: Bagas Sanjaya Reviewed-by: Simon Horman Signed-off-by: Saeed Mahameed --- .../device_drivers/ethernet/mellanox/mlx5/devlink.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst index 00687425d8b72..f962c0975d842 100644 --- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst +++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst @@ -40,6 +40,7 @@ flow_steering_mode: Device flow steering mode --------------------------------------------- The flow steering mode parameter controls the flow steering mode of the driver. Two modes are supported: + 1. 'dmfs' - Device managed flow steering. 2. 'smfs' - Software/Driver managed flow steering. @@ -99,6 +100,7 @@ between representors and stacked devices. By default metadata is enabled on the supported devices in E-switch. Metadata is applicable only for E-switch in switchdev mode and users may disable it when NONE of the below use cases will be in use: + 1. HCA is in Dual/multi-port RoCE mode. 2. VF/SF representor bonding (Usually used for Live migration) 3. Stacked devices From bb72b94c659f5032850e9ab070d850bc743e3a0e Mon Sep 17 00:00:00 2001 From: Bagas Sanjaya Date: Wed, 10 May 2023 10:54:15 +0700 Subject: [PATCH 105/303] Documentation: net/mlx5: Wrap notes in admonition blocks Wrap note paragraphs in note:: directive as it better fit for the purpose of noting devlink commands. Fixes: f2d51e579359b7 ("net/mlx5: Separate mlx5 driver documentation into multiple pages") Fixes: cf14af140a5ad0 ("net/mlx5e: Add vnic devlink health reporter to representors") Reviewed-by: Leon Romanovsky Signed-off-by: Bagas Sanjaya Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/devlink.rst | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst index f962c0975d842..3354ca3608ee6 100644 --- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst +++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst @@ -182,7 +182,8 @@ User commands examples: $ devlink health diagnose pci/0000:82:00.0 reporter tx -NOTE: This command has valid output only when interface is up, otherwise the command has empty output. +.. note:: + This command has valid output only when interface is up, otherwise the command has empty output. - Show number of tx errors indicated, number of recover flows ended successfully, is autorecover enabled and graceful period from last recover:: @@ -234,8 +235,9 @@ User commands examples: $ devlink health dump show pci/0000:82:00.0 reporter fw -NOTE: This command can run only on the PF which has fw tracer ownership, -running it on other PF or any VF will return "Operation not permitted". +.. note:: + This command can run only on the PF which has fw tracer ownership, + running it on other PF or any VF will return "Operation not permitted". fw fatal reporter ----------------- @@ -258,7 +260,8 @@ User commands examples: $ devlink health dump show pci/0000:82:00.1 reporter fw_fatal -NOTE: This command can run only on PF. +.. note:: + This command can run only on PF. 
vnic reporter ------------- @@ -299,4 +302,5 @@ User commands examples: $ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic -NOTE: This command can run over all interfaces such as PF/VF and representor ports. +.. note:: + This command can run over all interfaces such as PF/VF and representor ports. From 095aabe338d166f3a9c87bcc9b9b84ba80fdaddf Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 22 May 2023 08:24:49 +0200 Subject: [PATCH 106/303] efi/libstub: zboot: Avoid eager evaluation of objcopy flags The Make variable containing the objcopy flags may be constructed from the output of build tools operating on build artifacts, and these may not exist when doing a make clean. So avoid evaluating them eagerly, to prevent spurious build warnings. Suggested-by: Pedro Falcato Tested-by: Alan Bartlett Signed-off-by: Ard Biesheuvel --- drivers/firmware/efi/libstub/Makefile.zboot | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot index 89ef820f3b344..2c489627a8078 100644 --- a/drivers/firmware/efi/libstub/Makefile.zboot +++ b/drivers/firmware/efi/libstub/Makefile.zboot @@ -32,7 +32,8 @@ zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0 $(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE $(call if_changed,$(zboot-method-y)) -OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \ +# avoid eager evaluation to prevent references to non-existent build artifacts +OBJCOPYFLAGS_vmlinuz.o = -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \ --rename-section .data=.gzdata,load,alloc,readonly,contents $(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE $(call if_changed,objcopy) From fd936fd8ac105ba3eb764185e8ba483c789c893e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 23 May 2023 21:01:30 +0200 Subject: [PATCH 107/303] efi: fix missing prototype warnings The cper.c file needs to include an extra header, and efi_zboot_entry needs an extern declaration to avoid these 'make W=1' warnings: drivers/firmware/efi/libstub/zboot.c:65:1: error: no previous prototype for 'efi_zboot_entry' [-Werror=missing-prototypes] drivers/firmware/efi/efi.c:176:16: error: no previous prototype for 'efi_attr_is_visible' [-Werror=missing-prototypes] drivers/firmware/efi/cper.c:626:6: error: no previous prototype for 'cper_estatus_print' [-Werror=missing-prototypes] drivers/firmware/efi/cper.c:649:5: error: no previous prototype for 'cper_estatus_check_header' [-Werror=missing-prototypes] drivers/firmware/efi/cper.c:662:5: error: no previous prototype for 'cper_estatus_check' [-Werror=missing-prototypes] To make this easier, move the cper specific declarations to include/linux/cper.h. Signed-off-by: Arnd Bergmann Acked-by: Rafael J. 
Wysocki Signed-off-by: Ard Biesheuvel --- drivers/acpi/apei/apei-internal.h | 6 ------ drivers/acpi/apei/bert.c | 1 + drivers/firmware/efi/libstub/efistub.h | 3 +++ include/linux/cper.h | 6 ++++++ include/linux/efi.h | 2 ++ 5 files changed, 12 insertions(+), 6 deletions(-) diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index 1d6ef96547252..67c2c3b959e15 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -7,7 +7,6 @@ #ifndef APEI_INTERNAL_H #define APEI_INTERNAL_H -#include #include struct apei_exec_context; @@ -130,10 +129,5 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus) return sizeof(*estatus) + estatus->data_length; } -void cper_estatus_print(const char *pfx, - const struct acpi_hest_generic_status *estatus); -int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus); -int cper_estatus_check(const struct acpi_hest_generic_status *estatus); - int apei_osc_setup(void); #endif diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index c23eb75866d02..7514e38d5640e 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include "apei-internal.h" diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 67d5a20802e0b..54a2822cae771 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -1133,4 +1133,7 @@ const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record, void efi_remap_image(unsigned long image_base, unsigned alloc_size, unsigned long code_size); +asmlinkage efi_status_t __efiapi +efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab); + #endif diff --git a/include/linux/cper.h b/include/linux/cper.h index eacb7dd7b3af3..c1a7dc3251215 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -572,4 +572,10 @@ void cper_print_proc_ia(const char *pfx, int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg); int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg); +struct acpi_hest_generic_status; +void cper_estatus_print(const char *pfx, + const struct acpi_hest_generic_status *estatus); +int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus); +int cper_estatus_check(const struct acpi_hest_generic_status *estatus); + #endif diff --git a/include/linux/efi.h b/include/linux/efi.h index 7aa62c92185f6..571d1a6e1b744 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1338,4 +1338,6 @@ bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table) return xen_efi_config_table_is_usable(guid, table); } +umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n); + #endif /* _LINUX_EFI_H */ From 1dd5483af494216e91c9a12f11992bcba483f944 Mon Sep 17 00:00:00 2001 From: Steve French Date: Wed, 24 May 2023 15:12:29 -0500 Subject: [PATCH 108/303] smb3: update a reviewer email in MAINTAINERS file Paulo has a new email address so update maintainers files. 
Signed-off-by: Steve French --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 207a65905f5e9..839cc375b205c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5130,7 +5130,7 @@ X: drivers/clk/clkdev.c COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3) M: Steve French -R: Paulo Alcantara (DFS, global name space) +R: Paulo Alcantara (DFS, global name space) R: Ronnie Sahlberg (directory leases, sparse files) R: Shyam Prasad N (multichannel) R: Tom Talpey (RDMA, smbdirect) From b535cc796a4b4942cd189652588e8d37c1f5925a Mon Sep 17 00:00:00 2001 From: Steve French Date: Thu, 25 May 2023 18:53:28 -0500 Subject: [PATCH 109/303] smb3: missing null check in SMB2_change_notify If plen is null when passed in, we only checked for null in one of the two places where it could be used. Although plen is always valid (not null) for current callers of the SMB2_change_notify function, this change makes it more consistent. Reported-by: kernel test robot Reported-by: Dan Carpenter Closes: https://lore.kernel.org/all/202305251831.3V1gbbFs-lkp@intel.com/ Signed-off-by: Steve French --- fs/smb/client/smb2pdu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index 9ed61b6f9b219..7063b395d22f2 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -3725,7 +3725,7 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, if (*out_data == NULL) { rc = -ENOMEM; goto cnotify_exit; - } else + } else if (plen) *plen = le32_to_cpu(smb_rsp->OutputBufferLength); } From d68cb7cf1fd0ef4287bc0ecd1ed0b6ae8e05fc70 Mon Sep 17 00:00:00 2001 From: Thomas Bogendoerfer Date: Wed, 24 May 2023 21:49:08 +0200 Subject: [PATCH 110/303] net: mellanox: mlxbf_gige: Fix skb_panic splat under memory pressure Do skb_put() after a new skb has been successfully allocated otherwise the reused skb leads to skb_panics or incorrect packet sizes. 
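A minimal sketch of the ordering the fix establishes (hypothetical struct and helper names, not the driver's actual receive path): commit the received skb to the stack only after the replacement buffer has been allocated, so a failed allocation leaves the original skb untouched and safely reusable.

/* Sketch only: allocate the replacement first, then finalize the old skb. */
static bool example_rx_packet(struct example_priv *priv, int idx, int datalen)
{
	struct sk_buff *skb = priv->rx_skb[idx];
	struct sk_buff *new_skb;

	new_skb = example_alloc_rx_skb(priv);	/* hypothetical allocator */
	if (!new_skb)
		return false;		/* old skb stays pristine for reuse */

	priv->rx_skb[idx] = new_skb;

	/* Only now is it safe to hand the filled skb up the stack. */
	skb_put(skb, datalen);
	skb->protocol = eth_type_trans(skb, priv->netdev);
	netif_receive_skb(skb);

	return true;
}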
Fixes: f92e1869d74e ("Add Mellanox BlueField Gigabit Ethernet driver") Signed-off-by: Thomas Bogendoerfer Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230524194908.147145-1-tbogendoerfer@suse.de Signed-off-by: Jakub Kicinski --- .../ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c index afa3b92a6905f..0d5a41a2ae010 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c @@ -245,12 +245,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts) skb = priv->rx_skb[rx_pi_rem]; - skb_put(skb, datalen); - - skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */ - - skb->protocol = eth_type_trans(skb, netdev); - /* Alloc another RX SKB for this same index */ rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ, &rx_buf_dma, DMA_FROM_DEVICE); @@ -259,6 +253,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts) priv->rx_skb[rx_pi_rem] = rx_skb; dma_unmap_single(priv->dev, *rx_wqe_addr, MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE); + + skb_put(skb, datalen); + + skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */ + + skb->protocol = eth_type_trans(skb, netdev); + *rx_wqe_addr = rx_buf_dma; } else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) { priv->stats.rx_mac_errors++; From ffb3322181d9e8db880202e4f00991764a35d812 Mon Sep 17 00:00:00 2001 From: Wei Fang Date: Wed, 24 May 2023 20:57:14 +0800 Subject: [PATCH 111/303] net: stmmac: fix call trace when stmmac_xdp_xmit() is invoked We encountered a kernel call trace issue which was related to ndo_xdp_xmit callback on our i.MX8MP platform. The reproduce steps show as follows. 1. The FEC port (eth0) connects to a PC port, and the PC uses pktgen_sample03_burst_single_flow.sh to generate packets and send these packets to the FEC port. Notice that the script must be executed before step 2. 2. Run the "./xdp_redirect eth0 eth1" command on i.MX8MP, the eth1 interface is the dwmac. Then there will be a call trace issue soon. Please see the log for more details. The root cause is that the NETDEV_XDP_ACT_NDO_XMIT feature is enabled by default, so when the step 2 command is exexcuted and packets have already been sent to eth0, the stmmac_xdp_xmit() starts running before the stmmac_xdp_set_prog() finishes. To resolve this issue, we disable the NETDEV_XDP_ACT_NDO_XMIT feature by default and turn on/off this feature when the bpf program is installed/uninstalled which just like the other ethernet drivers. 
Call Trace log: [ 306.311271] ------------[ cut here ]------------ [ 306.315910] WARNING: CPU: 0 PID: 15 at lib/timerqueue.c:55 timerqueue_del+0x68/0x70 [ 306.323590] Modules linked in: [ 306.326654] CPU: 0 PID: 15 Comm: ksoftirqd/0 Not tainted 6.4.0-rc1+ #37 [ 306.333277] Hardware name: NXP i.MX8MPlus EVK board (DT) [ 306.338591] pstate: 600000c5 (nZCv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 306.345561] pc : timerqueue_del+0x68/0x70 [ 306.349577] lr : __remove_hrtimer+0x5c/0xa0 [ 306.353777] sp : ffff80000b7c3920 [ 306.357094] x29: ffff80000b7c3920 x28: 0000000000000000 x27: 0000000000000001 [ 306.364244] x26: ffff80000a763a40 x25: ffff0000d0285a00 x24: 0000000000000001 [ 306.371390] x23: 0000000000000001 x22: ffff000179389a40 x21: 0000000000000000 [ 306.378537] x20: ffff000179389aa0 x19: ffff0000d2951308 x18: 0000000000001000 [ 306.385686] x17: f1d3000000000000 x16: 00000000c39c1000 x15: 55e99bbe00001a00 [ 306.392835] x14: 09000900120aa8c0 x13: e49af1d300000000 x12: 000000000000c39c [ 306.399987] x11: 100055e99bbe0000 x10: ffff8000090b1048 x9 : ffff8000081603fc [ 306.407133] x8 : 000000000000003c x7 : 000000000000003c x6 : 0000000000000001 [ 306.414284] x5 : ffff0000d2950980 x4 : 0000000000000000 x3 : 0000000000000000 [ 306.421432] x2 : 0000000000000001 x1 : ffff0000d2951308 x0 : ffff0000d2951308 [ 306.428585] Call trace: [ 306.431035] timerqueue_del+0x68/0x70 [ 306.434706] __remove_hrtimer+0x5c/0xa0 [ 306.438549] hrtimer_start_range_ns+0x2bc/0x370 [ 306.443089] stmmac_xdp_xmit+0x174/0x1b0 [ 306.447021] bq_xmit_all+0x194/0x4b0 [ 306.450612] __dev_flush+0x4c/0x98 [ 306.454024] xdp_do_flush+0x18/0x38 [ 306.457522] fec_enet_rx_napi+0x6c8/0xc68 [ 306.461539] __napi_poll+0x40/0x220 [ 306.465038] net_rx_action+0xf8/0x240 [ 306.468707] __do_softirq+0x128/0x3a8 [ 306.472378] run_ksoftirqd+0x40/0x58 [ 306.475961] smpboot_thread_fn+0x1c4/0x288 [ 306.480068] kthread+0x124/0x138 [ 306.483305] ret_from_fork+0x10/0x20 [ 306.486889] ---[ end trace 0000000000000000 ]--- Fixes: 66c0e13ad236 ("drivers: net: turn on XDP features") Signed-off-by: Wei Fang Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230524125714.357337-1-wei.fang@nxp.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3 +-- drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c | 6 ++++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0fca81507a779..52cab9de05f27 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7233,8 +7233,7 @@ int stmmac_dvr_probe(struct device *device, ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_XSK_ZEROCOPY | - NETDEV_XDP_ACT_NDO_XMIT; + NETDEV_XDP_ACT_XSK_ZEROCOPY; ret = stmmac_tc_init(priv, priv); if (!ret) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c index 9d4d8c3dad0a3..aa6f16d3df649 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c @@ -117,6 +117,9 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, return -EOPNOTSUPP; } + if (!prog) + xdp_features_clear_redirect_target(dev); + need_update = !!priv->xdp_prog != !!prog; if (if_running && need_update) stmmac_xdp_release(dev); @@ -131,5 
+134,8 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog, if (if_running && need_update) stmmac_xdp_open(dev); + if (prog) + xdp_features_set_redirect_target(dev, false); + return 0; } From 31642e7089df8fd3f54ca7843f7ee2952978cad1 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 24 May 2023 14:14:56 +0000 Subject: [PATCH 112/303] netrom: fix info-leak in nr_write_internal() Simon Kapadia reported the following issue: The Online Amateur Radio Community (OARC) has recently been experimenting with building a nationwide packet network in the UK. As part of our experimentation, we have been testing out packet on 300bps HF, and playing with net/rom. For HF packet at this baud rate you really need to make sure that your MTU is relatively low; AX.25 suggests a PACLEN of 60, and a net/rom PACLEN of 40 to go with that. However the Linux net/rom support didn't work with a low PACLEN; the mkiss module would truncate packets if you set the PACLEN below about 200 or so, e.g.: Apr 19 14:00:51 radio kernel: [12985.747310] mkiss: ax1: truncating oversized transmit packet! This didn't make any sense to me (if the packets are smaller why would they be truncated?) so I started investigating. I looked at the packets using ethereal, and found that many were just huge compared to what I would expect. A simple net/rom connection request packet had the request and then a bunch of what appeared to be random data following it: Simon provided a patch that I slightly revised: Not only we must not use skb_tailroom(), we also do not want to count NR_NETWORK_LEN twice. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Co-Developed-by: Simon Kapadia Signed-off-by: Simon Kapadia Signed-off-by: Eric Dumazet Tested-by: Simon Kapadia Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230524141456.1045467-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/netrom/nr_subr.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c index 3f99b432ea707..e2d2af924cff4 100644 --- a/net/netrom/nr_subr.c +++ b/net/netrom/nr_subr.c @@ -123,7 +123,7 @@ void nr_write_internal(struct sock *sk, int frametype) unsigned char *dptr; int len, timeout; - len = NR_NETWORK_LEN + NR_TRANSPORT_LEN; + len = NR_TRANSPORT_LEN; switch (frametype & 0x0F) { case NR_CONNREQ: @@ -141,7 +141,8 @@ void nr_write_internal(struct sock *sk, int frametype) return; } - if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) + skb = alloc_skb(NR_NETWORK_LEN + len, GFP_ATOMIC); + if (!skb) return; /* @@ -149,7 +150,7 @@ void nr_write_internal(struct sock *sk, int frametype) */ skb_reserve(skb, NR_NETWORK_LEN); - dptr = skb_put(skb, skb_tailroom(skb)); + dptr = skb_put(skb, len); switch (frametype & 0x0F) { case NR_CONNREQ: From 081e8df6819997eae236f75dd52f0c147c4be939 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 24 May 2023 10:07:12 -0700 Subject: [PATCH 113/303] tools: ynl: avoid dict errors on older Python versions Python 3.9.0 or newer supports combining dicts() with |, but older versions of Python are still used in the wild (e.g. on CentOS 8, which goes EoL May 31, 2024). With Python 3.6.8 we get: TypeError: unsupported operand type(s) for |: 'dict' and 'dict' Use older syntax. Tested with non-legacy families only. 
Fixes: f036d936ca57 ("tools: ynl: Add fixed-header support to ynl") Reviewed-by: Simon Horman Reviewed-by: Donald Hunter Tested-by: Donald Hunter Link: https://lore.kernel.org/r/20230524170712.2036128-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- tools/net/ynl/lib/ynl.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py index aa77bcae4807e..3144f33196be4 100644 --- a/tools/net/ynl/lib/ynl.py +++ b/tools/net/ynl/lib/ynl.py @@ -591,8 +591,9 @@ def _op(self, method, vals, dump=False): print('Unexpected message: ' + repr(gm)) continue - rsp.append(self._decode(gm.raw_attrs, op.attr_set.name) - | gm.fixed_header_attrs) + rsp_msg = self._decode(gm.raw_attrs, op.attr_set.name) + rsp_msg.update(gm.fixed_header_attrs) + rsp.append(rsp_msg) if not rsp: return None From 822b5a1c17df7e338b9f05d1cfe5764e37c7f74f Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 24 May 2023 16:29:34 -0700 Subject: [PATCH 114/303] af_packet: Fix data-races of pkt_sk(sk)->num. syzkaller found a data race of pkt_sk(sk)->num. The value is changed under lock_sock() and po->bind_lock, so we need READ_ONCE() to access pkt_sk(sk)->num without these locks in packet_bind_spkt(), packet_bind(), and sk_diag_fill(). Note that WRITE_ONCE() is already added by commit c7d2ef5dd4b0 ("net/packet: annotate accesses to po->bind"). BUG: KCSAN: data-race in packet_bind / packet_do_bind write (marked) to 0xffff88802ffd1cee of 2 bytes by task 7322 on cpu 0: packet_do_bind+0x446/0x640 net/packet/af_packet.c:3236 packet_bind+0x99/0xe0 net/packet/af_packet.c:3321 __sys_bind+0x19b/0x1e0 net/socket.c:1803 __do_sys_bind net/socket.c:1814 [inline] __se_sys_bind net/socket.c:1812 [inline] __x64_sys_bind+0x40/0x50 net/socket.c:1812 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3b/0x90 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x72/0xdc read to 0xffff88802ffd1cee of 2 bytes by task 7318 on cpu 1: packet_bind+0xbf/0xe0 net/packet/af_packet.c:3322 __sys_bind+0x19b/0x1e0 net/socket.c:1803 __do_sys_bind net/socket.c:1814 [inline] __se_sys_bind net/socket.c:1812 [inline] __x64_sys_bind+0x40/0x50 net/socket.c:1812 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3b/0x90 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x72/0xdc value changed: 0x0300 -> 0x0000 Reported by Kernel Concurrency Sanitizer on: CPU: 1 PID: 7318 Comm: syz-executor.4 Not tainted 6.3.0-13380-g7fddb5b5300c #4 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014 Fixes: 96ec6327144e ("packet: Diag core and basic socket info dumping") Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: syzkaller Signed-off-by: Kuniyuki Iwashima Reviewed-by: Willem de Bruijn Link: https://lore.kernel.org/r/20230524232934.50950-1-kuniyu@amazon.com Signed-off-by: Jakub Kicinski --- net/packet/af_packet.c | 4 ++-- net/packet/diag.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 94c6a1ffa459a..a1f9a0e9f3c8a 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3299,7 +3299,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min)); name[sizeof(uaddr->sa_data_min)] = 0; - return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); + return packet_do_bind(sk, name, 0, READ_ONCE(pkt_sk(sk)->num)); } static int packet_bind(struct socket *sock, 
struct sockaddr *uaddr, int addr_len) @@ -3317,7 +3317,7 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len return -EINVAL; return packet_do_bind(sk, NULL, sll->sll_ifindex, - sll->sll_protocol ? : pkt_sk(sk)->num); + sll->sll_protocol ? : READ_ONCE(pkt_sk(sk)->num)); } static struct proto packet_proto = { diff --git a/net/packet/diag.c b/net/packet/diag.c index d0c4eda4cdc6f..f6b200cb3c066 100644 --- a/net/packet/diag.c +++ b/net/packet/diag.c @@ -143,7 +143,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, rp = nlmsg_data(nlh); rp->pdiag_family = AF_PACKET; rp->pdiag_type = sk->sk_type; - rp->pdiag_num = ntohs(po->num); + rp->pdiag_num = ntohs(READ_ONCE(po->num)); rp->pdiag_ino = sk_ino; sock_diag_save_cookie(sk, rp->pdiag_cookie); From 8a0d57df8938e9fd2e99d47a85b7f37d86f91097 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 24 May 2023 22:17:41 -0700 Subject: [PATCH 115/303] tls: improve lockless access safety of tls_err_abort() Most protos' poll() methods insert a memory barrier between writes to sk_err and sk_error_report(). This dates back to commit a4d258036ed9 ("tcp: Fix race in tcp_poll"). I guess we should do the same thing in TLS, tcp_poll() does not hold the socket lock. Fixes: 3c4d7559159b ("tls: kernel TLS support") Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- net/tls/tls_strp.c | 4 +++- net/tls/tls_sw.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index da95abbb7ea32..f37f4a0fcd3c2 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -20,7 +20,9 @@ static void tls_strp_abort_strp(struct tls_strparser *strp, int err) strp->stopped = 1; /* Report an error on the lower socket */ - strp->sk->sk_err = -err; + WRITE_ONCE(strp->sk->sk_err, -err); + /* Paired with smp_rmb() in tcp_poll() */ + smp_wmb(); sk_error_report(strp->sk); } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 6e6a7c37d685c..1a53c8f481e9a 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -70,7 +70,9 @@ noinline void tls_err_abort(struct sock *sk, int err) { WARN_ON_ONCE(err >= 0); /* sk->sk_err should contain a positive error code. */ - sk->sk_err = -err; + WRITE_ONCE(sk->sk_err, -err); + /* Paired with smp_rmb() in tcp_poll() */ + smp_wmb(); sk_error_report(sk); } From dc362e20cd6ab7a93d1b09669730c406f0910c35 Mon Sep 17 00:00:00 2001 From: Raju Rangoju Date: Thu, 25 May 2023 23:56:12 +0530 Subject: [PATCH 116/303] amd-xgbe: fix the false linkup in xgbe_phy_status In the event of a change in XGBE mode, the current auto-negotiation needs to be reset and the AN cycle needs to be re-triggerred. However, the current code ignores the return value of xgbe_set_mode(), leading to false information as the link is declared without checking the status register. Fix this by propagating the mode switch status information to xgbe_phy_status(). Fixes: e57f7a3feaef ("amd-xgbe: Prepare for working with more than one type of phy") Co-developed-by: Sudheesh Mavila Signed-off-by: Sudheesh Mavila Reviewed-by: Simon Horman Acked-by: Shyam Sundar S K Signed-off-by: Raju Rangoju Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 33a9574e9e043..32d2c6fac6526 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1329,7 +1329,7 @@ static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata) return pdata->phy_if.phy_impl.an_outcome(pdata); } -static void xgbe_phy_status_result(struct xgbe_prv_data *pdata) +static bool xgbe_phy_status_result(struct xgbe_prv_data *pdata) { struct ethtool_link_ksettings *lks = &pdata->phy.lks; enum xgbe_mode mode; @@ -1367,8 +1367,13 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata) pdata->phy.duplex = DUPLEX_FULL; - if (xgbe_set_mode(pdata, mode) && pdata->an_again) + if (!xgbe_set_mode(pdata, mode)) + return false; + + if (pdata->an_again) xgbe_phy_reconfig_aneg(pdata); + + return true; } static void xgbe_phy_status(struct xgbe_prv_data *pdata) @@ -1398,7 +1403,8 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata) return; } - xgbe_phy_status_result(pdata); + if (xgbe_phy_status_result(pdata)) + return; if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) clear_bit(XGBE_LINK_INIT, &pdata->dev_state); From 9b9e46aa07273ceb96866b2e812b46f1ee0b8d2f Mon Sep 17 00:00:00 2001 From: Osama Muhammad Date: Thu, 25 May 2023 22:27:46 +0500 Subject: [PATCH 117/303] nfcsim.c: Fix error checking for debugfs_create_dir This patch fixes the error checking in nfcsim.c. The DebugFS kernel API is developed in a way that the caller can safely ignore the errors that occur during the creation of DebugFS nodes. Signed-off-by: Osama Muhammad Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/nfc/nfcsim.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c index 44eeb17ae48d9..a55381f80cd6e 100644 --- a/drivers/nfc/nfcsim.c +++ b/drivers/nfc/nfcsim.c @@ -336,10 +336,6 @@ static struct dentry *nfcsim_debugfs_root; static void nfcsim_debugfs_init(void) { nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL); - - if (!nfcsim_debugfs_root) - pr_err("Could not create debugfs entry\n"); - } static void nfcsim_debugfs_remove(void) From 8d73259ef23f449329294dc187932f7470268126 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 25 May 2023 14:20:38 -0700 Subject: [PATCH 118/303] perf ftrace latency: Remove unnecessary "--" from --use-nsec option The option name should not have the dashes. Current version shows four dashes for the option. 
$ perf ftrace latency -h Usage: perf ftrace [] [] or: perf ftrace [] -- [] [] or: perf ftrace {trace|latency} [] [] or: perf ftrace {trace|latency} [] -- [] [] -b, --use-bpf Use BPF to measure function latency -n, ----use-nsec Use nano-second histogram -T, --trace-funcs Show latency of given function Fixes: 84005bb6148618cc ("perf ftrace latency: Add -n/--use-nsec option") Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Changbin Du Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20230525212038.3535851-1-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-ftrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c index 810e3376c7d6c..f9906f52e4fa7 100644 --- a/tools/perf/builtin-ftrace.c +++ b/tools/perf/builtin-ftrace.c @@ -1175,7 +1175,7 @@ int cmd_ftrace(int argc, const char **argv) OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf, "Use BPF to measure function latency"), #endif - OPT_BOOLEAN('n', "--use-nsec", &ftrace.use_nsec, + OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec, "Use nano-second histogram"), OPT_PARENT(common_options), }; From 420c4495b5e56cc11e6802ce98400544f698b393 Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Tue, 9 May 2023 19:39:00 +0000 Subject: [PATCH 119/303] mtd: spi-nor: spansion: make sure local struct does not contain garbage Following errors were seen with um-x86_64-gcc12/um-allyesconfig: + /kisskb/src/drivers/mtd/spi-nor/spansion.c: error: 'op' is used uninitialized [-Werror=uninitialized]: => 495:27, 364:27 Initialise local struct spi_mem_op with all zeros at declaration in order to avoid using garbage data for fields that are not explicitly set afterwards. 
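A minimal sketch of the difference (hypothetical struct, not the spi-mem API definition): an empty initializer zeroes every member at declaration, so fields the code never assigns cannot carry stack garbage into a later read.

struct example_op {
	u8 opcode;
	u8 addr_nbytes;
	u32 dummy;
};

static void example_init(void)
{
	struct example_op bad;		/* members hold indeterminate stack data */
	struct example_op good = {};	/* all members start at zero */

	good.opcode = 0x01;		/* only the fields we care about are set */
	(void)bad;
}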
Reported-by: Geert Uytterhoeven Fixes: c87c9b11c53ce ("mtd: spi-nor: spansion: Determine current address mode") Fixes: 6afcc84080c41 ("mtd: spi-nor: spansion: Add support for Infineon S25FS256T") Signed-off-by: Tudor Ambarus Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20230509193900.948753-1-tudor.ambarus@linaro.org --- drivers/mtd/spi-nor/spansion.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index 15f9a80c10b9b..36876aa849ede 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -361,7 +361,7 @@ static int cypress_nor_determine_addr_mode_by_sr1(struct spi_nor *nor, */ static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor) { - struct spi_mem_op op; + struct spi_mem_op op = {}; u8 addr_mode; int ret; @@ -492,7 +492,7 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor, const struct sfdp_parameter_header *bfpt_header, const struct sfdp_bfpt *bfpt) { - struct spi_mem_op op; + struct spi_mem_op op = {}; int ret; ret = cypress_nor_set_addr_mode_nbytes(nor); From 650a8884a364ff2568b51cde9009cfd43cdae6ad Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 16 May 2023 22:21:24 +0200 Subject: [PATCH 120/303] mtd: rawnand: ingenic: fix empty stub helper definitions A few functions provide an empty interface definition when CONFIG_MTD_NAND_INGENIC_ECC is disabled, but they are accidentally defined as global functions in the header: drivers/mtd/nand/raw/ingenic/ingenic_ecc.h:39:5: error: no previous prototype for 'ingenic_ecc_calculate' drivers/mtd/nand/raw/ingenic/ingenic_ecc.h:46:5: error: no previous prototype for 'ingenic_ecc_correct' drivers/mtd/nand/raw/ingenic/ingenic_ecc.h:53:6: error: no previous prototype for 'ingenic_ecc_release' drivers/mtd/nand/raw/ingenic/ingenic_ecc.h:57:21: error: no previous prototype for 'of_ingenic_ecc_get' Turn them into 'static inline' definitions instead. 
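A minimal sketch of the stub pattern (hypothetical config symbol and function): when the feature is disabled, the header must provide static inline bodies rather than plain function definitions, otherwise every includer emits a global function without a prior prototype and -Wmissing-prototypes fires.

#ifdef CONFIG_EXAMPLE_FEATURE
int example_do_work(int arg);			/* real implementation lives in a .c file */
#else
static inline int example_do_work(int arg)	/* header-only stub, no symbol emitted */
{
	return -ENODEV;
}
#endif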
Fixes: 15de8c6efd0e ("mtd: rawnand: ingenic: Separate top-level and SoC specific code") Signed-off-by: Arnd Bergmann Reviewed-by: Paul Cercueil Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20230516202133.559488-1-arnd@kernel.org --- drivers/mtd/nand/raw/ingenic/ingenic_ecc.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h index 2cda439b5e11b..017868f59f222 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h +++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h @@ -36,25 +36,25 @@ int ingenic_ecc_correct(struct ingenic_ecc *ecc, void ingenic_ecc_release(struct ingenic_ecc *ecc); struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np); #else /* CONFIG_MTD_NAND_INGENIC_ECC */ -int ingenic_ecc_calculate(struct ingenic_ecc *ecc, +static inline int ingenic_ecc_calculate(struct ingenic_ecc *ecc, struct ingenic_ecc_params *params, const u8 *buf, u8 *ecc_code) { return -ENODEV; } -int ingenic_ecc_correct(struct ingenic_ecc *ecc, +static inline int ingenic_ecc_correct(struct ingenic_ecc *ecc, struct ingenic_ecc_params *params, u8 *buf, u8 *ecc_code) { return -ENODEV; } -void ingenic_ecc_release(struct ingenic_ecc *ecc) +static inline void ingenic_ecc_release(struct ingenic_ecc *ecc) { } -struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np) +static inline struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np) { return ERR_PTR(-ENODEV); } From 4095f4d9225a64921a6ee4bb8dbc94c6edc2df08 Mon Sep 17 00:00:00 2001 From: Tudor Ambarus Date: Thu, 18 May 2023 08:54:40 +0000 Subject: [PATCH 121/303] mtd: spi-nor: Fix divide by zero for spi-nor-generic flashes We failed to initialize n_banks for spi-nor-generic flashes, which caused a devide by zero when computing the bank_size. By default we consider that all chips have a single bank. Initialize the default number of banks for spi-nor-generic flashes. Even if the bug is fixed with this simple initialization, check the n_banks value before dividing so that we make sure this kind of bug won't occur again if some other struct instance is created uninitialized. Suggested-by: Todd Brandt Reported-by: Todd Brandt Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217448 Fixes: 9d6c5d64f028 ("mtd: spi-nor: Introduce the concept of bank") Link: https://lore.kernel.org/all/20230516225108.29194-1-todd.e.brandt@intel.com/ Signed-off-by: Tudor Ambarus Tested-by: Todd Brandt Reviewed-by: Miquel Raynal Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20230518085440.2363676-1-tudor.ambarus@linaro.org --- drivers/mtd/spi-nor/core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 0bb0ad14a2fc0..5f29fac8669a3 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -2018,6 +2018,7 @@ static const struct spi_nor_manufacturer *manufacturers[] = { static const struct flash_info spi_nor_generic_flash = { .name = "spi-nor-generic", + .n_banks = 1, /* * JESD216 rev A doesn't specify the page size, therefore we need a * sane default. 
@@ -2921,7 +2922,8 @@ static void spi_nor_late_init_params(struct spi_nor *nor) if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops) spi_nor_init_default_locking_ops(nor); - nor->params->bank_size = div64_u64(nor->params->size, nor->info->n_banks); + if (nor->info->n_banks > 1) + params->bank_size = div64_u64(params->size, nor->info->n_banks); } /** @@ -2987,6 +2989,7 @@ static void spi_nor_init_default_params(struct spi_nor *nor) /* Set SPI NOR sizes. */ params->writesize = 1; params->size = (u64)info->sector_size * info->n_sectors; + params->bank_size = params->size; params->page_size = info->page_size; if (!(info->flags & SPI_NOR_NO_FR)) { From 690917c647e245da76183656400d4926427a8b93 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 24 May 2023 17:03:07 -0700 Subject: [PATCH 122/303] perf bpf filter: Fix a broken perf sample data naming for BPF CO-RE BPF CO-RE requires 3 underscores for the ignored suffix rule but it mistakenly used only 2. Let's fix it. Fixes: 3a8b8fc3174891c4 ("perf bpf filter: Support pre-5.16 kernels where 'mem_hops' isn't in 'union perf_mem_data_src'") Signed-off-by: Namhyung Kim Acked-by: Andrii Nakryiko Acked-by: John Fastabend Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Cc: Song Liu Cc: bpf@vger.kernel.org Link: https://lore.kernel.org/r/20230525000307.3202449-1-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/bpf_skel/sample_filter.bpf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/bpf_skel/sample_filter.bpf.c b/tools/perf/util/bpf_skel/sample_filter.bpf.c index cffe493af1ed5..fb94f52806266 100644 --- a/tools/perf/util/bpf_skel/sample_filter.bpf.c +++ b/tools/perf/util/bpf_skel/sample_filter.bpf.c @@ -25,7 +25,7 @@ struct perf_sample_data___new { } __attribute__((preserve_access_index)); /* new kernel perf_mem_data_src definition */ -union perf_mem_data_src__new { +union perf_mem_data_src___new { __u64 val; struct { __u64 mem_op:5, /* type of opcode */ @@ -108,7 +108,7 @@ static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx, if (entry->part == 7) return kctx->data->data_src.mem_blk; if (entry->part == 8) { - union perf_mem_data_src__new *data = (void *)&kctx->data->data_src; + union perf_mem_data_src___new *data = (void *)&kctx->data->data_src; if (bpf_core_field_exists(data->mem_hops)) return data->mem_hops; From a0c2f92d36d20d72efb27cbe8669037321e92f22 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 15 May 2023 09:50:39 -0700 Subject: [PATCH 123/303] perf arm: Fix include path to cs-etm.h Change "../cs-etm.h" to just "../../../util/cs-etm.h" as ../cs-etm.h doesn't exist. 
Suggested-by: Leo Yan Reviewed-by: Leo Yan Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Mark Rutland Cc: Mathieu Poirier Cc: Mike Leach Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Suzuki Poulouse Cc: Will Deacon Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20230515165039.544045-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/arm/util/pmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c index 860a8b42b4b5b..a9623b128ece2 100644 --- a/tools/perf/arch/arm/util/pmu.c +++ b/tools/perf/arch/arm/util/pmu.c @@ -12,7 +12,7 @@ #include "arm-spe.h" #include "hisi-ptt.h" #include "../../../util/pmu.h" -#include "../cs-etm.h" +#include "../../../util/cs-etm.h" struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused) From 65cd8e5534f873b76b55fb17e942c512ee3699d9 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 17 Apr 2023 12:25:46 -0700 Subject: [PATCH 124/303] perf build: Don't compile demangle-cxx.cpp if not necessary demangle-cxx.cpp requires a C++ compiler, but feature checks may fail because of the absence of this. Add a CONFIG_CXX_DEMANGLE so that the source isn't built if not supported. Copy libbfd and cplus demangle variants to a weak symbol-elf.c version so they aren't dependent on C++. These variants are only built with the build option BUILD_NONDISTRO=1. Committer note: This also handles this build break when a C++ compiler isn't available: CXX /tmp/build/perf/util/demangle-cxx.o /bin/sh: g++: command not found Signed-off-by: Ian Rogers Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Leo Yan Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Qi Liu Cc: Ravi Bangoria Link: https://lore.kernel.org/r/20230417192546.99923-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.config | 1 + tools/perf/util/Build | 2 +- tools/perf/util/symbol-elf.c | 27 +++++++++++++++++++++++++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 70268442f7eec..a794d9eca93d8 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -927,6 +927,7 @@ ifndef NO_DEMANGLE EXTLIBS += -lstdc++ CFLAGS += -DHAVE_CXA_DEMANGLE_SUPPORT CXXFLAGS += -DHAVE_CXA_DEMANGLE_SUPPORT + $(call detected,CONFIG_CXX_DEMANGLE) endif ifdef BUILD_NONDISTRO ifeq ($(filter -liberty,$(EXTLIBS)),) diff --git a/tools/perf/util/Build b/tools/perf/util/Build index bd18fe5f27195..f9df1df1eec03 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -214,7 +214,7 @@ perf-$(CONFIG_ZSTD) += zstd.o perf-$(CONFIG_LIBCAP) += cap.o -perf-y += demangle-cxx.o +perf-$(CONFIG_CXX_DEMANGLE) += demangle-cxx.o perf-y += demangle-ocaml.o perf-y += demangle-java.o perf-y += demangle-rust.o diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index b2ed9cc522655..63882a4db5c74 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -31,6 +31,13 @@ #include #endif +#if defined(HAVE_LIBBFD_SUPPORT) || defined(HAVE_CPLUS_DEMANGLE_SUPPORT) +#ifndef DMGL_PARAMS +#define DMGL_PARAMS (1 << 0) /* Include function args */ +#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */ +#endif +#endif + #ifndef EM_AARCH64 #define EM_AARCH64 183 /* 
ARM 64 bit */ #endif @@ -271,6 +278,26 @@ static bool want_demangle(bool is_kernel_sym) return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle; } +/* + * Demangle C++ function signature, typically replaced by demangle-cxx.cpp + * version. + */ +__weak char *cxx_demangle_sym(const char *str __maybe_unused, bool params __maybe_unused, + bool modifiers __maybe_unused) +{ +#ifdef HAVE_LIBBFD_SUPPORT + int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0); + + return bfd_demangle(NULL, str, flags); +#elif defined(HAVE_CPLUS_DEMANGLE_SUPPORT) + int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0); + + return cplus_demangle(str, flags); +#else + return NULL; +#endif +} + static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name) { char *demangled = NULL; From 251c01e27feee83ff8fa294d33277616c9032dd0 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 26 May 2023 15:30:23 -0300 Subject: [PATCH 125/303] perf bpf: Do not use llvm-strip on BPF binary llvm-strip is not really required. Remove this dependency to make it easier to build perf with BUILD_BPF_SKEL=1. Committer notes: This removes the need for the 'llvm' package just to get llvm-strip. Reported-by: Arnaldo Carvalho de Melo Signed-off-by: Song Liu Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Ian Rogers Cc: Jiri Olsa Cc: Namhyung Kim Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.perf | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 1593c5dcaa9e8..f48794816d82a 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -181,7 +181,6 @@ HOSTCC ?= gcc HOSTLD ?= ld HOSTAR ?= ar CLANG ?= clang -LLVM_STRIP ?= llvm-strip PKG_CONFIG = $(CROSS_COMPILE)pkg-config @@ -1083,7 +1082,7 @@ $(BPFTOOL): | $(SKEL_TMP_OUT) $(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) | $(SKEL_TMP_OUT) $(QUIET_CLANG)$(CLANG) -g -O2 -target bpf -Wall -Werror $(BPF_INCLUDE) $(TOOLS_UAPI_INCLUDE) \ - -c $(filter util/bpf_skel/%.bpf.c,$^) -o $@ && $(LLVM_STRIP) -g $@ + -c $(filter util/bpf_skel/%.bpf.c,$^) -o $@ $(SKEL_OUT)/%.skel.h: $(SKEL_TMP_OUT)/%.bpf.o | $(BPFTOOL) $(QUIET_GENSKEL)$(BPFTOOL) gen skeleton $< > $@ From 15d4371bac978bb96b36c37e822cdae24c0dcd77 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 22 May 2023 11:26:04 +0100 Subject: [PATCH 126/303] perf cs-etm: Copy kernel coresight-pmu.h header Copy the kernel version of the header to fix the header diff build warning. Some new definitions were only added to the tools side header, but these are only used in Perf so move them to a different header. 
Signed-off-by: James Clark Reviewed-by: Mike Leach Link: https://lore.kernel.org/r/20230522102604.1081416-1-james.clark@arm.com Cc: Mark Rutland Cc: Suzuki K Poulose Cc: Ian Rogers Cc: Peter Zijlstra Cc: Adrian Hunter Cc: acme@kernel.org Cc: Jiri Olsa Cc: Namhyung Kim Cc: Will Deacon Cc: Leo Yan Cc: Alexander Shishkin Cc: linux-arm-kernel@lists.infradead.org Cc: coresight@lists.linaro.org Cc: siyanteng@loongson.cn Cc: John Garry Cc: Ingo Molnar Cc: linux-kernel@vger.kernel.org Cc: linux-perf-users@vger.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/linux/coresight-pmu.h | 13 ------------- tools/perf/util/cs-etm.h | 13 +++++++++++++ 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tools/include/linux/coresight-pmu.h b/tools/include/linux/coresight-pmu.h index cef3b1c25335a..51ac441a37c3e 100644 --- a/tools/include/linux/coresight-pmu.h +++ b/tools/include/linux/coresight-pmu.h @@ -21,19 +21,6 @@ */ #define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) (0x10 + (cpu * 2)) -/* CoreSight trace ID is currently the bottom 7 bits of the value */ -#define CORESIGHT_TRACE_ID_VAL_MASK GENMASK(6, 0) - -/* - * perf record will set the legacy meta data values as unused initially. - * This allows perf report to manage the decoders created when dynamic - * allocation in operation. - */ -#define CORESIGHT_TRACE_ID_UNUSED_FLAG BIT(31) - -/* Value to set for unused trace ID values */ -#define CORESIGHT_TRACE_ID_UNUSED_VAL 0x7F - /* * Below are the definition of bit offsets for perf option, and works as * arbitrary values for all ETM versions. diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h index 70cac0375b340..ecca40787ac9a 100644 --- a/tools/perf/util/cs-etm.h +++ b/tools/perf/util/cs-etm.h @@ -227,6 +227,19 @@ struct cs_etm_packet_queue { #define INFO_HEADER_SIZE (sizeof(((struct perf_record_auxtrace_info *)0)->type) + \ sizeof(((struct perf_record_auxtrace_info *)0)->reserved__)) +/* CoreSight trace ID is currently the bottom 7 bits of the value */ +#define CORESIGHT_TRACE_ID_VAL_MASK GENMASK(6, 0) + +/* + * perf record will set the legacy meta data values as unused initially. + * This allows perf report to manage the decoders created when dynamic + * allocation in operation. + */ +#define CORESIGHT_TRACE_ID_UNUSED_FLAG BIT(31) + +/* Value to set for unused trace ID values */ +#define CORESIGHT_TRACE_ID_UNUSED_VAL 0x7F + int cs_etm__process_auxtrace_info(union perf_event *event, struct perf_session *session); struct perf_event_attr *cs_etm_get_default_config(struct perf_pmu *pmu); From c8268a9b91055c992909a0845cfa59c624908da8 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 26 May 2023 15:45:03 -0300 Subject: [PATCH 127/303] tools headers UAPI: Sync the linux/in.h with the kernel sources Picking the changes from: 3632679d9e4f879f ("ipv{4,6}/raw: fix output xfrm lookup wrt protocol") That includes a define (IP_PROTOCOL) that isn't being used in generating any id -> string table used by 'perf trace'. 
Addresses this perf build warning: Warning: Kernel ABI header at 'tools/include/uapi/linux/in.h' differs from latest version at 'include/uapi/linux/in.h' diff -u tools/include/uapi/linux/in.h include/uapi/linux/in.h Cc: Adrian Hunter Cc: Ian Rogers Cc: Jiri Olsa Cc: Namhyung Kim Cc: Nicolas Dichtel Cc: Paolo Abeni Link: https://lore.kernel.org/lkml/ZHD/Ms0DMq7viaq+@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/linux/in.h | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h index 4b7f2df66b995..e682ab628dfa6 100644 --- a/tools/include/uapi/linux/in.h +++ b/tools/include/uapi/linux/in.h @@ -163,6 +163,7 @@ struct in_addr { #define IP_MULTICAST_ALL 49 #define IP_UNICAST_IF 50 #define IP_LOCAL_PORT_RANGE 51 +#define IP_PROTOCOL 52 #define MCAST_EXCLUDE 0 #define MCAST_INCLUDE 1 From c041d33bf7ec731bb71f47e4d45a7aec9e40b1b9 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 19 May 2023 16:57:57 -0700 Subject: [PATCH 128/303] perf evsel: Separate bpf_counter_list and bpf_filters, can be used at the same time 'struct evsel' uses a union for the two lists. This turned out to be error prone. For example: [root@quaco ~]# perf stat --bpf-prog 5246 Error: cpu-clock event does not have sample flags 66c660 failed to set filter "(null)" on event cpu-clock with 2 (No such file or directory) [root@quaco ~]# perf stat --bpf-prog 5246 Fix this issue by separating the two lists. Fixes: 56ec9457a4a20c5e ("perf bpf filter: Implement event sample filtering") Signed-off-by: Song Liu Acked-by: Namhyung Kim Cc: Jiri Olsa Cc: Namhyung Kim Cc: kernel-team@meta.com Link: https://lore.kernel.org/r/20230519235757.3613719-1-song@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 1 + tools/perf/util/evsel.h | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 2f5910b31fa93..c2dbb5647e754 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -282,6 +282,7 @@ void evsel__init(struct evsel *evsel, evsel->bpf_fd = -1; INIT_LIST_HEAD(&evsel->config_terms); INIT_LIST_HEAD(&evsel->bpf_counter_list); + INIT_LIST_HEAD(&evsel->bpf_filters); perf_evsel__object.init(evsel); evsel->sample_size = __evsel__sample_size(attr->sample_type); evsel__calc_id_pos(evsel); diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index df8928745fc65..0f54f28a69c25 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -151,10 +151,8 @@ struct evsel { */ struct bpf_counter_ops *bpf_counter_ops; - union { - struct list_head bpf_counter_list; /* for perf-stat -b */ - struct list_head bpf_filters; /* for perf-record --filter */ - }; + struct list_head bpf_counter_list; /* for perf-stat -b */ + struct list_head bpf_filters; /* for perf-record --filter */ /* for perf-stat --use-bpf */ int bperf_leader_prog_fd; From 45c2f36871955b51b4ce083c447388d8c72d6b91 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 15 May 2023 11:18:21 +0200 Subject: [PATCH 129/303] btrfs: call btrfs_orig_bbio_end_io in btrfs_end_bio_work MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When I implemented the storage layer bio splitting, I was under the assumption that we'll never split metadata bios. But Qu reminded me that this can actually happen with very old file systems with unaligned metadata chunks and RAID0. 
I still haven't seen such a case in practice, but we better handled this case, especially as it is fairly easily to do not calling the ->end_іo method directly in btrfs_end_io_work, and using the proper btrfs_orig_bbio_end_io helper instead. In addition to the old file system with unaligned metadata chunks case documented in the commit log, the combination of the new scrub code with Johannes pending raid-stripe-tree also triggers this case. We spent some time debugging it and found that this patch solves the problem. Fixes: 103c19723c80 ("btrfs: split the bio submission path into a separate file") CC: stable@vger.kernel.org # 6.3+ Reviewed-by: Johannes Thumshirn Tested-by: Johannes Thumshirn Signed-off-by: Christoph Hellwig Signed-off-by: David Sterba --- fs/btrfs/bio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c index 5379c4714905e..35d34c731d72c 100644 --- a/fs/btrfs/bio.c +++ b/fs/btrfs/bio.c @@ -330,7 +330,7 @@ static void btrfs_end_bio_work(struct work_struct *work) if (bbio->inode && !(bbio->bio.bi_opf & REQ_META)) btrfs_check_read_bio(bbio, bbio->bio.bi_private); else - bbio->end_io(bbio); + btrfs_orig_bbio_end_io(bbio); } static void btrfs_simple_end_io(struct bio *bio) From 8fd9f4232d8152c650fd15127f533a0f6d0a4b2b Mon Sep 17 00:00:00 2001 From: Shida Zhang Date: Tue, 16 May 2023 09:34:30 +0800 Subject: [PATCH 130/303] btrfs: fix an uninitialized variable warning in btrfs_log_inode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes the following warning reported by gcc 10.2.1 under x86_64: ../fs/btrfs/tree-log.c: In function ‘btrfs_log_inode’: ../fs/btrfs/tree-log.c:6211:9: error: ‘last_range_start’ may be used uninitialized in this function [-Werror=maybe-uninitialized] 6211 | ret = insert_dir_log_key(trans, log, path, key.objectid, | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 6212 | first_dir_index, last_dir_index); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ../fs/btrfs/tree-log.c:6161:6: note: ‘last_range_start’ was declared here 6161 | u64 last_range_start; | ^~~~~~~~~~~~~~~~ This might be a false positive fixed in later compiler versions but we want to have it fixed. 
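A minimal sketch of why this class of warning appears (hypothetical loop, not the tree-log code): the variable is only written on iterations that take a particular branch, and the compiler cannot prove that branch ran before the value is read, so initializing it at declaration silences the report.

static u64 example_first_value(const u64 *vals, int n)
{
	u64 first = 0;	/* without '= 0' gcc may report: used uninitialized */
	bool seen = false;
	int i;

	for (i = 0; i < n; i++) {
		if (!seen) {
			first = vals[i];
			seen = true;
		}
	}

	return first;	/* read the compiler cannot tie to the conditional write */
}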
Reported-by: k2ci Reviewed-by: Anand Jain Signed-off-by: Shida Zhang Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 9b212e8c70cc5..d2755d5e338b5 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -6158,7 +6158,7 @@ static int log_delayed_deletions_incremental(struct btrfs_trans_handle *trans, { struct btrfs_root *log = inode->root->log_root; const struct btrfs_delayed_item *curr; - u64 last_range_start; + u64 last_range_start = 0; u64 last_range_end = 0; struct btrfs_key key; From 5ad9b4719fc9bc4715c7e19875a962095b0577e7 Mon Sep 17 00:00:00 2001 From: pengfuyuan Date: Tue, 23 May 2023 15:09:55 +0800 Subject: [PATCH 131/303] btrfs: fix csum_tree_block page iteration to avoid tripping on -Werror=array-bounds MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When compiling on a MIPS 64-bit machine we get these warnings: In file included from ./arch/mips/include/asm/cacheflush.h:13, from ./include/linux/cacheflush.h:5, from ./include/linux/highmem.h:8, from ./include/linux/bvec.h:10, from ./include/linux/blk_types.h:10, from ./include/linux/blkdev.h:9, from fs/btrfs/disk-io.c:7: fs/btrfs/disk-io.c: In function ‘csum_tree_block’: fs/btrfs/disk-io.c:100:34: error: array subscript 1 is above array bounds of ‘struct page *[1]’ [-Werror=array-bounds] 100 | kaddr = page_address(buf->pages[i]); | ~~~~~~~~~~^~~ ./include/linux/mm.h:2135:48: note: in definition of macro ‘page_address’ 2135 | #define page_address(page) lowmem_page_address(page) | ^~~~ cc1: all warnings being treated as errors We can check if i overflows to solve the problem. However, this doesn't make much sense, since i == 1 and num_pages == 1 doesn't execute the body of the loop. In addition, i < num_pages can also ensure that buf->pages[i] will not cross the boundary. Unfortunately, this doesn't help with the problem observed here: gcc still complains. To fix this add a compile-time condition for the extent buffer page array size limit, which would eventually lead to eliminating the whole for loop. CC: stable@vger.kernel.org # 5.10+ Signed-off-by: pengfuyuan Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 6de6dcf2743e2..2b1b227505f3a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -96,7 +96,7 @@ static void csum_tree_block(struct extent_buffer *buf, u8 *result) crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE, first_page_part - BTRFS_CSUM_SIZE); - for (i = 1; i < num_pages; i++) { + for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) { kaddr = page_address(buf->pages[i]); crypto_shash_update(shash, kaddr, PAGE_SIZE); } From 48b47f0caaa8a9f05ed803cb4f335fa3a7bfc622 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Fri, 12 May 2023 17:05:41 +0900 Subject: [PATCH 132/303] ksmbd: fix uninitialized pointer read in ksmbd_vfs_rename() Uninitialized rd.delegated_inode can be used in vfs_rename(). Fix this by setting rd.delegated_inode to NULL to avoid the uninitialized read. 
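As a note on the design, a single designated initializer would avoid this class of bug entirely, because every member that is not named explicitly is zero-initialized, so delegated_inode could never be left holding stack garbage. The following is only a sketch of that style, not a drop-in replacement: the field names are taken from the hunk below, the old_*/idmap members are assumed to be set as in the original function, and struct renamedata may carry additional members depending on kernel version.

	struct renamedata rd = {
		/* old_*/idmap members set as in the original function */
		.new_dir	= new_path.dentry->d_inode,
		.new_dentry	= new_dentry,
		.flags		= flags,
		/* .delegated_inode is implicitly NULL here */
	};

	err = vfs_rename(&rd);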
Fixes: 74d7970febf7 ("ksmbd: fix racy issue from using ->d_parent and ->d_name") Reported-by: Coverity Scan Signed-off-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/vfs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index 778c152708e43..9bdb01c5b2015 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -743,6 +743,7 @@ int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path, rd.new_dir = new_path.dentry->d_inode, rd.new_dentry = new_dentry, rd.flags = flags, + rd.delegated_inode = NULL, err = vfs_rename(&rd); if (err) ksmbd_debug(VFS, "vfs_rename failed err %d\n", err); From df14afeed2e6c1bbadef7d2f9c46887bbd6d8d94 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Sun, 14 May 2023 10:02:27 +0900 Subject: [PATCH 133/303] ksmbd: fix uninitialized pointer read in smb2_create_link() There is a case where file_present is true while path is still uninitialized. This patch makes file_present default to false and only sets it to true once path has been initialized. Fixes: 74d7970febf7 ("ksmbd: fix racy issue from using ->d_parent and ->d_name") Reported-by: Coverity Scan Signed-off-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/smb2pdu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 717bcd20545b7..1632b2a1e5168 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -5506,7 +5506,7 @@ static int smb2_create_link(struct ksmbd_work *work, { char *link_name = NULL, *target_name = NULL, *pathname = NULL; struct path path; - bool file_present = true; + bool file_present = false; int rc; if (buf_len < (u64)sizeof(struct smb2_file_link_info) + @@ -5539,8 +5539,8 @@ static int smb2_create_link(struct ksmbd_work *work, if (rc) { if (rc != -ENOENT) goto out; - file_present = false; - } + } else + file_present = true; if (file_info->ReplaceIfExists) { if (file_present) { From 84c5aa47925a1f40d698b6a6a2bf67e99617433d Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Fri, 12 May 2023 23:29:12 +0900 Subject: [PATCH 134/303] ksmbd: fix credit count leakage This patch fixes the failure of the smb2.credits.single_req_credits_granted test. When the client requests 8192 credits, ksmbd grants only 8191. ksmbd should grant the maximum possible number of credits to the client without exceeding the connection's credit limit.
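Reduced to its essence, the granting rule described above is "grant the smaller of what was requested and the headroom left under the per-connection maximum". The following is a simplified, self-contained model of that rule; the names are illustrative and not the ksmbd structures.

	#include <stdio.h>

	static unsigned short grant_credits(unsigned short requested,
					    unsigned short total_credits,
					    unsigned short max_credits)
	{
		/* headroom left before the connection hits its credit limit */
		unsigned short headroom = max_credits - total_credits;

		return requested < headroom ? requested : headroom;
	}

	int main(void)
	{
		/* max 8192, nothing granted yet, client asks for 8192 */
		printf("%u\n", grant_credits(8192, 0, 8192));	/* prints 8192 */
		return 0;
	}

Computing the headroom from total_credits (instead of subtracting the current request's credit charge) is what lets a single request receive the full 8192 credits when nothing has been granted yet.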
Cc: stable@vger.kernel.org Signed-off-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/smb2pdu.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 1632b2a1e5168..bec885c007ce2 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -326,13 +326,9 @@ int smb2_set_rsp_credits(struct ksmbd_work *work) if (hdr->Command == SMB2_NEGOTIATE) aux_max = 1; else - aux_max = conn->vals->max_credits - credit_charge; + aux_max = conn->vals->max_credits - conn->total_credits; credits_granted = min_t(unsigned short, credits_requested, aux_max); - if (conn->vals->max_credits - conn->total_credits < credits_granted) - credits_granted = conn->vals->max_credits - - conn->total_credits; - conn->total_credits += credits_granted; work->credits_granted += credits_granted; From d738950f112c8f40f0515fe967db998e8235a175 Mon Sep 17 00:00:00 2001 From: Kuan-Ting Chen Date: Fri, 19 May 2023 22:59:28 +0900 Subject: [PATCH 135/303] ksmbd: fix slab-out-of-bounds read in smb2_handle_negotiate Check request_buf length first to avoid out-of-bounds read by req->DialectCount. [ 3350.990282] BUG: KASAN: slab-out-of-bounds in smb2_handle_negotiate+0x35d7/0x3e60 [ 3350.990282] Read of size 2 at addr ffff88810ad61346 by task kworker/5:0/276 [ 3351.000406] Workqueue: ksmbd-io handle_ksmbd_work [ 3351.003499] Call Trace: [ 3351.006473] [ 3351.006473] dump_stack_lvl+0x8d/0xe0 [ 3351.006473] print_report+0xcc/0x620 [ 3351.006473] kasan_report+0x92/0xc0 [ 3351.006473] smb2_handle_negotiate+0x35d7/0x3e60 [ 3351.014760] ksmbd_smb_negotiate_common+0x7a7/0xf00 [ 3351.014760] handle_ksmbd_work+0x3f7/0x12d0 [ 3351.014760] process_one_work+0xa85/0x1780 Cc: stable@vger.kernel.org Signed-off-by: Kuan-Ting Chen Acked-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/smb2pdu.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index bec885c007ce2..83c668243c95a 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -1053,16 +1053,16 @@ int smb2_handle_negotiate(struct ksmbd_work *work) return rc; } - if (req->DialectCount == 0) { - pr_err("malformed packet\n"); + smb2_buf_len = get_rfc1002_len(work->request_buf); + smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects); + if (smb2_neg_size > smb2_buf_len) { rsp->hdr.Status = STATUS_INVALID_PARAMETER; rc = -EINVAL; goto err_out; } - smb2_buf_len = get_rfc1002_len(work->request_buf); - smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects); - if (smb2_neg_size > smb2_buf_len) { + if (req->DialectCount == 0) { + pr_err("malformed packet\n"); rsp->hdr.Status = STATUS_INVALID_PARAMETER; rc = -EINVAL; goto err_out; From 0512a5f89e1fae74251fde6893ff634f1c96c6fb Mon Sep 17 00:00:00 2001 From: Kuan-Ting Chen Date: Fri, 19 May 2023 23:00:24 +0900 Subject: [PATCH 136/303] ksmbd: fix multiple out-of-bounds read during context decoding Check the remaining data length before accessing the context structure to ensure that the entire structure is contained within the packet. Additionally, since the context data length `ctxt_len` has already been checked against the total packet length `len_of_ctxts`, update the comparison to use `ctxt_len`. 
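The validation order described above, and implemented in the hunks that follow, is worth spelling out as a generic pattern: first make sure the fixed-size header fits in the remaining context length, only then read the element count, and finally make sure the variable-length tail also fits. A small sketch with illustrative (non-ksmbd) names:

	#include <stddef.h>
	#include <errno.h>

	struct neg_ctx_hdr {
		unsigned short type;
		unsigned short data_length;
		unsigned short count;
		unsigned short values[];	/* count elements follow */
	};

	static int decode_ctx(const void *buf, size_t ctxt_len)
	{
		const struct neg_ctx_hdr *hdr = buf;
		size_t payload;

		/* 1. the fixed header must fit before any field is read */
		if (ctxt_len < sizeof(*hdr))
			return -EINVAL;

		/* 2. only now is it safe to trust the element count... */
		payload = (size_t)hdr->count * sizeof(hdr->values[0]);

		/* 3. ...and the variable part must fit in this context too */
		if (ctxt_len < sizeof(*hdr) + payload)
			return -EINVAL;

		return 0;
	}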
Cc: stable@vger.kernel.org Signed-off-by: Kuan-Ting Chen Acked-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/smb2pdu.c | 53 ++++++++++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 19 deletions(-) diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 83c668243c95a..a1939ff7c742a 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -845,13 +845,14 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn, static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn, struct smb2_preauth_neg_context *pneg_ctxt, - int len_of_ctxts) + int ctxt_len) { /* * sizeof(smb2_preauth_neg_context) assumes SMB311_SALT_SIZE Salt, * which may not be present. Only check for used HashAlgorithms[1]. */ - if (len_of_ctxts < MIN_PREAUTH_CTXT_DATA_LEN) + if (ctxt_len < + sizeof(struct smb2_neg_context) + MIN_PREAUTH_CTXT_DATA_LEN) return STATUS_INVALID_PARAMETER; if (pneg_ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512) @@ -863,15 +864,23 @@ static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn, static void decode_encrypt_ctxt(struct ksmbd_conn *conn, struct smb2_encryption_neg_context *pneg_ctxt, - int len_of_ctxts) + int ctxt_len) { - int cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount); - int i, cphs_size = cph_cnt * sizeof(__le16); + int cph_cnt; + int i, cphs_size; + + if (sizeof(struct smb2_encryption_neg_context) > ctxt_len) { + pr_err("Invalid SMB2_ENCRYPTION_CAPABILITIES context size\n"); + return; + } conn->cipher_type = 0; + cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount); + cphs_size = cph_cnt * sizeof(__le16); + if (sizeof(struct smb2_encryption_neg_context) + cphs_size > - len_of_ctxts) { + ctxt_len) { pr_err("Invalid cipher count(%d)\n", cph_cnt); return; } @@ -919,15 +928,22 @@ static void decode_compress_ctxt(struct ksmbd_conn *conn, static void decode_sign_cap_ctxt(struct ksmbd_conn *conn, struct smb2_signing_capabilities *pneg_ctxt, - int len_of_ctxts) + int ctxt_len) { - int sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount); - int i, sign_alos_size = sign_algo_cnt * sizeof(__le16); + int sign_algo_cnt; + int i, sign_alos_size; + + if (sizeof(struct smb2_signing_capabilities) > ctxt_len) { + pr_err("Invalid SMB2_SIGNING_CAPABILITIES context length\n"); + return; + } conn->signing_negotiated = false; + sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount); + sign_alos_size = sign_algo_cnt * sizeof(__le16); if (sizeof(struct smb2_signing_capabilities) + sign_alos_size > - len_of_ctxts) { + ctxt_len) { pr_err("Invalid signing algorithm count(%d)\n", sign_algo_cnt); return; } @@ -965,18 +981,16 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn, len_of_ctxts = len_of_smb - offset; while (i++ < neg_ctxt_cnt) { - int clen; - - /* check that offset is not beyond end of SMB */ - if (len_of_ctxts == 0) - break; + int clen, ctxt_len; if (len_of_ctxts < sizeof(struct smb2_neg_context)) break; pctx = (struct smb2_neg_context *)((char *)pctx + offset); clen = le16_to_cpu(pctx->DataLength); - if (clen + sizeof(struct smb2_neg_context) > len_of_ctxts) + ctxt_len = clen + sizeof(struct smb2_neg_context); + + if (ctxt_len > len_of_ctxts) break; if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES) { @@ -987,7 +1001,7 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn, status = decode_preauth_ctxt(conn, (struct smb2_preauth_neg_context *)pctx, - len_of_ctxts); + ctxt_len); if (status != STATUS_SUCCESS) break; } else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) { @@ -998,7 
+1012,7 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn, decode_encrypt_ctxt(conn, (struct smb2_encryption_neg_context *)pctx, - len_of_ctxts); + ctxt_len); } else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES) { ksmbd_debug(SMB, "deassemble SMB2_COMPRESSION_CAPABILITIES context\n"); @@ -1017,9 +1031,10 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn, } else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES) { ksmbd_debug(SMB, "deassemble SMB2_SIGNING_CAPABILITIES context\n"); + decode_sign_cap_ctxt(conn, (struct smb2_signing_capabilities *)pctx, - len_of_ctxts); + ctxt_len); } /* offsets must be 8 byte aligned */ From 36322523dddb11107e9f7f528675a0dec2536103 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Fri, 19 May 2023 23:09:48 +0900 Subject: [PATCH 137/303] ksmbd: fix UAF issue from opinfo->conn If opinfo->conn is another connection and while ksmbd send oplock break request to cient on current connection, The connection for opinfo->conn can be disconnect and conn could be freed. When sending oplock break request, this ksmbd_conn can be used and cause user-after-free issue. When getting opinfo from the list, ksmbd check connection is being released. If it is not released, Increase ->r_count to wait that connection is freed. Cc: stable@vger.kernel.org Reported-by: Per Forlin Tested-by: Per Forlin Signed-off-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/oplock.c | 72 +++++++++++++++++++++++++++--------------- 1 file changed, 47 insertions(+), 25 deletions(-) diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c index 6d1ccb9998939..db181bdad73a2 100644 --- a/fs/smb/server/oplock.c +++ b/fs/smb/server/oplock.c @@ -157,13 +157,42 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci) rcu_read_lock(); opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info, op_entry); - if (opinfo && !atomic_inc_not_zero(&opinfo->refcount)) - opinfo = NULL; + if (opinfo) { + if (!atomic_inc_not_zero(&opinfo->refcount)) + opinfo = NULL; + else { + atomic_inc(&opinfo->conn->r_count); + if (ksmbd_conn_releasing(opinfo->conn)) { + atomic_dec(&opinfo->conn->r_count); + atomic_dec(&opinfo->refcount); + opinfo = NULL; + } + } + } + rcu_read_unlock(); return opinfo; } +static void opinfo_conn_put(struct oplock_info *opinfo) +{ + struct ksmbd_conn *conn; + + if (!opinfo) + return; + + conn = opinfo->conn; + /* + * Checking waitqueue to dropping pending requests on + * disconnection. waitqueue_active is safe because it + * uses atomic operation for condition. + */ + if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q)) + wake_up(&conn->r_count_q); + opinfo_put(opinfo); +} + void opinfo_put(struct oplock_info *opinfo) { if (!atomic_dec_and_test(&opinfo->refcount)) @@ -666,13 +695,6 @@ static void __smb2_oplock_break_noti(struct work_struct *wk) out: ksmbd_free_work_struct(work); - /* - * Checking waitqueue to dropping pending requests on - * disconnection. waitqueue_active is safe because it - * uses atomic operation for condition. 
- */ - if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q)) - wake_up(&conn->r_count_q); } /** @@ -706,7 +728,6 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo) work->conn = conn; work->sess = opinfo->sess; - atomic_inc(&conn->r_count); if (opinfo->op_state == OPLOCK_ACK_WAIT) { INIT_WORK(&work->work, __smb2_oplock_break_noti); ksmbd_queue_work(work); @@ -776,13 +797,6 @@ static void __smb2_lease_break_noti(struct work_struct *wk) out: ksmbd_free_work_struct(work); - /* - * Checking waitqueue to dropping pending requests on - * disconnection. waitqueue_active is safe because it - * uses atomic operation for condition. - */ - if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q)) - wake_up(&conn->r_count_q); } /** @@ -822,7 +836,6 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo) work->conn = conn; work->sess = opinfo->sess; - atomic_inc(&conn->r_count); if (opinfo->op_state == OPLOCK_ACK_WAIT) { list_for_each_safe(tmp, t, &opinfo->interim_list) { struct ksmbd_work *in_work; @@ -1144,8 +1157,10 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid, } prev_opinfo = opinfo_get_list(ci); if (!prev_opinfo || - (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) + (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) { + opinfo_conn_put(prev_opinfo); goto set_lev; + } prev_op_has_lease = prev_opinfo->is_lease; if (prev_op_has_lease) prev_op_state = prev_opinfo->o_lease->state; @@ -1153,19 +1168,19 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid, if (share_ret < 0 && prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) { err = share_ret; - opinfo_put(prev_opinfo); + opinfo_conn_put(prev_opinfo); goto err_out; } if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH && prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) { - opinfo_put(prev_opinfo); + opinfo_conn_put(prev_opinfo); goto op_break_not_needed; } list_add(&work->interim_entry, &prev_opinfo->interim_list); err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II); - opinfo_put(prev_opinfo); + opinfo_conn_put(prev_opinfo); if (err == -ENOENT) goto set_lev; /* Check all oplock was freed by close */ @@ -1228,14 +1243,14 @@ static void smb_break_all_write_oplock(struct ksmbd_work *work, return; if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH && brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) { - opinfo_put(brk_opinfo); + opinfo_conn_put(brk_opinfo); return; } brk_opinfo->open_trunc = is_trunc; list_add(&work->interim_entry, &brk_opinfo->interim_list); oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II); - opinfo_put(brk_opinfo); + opinfo_conn_put(brk_opinfo); } /** @@ -1263,6 +1278,13 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp, list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) { if (!atomic_inc_not_zero(&brk_op->refcount)) continue; + + atomic_inc(&brk_op->conn->r_count); + if (ksmbd_conn_releasing(brk_op->conn)) { + atomic_dec(&brk_op->conn->r_count); + continue; + } + rcu_read_unlock(); if (brk_op->is_lease && (brk_op->o_lease->state & (~(SMB2_LEASE_READ_CACHING_LE | @@ -1292,7 +1314,7 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp, brk_op->open_trunc = is_trunc; oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE); next: - opinfo_put(brk_op); + opinfo_conn_put(brk_op); rcu_read_lock(); } rcu_read_unlock(); From 6cc2268f5647cbfde3d4fc2e4ee005070ea3a8d2 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Fri, 19 May 2023 23:11:33 +0900 Subject: [PATCH 
138/303] ksmbd: fix incorrect AllocationSize set in smb2_get_info If the filesystem supports sparse files, ksmbd should report the allocated size using ->i_blocks instead of stat->size. This fixes the generic/694 xfstest. Cc: stable@vger.kernel.org Signed-off-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/smb2pdu.c | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index a1939ff7c742a..7a81541de6021 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -4369,21 +4369,6 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp, return 0; } -static unsigned long long get_allocation_size(struct inode *inode, - struct kstat *stat) -{ - unsigned long long alloc_size = 0; - - if (!S_ISDIR(stat->mode)) { - if ((inode->i_blocks << 9) <= stat->size) - alloc_size = stat->size; - else - alloc_size = inode->i_blocks << 9; - } - - return alloc_size; -} - static void get_file_standard_info(struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org) { @@ -4398,7 +4383,7 @@ static void get_file_standard_info(struct smb2_query_info_rsp *rsp, sinfo = (struct smb2_file_standard_info *)rsp->Buffer; delete_pending = ksmbd_inode_pending_delete(fp); - sinfo->AllocationSize = cpu_to_le64(get_allocation_size(inode, &stat)); + sinfo->AllocationSize = cpu_to_le64(inode->i_blocks << 9); sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size); sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending); sinfo->DeletePending = delete_pending; @@ -4463,7 +4448,7 @@ static int get_file_all_info(struct ksmbd_work *work, file_info->Attributes = fp->f_ci->m_fattr; file_info->Pad1 = 0; file_info->AllocationSize = - cpu_to_le64(get_allocation_size(inode, &stat)); + cpu_to_le64(inode->i_blocks << 9); file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size); file_info->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending); @@ -4652,7 +4637,7 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp, file_info->ChangeTime = cpu_to_le64(time); file_info->Attributes = fp->f_ci->m_fattr; file_info->AllocationSize = - cpu_to_le64(get_allocation_size(inode, &stat)); + cpu_to_le64(inode->i_blocks << 9); file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size); file_info->Reserved = cpu_to_le32(0); rsp->OutputBufferLength = From 6fe55c2799bc29624770c26f98ba7b06214f43e0 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Thu, 25 May 2023 00:13:38 +0900 Subject: [PATCH 139/303] ksmbd: call putname after using the last component The last component points into the filename struct. Currently putname() is called right after vfs_path_parent_lookup(), and the last component is then used for lookup_one_qstr_excl(). By that point the name in the last component has already been freed by the earlier putname() call, which causes a file lookup failure when running the generic/464 xfstest.
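A schematic view of the corrected ordering may help: `last` borrows memory owned by `filename`, so the name may only be released once the last-component lookup has run, and it must still be released on every exit path. This is only a sketch built from the commit message and the hunk below, not the literal ksmbd function, and the exact lookup_one_qstr_excl() arguments are assumed.

	err = vfs_path_parent_lookup(filename, flags, &parent_path,
				     &last, &type, root_share_path);
	if (err) {
		putname(filename);	/* early exit: still release it */
		return err;
	}

	/* `last` points into memory owned by `filename`... */
	d = lookup_one_qstr_excl(&last, parent_path.dentry, 0);
	/* ... finish building *path from d ... */

	/* ...so the name is only released once the lookup is done */
	putname(filename);
	return 0;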
Fixes: 74d7970febf7 ("ksmbd: fix racy issue from using ->d_parent and ->d_name") Signed-off-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/vfs.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index 9bdb01c5b2015..6f302919e9f77 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -86,12 +86,14 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf, err = vfs_path_parent_lookup(filename, flags, &parent_path, &last, &type, root_share_path); - putname(filename); - if (err) + if (err) { + putname(filename); return err; + } if (unlikely(type != LAST_NORM)) { path_put(&parent_path); + putname(filename); return -ENOENT; } @@ -108,12 +110,14 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf, path->dentry = d; path->mnt = share_conf->vfs_path.mnt; path_put(&parent_path); + putname(filename); return 0; err_out: inode_unlock(parent_path.dentry->d_inode); path_put(&parent_path); + putname(filename); return -ENOENT; } From 396ac4c982f6d6cd7c0293a546a0ba5d77fe7f90 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 26 May 2023 15:01:33 +0300 Subject: [PATCH 140/303] smb: delete an unnecessary statement We don't need to set the list iterators to NULL before a list_for_each_entry() loop because they are assigned inside the macro. Signed-off-by: Dan Carpenter Reviewed-by: Mukesh Ojha Signed-off-by: Steve French --- fs/smb/client/smb2ops.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index 5065398665f11..6e3be58cfe49f 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -618,7 +618,6 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, * Add a new one instead */ spin_lock(&ses->iface_lock); - iface = niface = NULL; list_for_each_entry_safe(iface, niface, &ses->iface_list, iface_head) { ret = iface_cmp(iface, &tmp_iface); From 4ea0bf4b98d66a7a790abb285539f395596bae92 Mon Sep 17 00:00:00 2001 From: Ben Noordhuis Date: Sat, 6 May 2023 11:55:02 +0200 Subject: [PATCH 141/303] io_uring: undeprecate epoll_ctl support Libuv recently started using it so there is at least one consumer now. 
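For context, this is what the now-undeprecated operation looks like from userspace: adding a descriptor to an epoll set through io_uring rather than calling epoll_ctl(2) directly. The sketch below assumes liburing and its io_uring_prep_epoll_ctl() helper; error handling is trimmed to the essentials.

	#include <liburing.h>
	#include <sys/epoll.h>
	#include <errno.h>

	static int epoll_add_via_uring(struct io_uring *ring, int epfd, int fd)
	{
		struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd };
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
		struct io_uring_cqe *cqe;
		int ret;

		if (!sqe)
			return -EBUSY;

		io_uring_prep_epoll_ctl(sqe, epfd, fd, EPOLL_CTL_ADD, &ev);
		io_uring_submit(ring);

		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		ret = cqe->res;		/* 0 on success, -errno on failure */
		io_uring_cqe_seen(ring, cqe);
		return ret;
	}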
Cc: stable@vger.kernel.org Fixes: 61a2732af4b0 ("io_uring: deprecate epoll_ctl support") Link: https://github.com/libuv/libuv/pull/3979 Signed-off-by: Ben Noordhuis Link: https://lore.kernel.org/r/20230506095502.13401-1-info@bnoordhuis.nl Signed-off-by: Jens Axboe --- io_uring/epoll.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/io_uring/epoll.c b/io_uring/epoll.c index 9aa74d2c80bc4..89bff2068a190 100644 --- a/io_uring/epoll.c +++ b/io_uring/epoll.c @@ -25,10 +25,6 @@ int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_epoll *epoll = io_kiocb_to_cmd(req, struct io_epoll); - pr_warn_once("%s: epoll_ctl support in io_uring is deprecated and will " - "be removed in a future Linux kernel version.\n", - current->comm); - if (sqe->buf_index || sqe->splice_fd_in) return -EINVAL; From 780328abc0cbde7398d3c04be56fbb5f89ed10b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Fri, 26 May 2023 07:41:23 +0200 Subject: [PATCH 142/303] fbdev: matroxfb ssd1307fb: Switch i2c drivers back to use .probe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After commit b8a1a4cd5a98 ("i2c: Provide a temporary .probe_new() call-back type"), all drivers being converted to .probe_new() and then 03c835f498b5 ("i2c: Switch .probe() to not take an id parameter") convert back to (the new) .probe() to be able to eventually drop .probe_new() from struct i2c_driver. Signed-off-by: Uwe Kleine-König Signed-off-by: Helge Deller --- drivers/video/fbdev/matrox/matroxfb_maven.c | 2 +- drivers/video/fbdev/ssd1307fb.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/video/fbdev/matrox/matroxfb_maven.c b/drivers/video/fbdev/matrox/matroxfb_maven.c index 727a10a59811b..b15a8ad92ba70 100644 --- a/drivers/video/fbdev/matrox/matroxfb_maven.c +++ b/drivers/video/fbdev/matrox/matroxfb_maven.c @@ -1291,7 +1291,7 @@ static struct i2c_driver maven_driver={ .driver = { .name = "maven", }, - .probe_new = maven_probe, + .probe = maven_probe, .remove = maven_remove, .id_table = maven_id, }; diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index 046b9990d27cd..132d1a2050118 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -844,7 +844,7 @@ static const struct i2c_device_id ssd1307fb_i2c_id[] = { MODULE_DEVICE_TABLE(i2c, ssd1307fb_i2c_id); static struct i2c_driver ssd1307fb_driver = { - .probe_new = ssd1307fb_probe, + .probe = ssd1307fb_probe, .remove = ssd1307fb_remove, .id_table = ssd1307fb_i2c_id, .driver = { From fdd7d1fff4e3b97c4706f4c0e000a38b134b7ae5 Mon Sep 17 00:00:00 2001 From: Steve French Date: Sat, 27 May 2023 03:33:23 -0500 Subject: [PATCH 143/303] cifs: address unused variable warning Fix trivial unused variable warning (when SMB1 support disabled) "ioctl.c:324:17: warning: variable 'caps' set but not used [-Wunused-but-set-variable]" Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202305250056.oZhsJmdD-lkp@intel.com/ Signed-off-by: Steve French --- fs/smb/client/ioctl.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c index cb3be58cd55eb..fff092bbc7a37 100644 --- a/fs/smb/client/ioctl.c +++ b/fs/smb/client/ioctl.c @@ -321,7 +321,11 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) struct tcon_link *tlink; struct cifs_sb_info *cifs_sb; __u64 ExtAttrBits = 0; +#ifdef CONFIG_CIFS_POSIX +#ifdef 
CONFIG_CIFS_ALLOW_INSECURE_LEGACY __u64 caps; +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ +#endif /* CONFIG_CIFS_POSIX */ xid = get_xid(); @@ -331,9 +335,9 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) if (pSMBFile == NULL) break; tcon = tlink_tcon(pSMBFile->tlink); - caps = le64_to_cpu(tcon->fsUnixInfo.Capability); #ifdef CONFIG_CIFS_POSIX #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY + caps = le64_to_cpu(tcon->fsUnixInfo.Capability); if (CIFS_UNIX_EXTATTR_CAP & caps) { __u64 ExtAttrMask = 0; rc = CIFSGetExtAttr(xid, tcon, From 5cf9a090a39c97f4506b7b53739d469b1c05a7e9 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sat, 27 May 2023 11:28:36 +0200 Subject: [PATCH 144/303] fbdev: imsttfb: Release framebuffer and dealloc cmap on error path Add missing cleanups in error path. Signed-off-by: Helge Deller --- drivers/video/fbdev/imsttfb.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index 975dd682fae4b..075f119912815 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1452,9 +1452,13 @@ static int init_imstt(struct fb_info *info) FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_YPAN; - fb_alloc_cmap(&info->cmap, 0, 0); + if (fb_alloc_cmap(&info->cmap, 0, 0)) { + framebuffer_release(info); + return -ENODEV; + } if (register_framebuffer(info) < 0) { + fb_dealloc_cmap(&info->cmap); framebuffer_release(info); return -ENODEV; } From 518ecb6a209f6ff678aeadf9f2bf870c0982ca85 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sat, 27 May 2023 11:37:29 +0200 Subject: [PATCH 145/303] fbdev: imsttfb: Fix error path of imsttfb_probe() Release ressources when init_imstt() returns failure. Signed-off-by: Helge Deller --- drivers/video/fbdev/imsttfb.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index 075f119912815..ee7d01ad14068 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1535,8 +1535,10 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto error; info->pseudo_palette = par->palette; ret = init_imstt(info); - if (!ret) - pci_set_drvdata(pdev, info); + if (ret) + goto error; + + pci_set_drvdata(pdev, info); return ret; error: From d78bd6cc68276bd57f766f7cb98bfe32c23ab327 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sat, 27 May 2023 08:41:09 +0200 Subject: [PATCH 146/303] fbcon: Fix null-ptr-deref in soft_cursor syzbot repored this bug in the softcursor code: BUG: KASAN: null-ptr-deref in soft_cursor+0x384/0x6b4 drivers/video/fbdev/core/softcursor.c:70 Read of size 16 at addr 0000000000000200 by task kworker/u4:1/12 CPU: 0 PID: 12 Comm: kworker/u4:1 Not tainted 6.4.0-rc3-syzkaller-geb0f1697d729 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/28/2023 Workqueue: events_power_efficient fb_flashcursor Call trace: dump_backtrace+0x1b8/0x1e4 arch/arm64/kernel/stacktrace.c:233 show_stack+0x2c/0x44 arch/arm64/kernel/stacktrace.c:240 __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0xd0/0x124 lib/dump_stack.c:106 print_report+0xe4/0x514 mm/kasan/report.c:465 kasan_report+0xd4/0x130 mm/kasan/report.c:572 kasan_check_range+0x264/0x2a4 mm/kasan/generic.c:187 __asan_memcpy+0x3c/0x84 mm/kasan/shadow.c:105 soft_cursor+0x384/0x6b4 drivers/video/fbdev/core/softcursor.c:70 bit_cursor+0x113c/0x1a64 drivers/video/fbdev/core/bitblit.c:377 fb_flashcursor+0x35c/0x54c 
drivers/video/fbdev/core/fbcon.c:380 process_one_work+0x788/0x12d4 kernel/workqueue.c:2405 worker_thread+0x8e0/0xfe8 kernel/workqueue.c:2552 kthread+0x288/0x310 kernel/kthread.c:379 ret_from_fork+0x10/0x20 arch/arm64/kernel/entry.S:853 This fix let bit_cursor() bail out early when a font bitmap isn't available yet. Signed-off-by: Helge Deller Reported-by: syzbot+d910bd780e6efac35869@syzkaller.appspotmail.com Acked-by: Sam Ravnborg Cc: stable@kernel.org --- drivers/video/fbdev/core/bitblit.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index f98e8f298bc19..8587c9da06700 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -247,6 +247,9 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; + if (!vc->vc_font.data) + return; + c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height)); From b3e6bcb94590dea45396b9481e47b809b1be4afa Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Tue, 23 May 2023 23:49:48 -0400 Subject: [PATCH 147/303] ext4: add EA_INODE checking to ext4_iget() Add a new flag, EXT4_IGET_EA_INODE which indicates whether the inode is expected to have the EA_INODE flag or not. If the flag is not set/clear as expected, then fail the iget() operation and mark the file system as corrupted. This commit also makes the ext4_iget() always perform the is_bad_inode() check even when the inode is already inode cache. This allows us to remove the is_bad_inode() check from the callers of ext4_iget() in the ea_inode code. Reported-by: syzbot+cbb68193bdb95af4340a@syzkaller.appspotmail.com Reported-by: syzbot+62120febbd1ee3c3c860@syzkaller.appspotmail.com Reported-by: syzbot+edce54daffee36421b4c@syzkaller.appspotmail.com Cc: stable@kernel.org Signed-off-by: Theodore Ts'o Link: https://lore.kernel.org/r/20230524034951.779531-2-tytso@mit.edu Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 3 ++- fs/ext4/inode.c | 31 ++++++++++++++++++++++++++----- fs/ext4/xattr.c | 36 +++++++----------------------------- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 6948d673bba2e..9525c52b78dc5 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2901,7 +2901,8 @@ typedef enum { EXT4_IGET_NORMAL = 0, EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */ EXT4_IGET_HANDLE = 0x0002, /* Inode # is from a handle */ - EXT4_IGET_BAD = 0x0004 /* Allow to iget a bad inode */ + EXT4_IGET_BAD = 0x0004, /* Allow to iget a bad inode */ + EXT4_IGET_EA_INODE = 0x0008 /* Inode should contain an EA value */ } ext4_iget_flags; extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index ce5f21b6c2b3e..258f3cbed347c 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4641,6 +4641,21 @@ static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val) inode_set_iversion_queried(inode, val); } +static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags) + +{ + if (flags & EXT4_IGET_EA_INODE) { + if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) + return "missing EA_INODE flag"; + } else { + if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) + return "unexpected EA_INODE flag"; + } + if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) + return "unexpected bad inode w/o EXT4_IGET_BAD"; + return NULL; +} + struct inode 
*__ext4_iget(struct super_block *sb, unsigned long ino, ext4_iget_flags flags, const char *function, unsigned int line) @@ -4650,6 +4665,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, struct ext4_inode_info *ei; struct ext4_super_block *es = EXT4_SB(sb)->s_es; struct inode *inode; + const char *err_str; journal_t *journal = EXT4_SB(sb)->s_journal; long ret; loff_t size; @@ -4677,8 +4693,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); - if (!(inode->i_state & I_NEW)) + if (!(inode->i_state & I_NEW)) { + if ((err_str = check_igot_inode(inode, flags)) != NULL) { + ext4_error_inode(inode, function, line, 0, err_str); + iput(inode); + return ERR_PTR(-EFSCORRUPTED); + } return inode; + } ei = EXT4_I(inode); iloc.bh = NULL; @@ -4944,10 +4966,9 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) ext4_error_inode(inode, function, line, 0, "casefold flag without casefold feature"); - if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) { - ext4_error_inode(inode, function, line, 0, - "bad inode without EXT4_IGET_BAD flag"); - ret = -EUCLEAN; + if ((err_str = check_igot_inode(inode, flags)) != NULL) { + ext4_error_inode(inode, function, line, 0, err_str); + ret = -EFSCORRUPTED; goto bad_inode; } diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index dfc2e223bd10d..a27208129a800 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -433,7 +433,7 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino, return -EFSCORRUPTED; } - inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL); + inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_EA_INODE); if (IS_ERR(inode)) { err = PTR_ERR(inode); ext4_error(parent->i_sb, @@ -441,23 +441,6 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino, err); return err; } - - if (is_bad_inode(inode)) { - ext4_error(parent->i_sb, - "error while reading EA inode %lu is_bad_inode", - ea_ino); - err = -EIO; - goto error; - } - - if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) { - ext4_error(parent->i_sb, - "EA inode %lu does not have EXT4_EA_INODE_FL flag", - ea_ino); - err = -EINVAL; - goto error; - } - ext4_xattr_inode_set_class(inode); /* @@ -478,9 +461,6 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino, *ea_inode = inode; return 0; -error: - iput(inode); - return err; } /* Remove entry from mbcache when EA inode is getting evicted */ @@ -1556,11 +1536,10 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value, while (ce) { ea_inode = ext4_iget(inode->i_sb, ce->e_value, - EXT4_IGET_NORMAL); - if (!IS_ERR(ea_inode) && - !is_bad_inode(ea_inode) && - (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) && - i_size_read(ea_inode) == value_len && + EXT4_IGET_EA_INODE); + if (IS_ERR(ea_inode)) + goto next_entry; + if (i_size_read(ea_inode) == value_len && !ext4_xattr_inode_read(ea_inode, ea_data, value_len) && !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data, value_len) && @@ -1570,9 +1549,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value, kvfree(ea_data); return ea_inode; } - - if (!IS_ERR(ea_inode)) - iput(ea_inode); + iput(ea_inode); + next_entry: ce = mb_cache_entry_find_next(ea_inode_cache, ce); } kvfree(ea_data); From 36e4fc57fc1619f462e669e939209c45763bc8f5 Mon Sep 17 00:00:00 2001 From: Akihiro Suda Date: Sun, 28 May 2023 19:36:02 +0200 Subject: 
[PATCH 148/303] efi: Bump stub image version for macOS HVF compatibility The macOS hypervisor framework includes a host-side VMM called VZLinuxBootLoader [1] which implements native support for booting the Linux kernel inside a guest directly (instead of, e.g., via GRUB installed inside the guest). On x86, it incorporates a BIOS style loader that does not implement or expose EFI to the loaded kernel. However, this loader appears to fail when the 'image minor version' field in the kernel image's PE/COFF header (which is generally only used by EFI based bootloaders) is set to any value other than 0x0. [2] Commit e346bebbd36b1576 ("efi: libstub: Always enable initrd command line loader and bump version") incremented the EFI stub image minor version to convey that all EFI stub kernels now implement support for the initrd= command line option, and do so in a way where it can load initrd images from any filesystem known to the EFI firmware (as opposed to prior implementations that could only load initrds from the same volume that the kernel image was loaded from). Unfortunately, bumping the version to v1.1 triggers this issue in VZLinuxBootLoader, breaking the boot on x86. So let's keep the image minor version at 0x0, and bump the image major version instead. While at it, convert this field to a bit field, so that individual features are discoverable from it, as suggested by Linus. So let's bump the major version to v3, and document the initrd= command line loading feature as being represented by bit 1 in the mask. Note that, due to the prior interpretation as a monotonically increasing version field, loaders are still permitted to assume that the LoadFile2 initrd loading feature is supported for any major version value >= 1, even if bit 0 is not set. [1] https://developer.apple.com/documentation/virtualization/vzlinuxbootloader [2] https://lore.kernel.org/linux-efi/CAG8fp8Teu4G9JuenQrqGndFt2Gy+V4YgJ=hN1xX7AD940YKf3A@mail.gmail.com/ Fixes: e346bebbd36b1576 ("efi: libstub: Always enable initrd command ...") Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217485 Signed-off-by: Akihiro Suda [ardb: rewrite comment and commit log] Signed-off-by: Ard Biesheuvel --- include/linux/pe.h | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/include/linux/pe.h b/include/linux/pe.h index 5e1e115408702..fdf9c95709ba0 100644 --- a/include/linux/pe.h +++ b/include/linux/pe.h @@ -11,25 +11,26 @@ #include /* - * Linux EFI stub v1.0 adds the following functionality: - * - Loading initrd from the LINUX_EFI_INITRD_MEDIA_GUID device path, - * - Loading/starting the kernel from firmware that targets a different - * machine type, via the entrypoint exposed in the .compat PE/COFF section. + * Starting from version v3.0, the major version field should be interpreted as + * a bit mask of features supported by the kernel's EFI stub: + * - 0x1: initrd loading from the LINUX_EFI_INITRD_MEDIA_GUID device path, + * - 0x2: initrd loading using the initrd= command line option, where the file + * may be specified using device path notation, and is not required to + * reside on the same volume as the loaded kernel image. * * The recommended way of loading and starting v1.0 or later kernels is to use * the LoadImage() and StartImage() EFI boot services, and expose the initrd * via the LINUX_EFI_INITRD_MEDIA_GUID device path. 
* - * Versions older than v1.0 support initrd loading via the image load options - * (using initrd=, limited to the volume from which the kernel itself was - * loaded), or via arch specific means (bootparams, DT, etc). + * Versions older than v1.0 may support initrd loading via the image load + * options (using initrd=, limited to the volume from which the kernel itself + * was loaded), or only via arch specific means (bootparams, DT, etc). * - * On x86, LoadImage() and StartImage() can be omitted if the EFI handover - * protocol is implemented, which can be inferred from the version, - * handover_offset and xloadflags fields in the bootparams structure. + * The minor version field must remain 0x0. + * (https://lore.kernel.org/all/efd6f2d4-547c-1378-1faa-53c044dbd297@gmail.com/) */ -#define LINUX_EFISTUB_MAJOR_VERSION 0x1 -#define LINUX_EFISTUB_MINOR_VERSION 0x1 +#define LINUX_EFISTUB_MAJOR_VERSION 0x3 +#define LINUX_EFISTUB_MINOR_VERSION 0x0 /* * LINUX_PE_MAGIC appears at offset 0x38 into the MS-DOS header of EFI bootable From e8352cf5778c53b80fdcee086278b2048ddb8f98 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Sun, 28 May 2023 01:17:38 -0400 Subject: [PATCH 149/303] tracing: Move setting of tracing_selftest_running out of register_tracer() The variables tracing_selftest_running and tracing_selftest_disabled are only used for when CONFIG_FTRACE_STARTUP_TEST is enabled. Make them only visible within the selftest code. The setting of those variables are in the register_tracer() call, and set in a location where they do not need to be. Create a wrapper around run_tracer_selftest() called do_run_tracer_selftest() which sets those variables, and have register_tracer() call that instead. Having those variables only set within the CONFIG_FTRACE_STARTUP_TEST scope gets rid of them (and also the ability to remove testing against them) when the startup tests are not enabled (most cases). 
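Distilled to its core, the approach described here (and completed by the follow-up patch later in this series that turns the flags into constant 0 when the config is off) looks roughly like the sketch below. The names are generic stand-ins, not the actual trace.c symbols, and run_my_selftest() is assumed to exist elsewhere.

	struct my_tracer;
	static int run_my_selftest(struct my_tracer *t);	/* real test body */

	#ifdef CONFIG_MY_STARTUP_TEST
	static int my_selftest_running;

	static int do_run_my_selftest(struct my_tracer *t)
	{
		int ret;

		my_selftest_running = 1;	/* state exists only for the tests */
		ret = run_my_selftest(t);
		my_selftest_running = 0;
		return ret;
	}
	#else
	#define my_selftest_running 0		/* every check folds to if (0) */
	static inline int do_run_my_selftest(struct my_tracer *t)
	{
		return 0;
	}
	#endif

With this shape, the registration path simply calls do_run_my_selftest() and tests my_selftest_running without any #ifdef at the call sites.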
Link: https://lkml.kernel.org/r/20230528051742.1325503-2-rostedt@goodmis.org Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 81801dc317849..87e5920b141fa 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2041,6 +2041,17 @@ static int run_tracer_selftest(struct tracer *type) return 0; } +static int do_run_tracer_selftest(struct tracer *type) +{ + int ret; + + tracing_selftest_running = true; + ret = run_tracer_selftest(type); + tracing_selftest_running = false; + + return ret; +} + static __init int init_trace_selftests(void) { struct trace_selftests *p, *n; @@ -2092,6 +2103,10 @@ static inline int run_tracer_selftest(struct tracer *type) { return 0; } +static inline int do_run_tracer_selftest(struct tracer *type) +{ + return 0; +} #endif /* CONFIG_FTRACE_STARTUP_TEST */ static void add_tracer_options(struct trace_array *tr, struct tracer *t); @@ -2127,8 +2142,6 @@ int __init register_tracer(struct tracer *type) mutex_lock(&trace_types_lock); - tracing_selftest_running = true; - for (t = trace_types; t; t = t->next) { if (strcmp(type->name, t->name) == 0) { /* already found */ @@ -2157,7 +2170,7 @@ int __init register_tracer(struct tracer *type) /* store the tracer for __set_tracer_option */ type->flags->trace = type; - ret = run_tracer_selftest(type); + ret = do_run_tracer_selftest(type); if (ret < 0) goto out; @@ -2166,7 +2179,6 @@ int __init register_tracer(struct tracer *type) add_tracer_options(&global_trace, type); out: - tracing_selftest_running = false; mutex_unlock(&trace_types_lock); if (ret || !default_bootup_tracer) From 9da705d432a07927526005a0688d81fbbf30e349 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Sun, 28 May 2023 01:17:39 -0400 Subject: [PATCH 150/303] tracing: Have tracer selftests call cond_resched() before running As there are more and more internal selftests being added to the Linux kernel (KSAN, lockdep, etc) the selftests are taking longer to run when these are enabled. Add a cond_resched() to the calling of do_run_tracer_selftest() to force a schedule if NEED_RESCHED is set, otherwise the soft lockup watchdog may trigger on boot up. Link: https://lkml.kernel.org/r/20230528051742.1325503-3-rostedt@goodmis.org Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 87e5920b141fa..70f2b511b9cd7 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2045,6 +2045,13 @@ static int do_run_tracer_selftest(struct tracer *type) { int ret; + /* + * Tests can take a long time, especially if they are run one after the + * other, as does happen during bootup when all the tracers are + * registered. This could cause the soft lockup watchdog to trigger. + */ + cond_resched(); + tracing_selftest_running = true; ret = run_tracer_selftest(type); tracing_selftest_running = false; From a3ae76d7ff781208100e6acc58eb09afb7b4b177 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Sun, 28 May 2023 01:17:40 -0400 Subject: [PATCH 151/303] tracing: Make tracing_selftest_running/delete nops when not used There's no reason to test the condition variables tracing_selftest_running or tracing_selftest_delete when tracing selftests are not enabled. Make them define 0s when not the selftests are not configured in. 
Link: https://lkml.kernel.org/r/20230528051742.1325503-4-rostedt@goodmis.org Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 70f2b511b9cd7..004f5f99e9439 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -60,6 +60,7 @@ */ bool ring_buffer_expanded; +#ifdef CONFIG_FTRACE_STARTUP_TEST /* * We need to change this state when a selftest is running. * A selftest will lurk into the ring-buffer to count the @@ -75,7 +76,6 @@ static bool __read_mostly tracing_selftest_running; */ bool __read_mostly tracing_selftest_disabled; -#ifdef CONFIG_FTRACE_STARTUP_TEST void __init disable_tracing_selftest(const char *reason) { if (!tracing_selftest_disabled) { @@ -83,6 +83,9 @@ void __init disable_tracing_selftest(const char *reason) pr_info("Ftrace startup test is disabled due to %s\n", reason); } } +#else +#define tracing_selftest_running 0 +#define tracing_selftest_disabled 0 #endif /* Pipe tracepoints to printk */ From ac9d2cb1d5f8e22235c399338504dadc87d14e74 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Sun, 28 May 2023 01:17:41 -0400 Subject: [PATCH 152/303] tracing: Only make selftest conditionals affect the global_trace The tracing_selftest_running and tracing_selftest_disabled variables were to keep trace_printk() and other writes from affecting the tracing selftests, as the tracing selftests would examine the ring buffer to see if it contained what it expected or not. trace_printk() and friends could add to the ring buffer and cause the selftests to fail (and then disable the tracer that was being tested). To keep that from happening, these variables were added and would keep trace_printk() and friends from writing to the ring buffer while the tests were going on. But this was only the top level ring buffer (owned by the global_trace instance). There is no reason to prevent writing into ring buffers of other instances via the trace_array_printk() and friends. For the functions that could be used by other instances, check if the global_trace is the tracer instance that is being written to before deciding to not allow the write. 
Link: https://lkml.kernel.org/r/20230528051742.1325503-5-rostedt@goodmis.org Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 004f5f99e9439..64a4dde073ef6 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1054,7 +1054,10 @@ int __trace_array_puts(struct trace_array *tr, unsigned long ip, if (!(tr->trace_flags & TRACE_ITER_PRINTK)) return 0; - if (unlikely(tracing_selftest_running || tracing_disabled)) + if (unlikely(tracing_selftest_running && tr == &global_trace)) + return 0; + + if (unlikely(tracing_disabled)) return 0; alloc = sizeof(*entry) + size + 2; /* possible \n added */ @@ -3512,7 +3515,7 @@ __trace_array_vprintk(struct trace_buffer *buffer, unsigned int trace_ctx; char *tbuffer; - if (tracing_disabled || tracing_selftest_running) + if (tracing_disabled) return 0; /* Don't pollute graph traces with trace_vprintk internals */ @@ -3560,6 +3563,9 @@ __printf(3, 0) int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { + if (tracing_selftest_running && tr == &global_trace) + return 0; + return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); } From a2d910f02231f33118647fc438157ae69c073f89 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Sun, 28 May 2023 01:17:42 -0400 Subject: [PATCH 153/303] tracing: Have function_graph selftest call cond_resched() When all kernel debugging is enabled (lockdep, KSAN, etc), the function graph enabling and disabling can take several seconds to complete. The function_graph selftest enables and disables function graph tracing several times. With full debugging enabled, the soft lockup watchdog was triggering because the selftest was running without ever scheduling. Add cond_resched() throughout the test to make sure it does not trigger the soft lockup detector. Link: https://lkml.kernel.org/r/20230528051742.1325503-6-rostedt@goodmis.org Signed-off-by: Steven Rostedt (Google) --- kernel/trace/trace_selftest.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index a931d9aaea261..529590499b1fa 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -848,6 +848,12 @@ trace_selftest_startup_function_graph(struct tracer *trace, } #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + /* + * These tests can take some time to run. Make sure on non PREEMPT + * kernels, we do not trigger the softlockup detector. + */ + cond_resched(); + tracing_reset_online_cpus(&tr->array_buffer); set_graph_array(tr); @@ -869,6 +875,8 @@ trace_selftest_startup_function_graph(struct tracer *trace, if (ret) goto out; + cond_resched(); + ret = register_ftrace_graph(&fgraph_ops); if (ret) { warn_failed_init_tracer(trace, ret); @@ -891,6 +899,8 @@ trace_selftest_startup_function_graph(struct tracer *trace, if (ret) goto out; + cond_resched(); + tracing_start(); if (!ret && !count) { From ac2263b588dffd3a1efd7ed0b156ea6c5aea200d Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 29 May 2023 06:40:33 -0400 Subject: [PATCH 154/303] Revert "module: error out early on concurrent load of the same module file" This reverts commit 9828ed3f695a138f7add89fa2a186ababceb8006. Sadly, it does seem to cause failures to load modules. Johan Hovold reports: "This change breaks module loading during boot on the Lenovo Thinkpad X13s (aarch64). 
Specifically it results in indefinite probe deferral of the display and USB (ethernet) which makes it a pain to debug. Typing in the dark to acquire some logs reveals that other modules are missing as well" Since this was applied late as a "let's try this", I'm reverting it asap, and we can try to figure out what goes wrong later. The excessive parallel module loading problem is annoying, but not noticeable in normal situations, and this was only meant as an optimistic workaround for a user-space bug. One possible solution may be to do the optimistic exclusive open first, and then use a lock to serialize loading if that fails. Reported-by: Johan Hovold Link: https://lore.kernel.org/lkml/ZHRpH-JXAxA6DnzR@hovoldconsulting.com/ Signed-off-by: Linus Torvalds --- include/linux/fs.h | 6 ----- kernel/module/main.c | 58 ++++++++++++-------------------------------- 2 files changed, 15 insertions(+), 49 deletions(-) diff --git a/include/linux/fs.h b/include/linux/fs.h index 86b50271b4f74..133f0640fb241 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2566,12 +2566,6 @@ static inline int deny_write_access(struct file *file) struct inode *inode = file_inode(file); return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -ETXTBSY; } -static inline int exclusive_deny_write_access(struct file *file) -{ - int old = 0; - struct inode *inode = file_inode(file); - return atomic_try_cmpxchg(&inode->i_writecount, &old, -1) ? 0 : -ETXTBSY; -} static inline void put_write_access(struct inode * inode) { atomic_dec(&inode->i_writecount); diff --git a/kernel/module/main.c b/kernel/module/main.c index b4c7e925fdb07..044aa2c9e3cb0 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -3057,13 +3057,25 @@ SYSCALL_DEFINE3(init_module, void __user *, umod, return load_module(&info, uargs, 0); } -static int file_init_module(struct file *file, const char __user * uargs, int flags) +SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) { struct load_info info = { }; void *buf = NULL; int len; + int err; + + err = may_init_module(); + if (err) + return err; + + pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); - len = kernel_read_file(file, 0, &buf, INT_MAX, NULL, + if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS + |MODULE_INIT_IGNORE_VERMAGIC + |MODULE_INIT_COMPRESSED_FILE)) + return -EINVAL; + + len = kernel_read_file_from_fd(fd, 0, &buf, INT_MAX, NULL, READING_MODULE); if (len < 0) { mod_stat_inc(&failed_kreads); @@ -3072,7 +3084,7 @@ static int file_init_module(struct file *file, const char __user * uargs, int fl } if (flags & MODULE_INIT_COMPRESSED_FILE) { - int err = module_decompress(&info, buf, len); + err = module_decompress(&info, buf, len); vfree(buf); /* compressed data is no longer needed */ if (err) { mod_stat_inc(&failed_decompress); @@ -3087,46 +3099,6 @@ static int file_init_module(struct file *file, const char __user * uargs, int fl return load_module(&info, uargs, flags); } -/* - * kernel_read_file() will already deny write access, but module - * loading wants _exclusive_ access to the file, so we do that - * here, along with basic sanity checks. 
- */ -static int prepare_file_for_module_load(struct file *file) -{ - if (!file || !(file->f_mode & FMODE_READ)) - return -EBADF; - if (!S_ISREG(file_inode(file)->i_mode)) - return -EINVAL; - return exclusive_deny_write_access(file); -} - -SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) -{ - struct fd f; - int err; - - err = may_init_module(); - if (err) - return err; - - pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); - - if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS - |MODULE_INIT_IGNORE_VERMAGIC - |MODULE_INIT_COMPRESSED_FILE)) - return -EINVAL; - - f = fdget(fd); - err = prepare_file_for_module_load(f.file); - if (!err) { - err = file_init_module(f.file, uargs, flags); - allow_write_access(f.file); - } - fdput(f); - return err; -} - /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */ char *module_flags(struct module *mod, char *buf, bool show_state) { From ed309ce522185583b163bd0c74f0d9f299fe1826 Mon Sep 17 00:00:00 2001 From: Conor Dooley Date: Fri, 26 May 2023 11:59:08 +0100 Subject: [PATCH 155/303] RISC-V: mark hibernation as nonportable Hibernation support depends on firmware marking its reserved/PMP protected regions as not accessible from Linux. The latest versions of the de-facto SBI implementation (OpenSBI) do not do this, having dropped the no-map property to enable 1 GiB huge page mappings by the kernel. This was exposed by commit 3335068f8721 ("riscv: Use PUD/P4D/PGD pages for the linear mapping"), which made the first 2 MiB of DRAM (where SBI typically resides) accessible by the kernel. Attempting to hibernate with either OpenSBI, or other implementations following its lead, will lead to a kernel panic ([1], [2]) as the hibernation process will attempt to save/restore any mapped regions, including the PMP protected regions in use by the SBI implementation. Mark hibernation as depending on "NONPORTABLE", as only a small subset of systems are capable of supporting it, until such time that an SBI implementation independent way to communicate what regions are in use has been agreed on. As hibernation support landed in v6.4-rc1, disabling it for most platforms does not constitute a regression. The alternative would have been reverting commit 3335068f8721 ("riscv: Use PUD/P4D/PGD pages for the linear mapping"). Doing so would permit hibernation on platforms with these SBI implementations, but would limit the options we have to solve the protection of the region without causing a regression in hibernation support. 
Reported-by: Song Shuai Link: https://lore.kernel.org/all/CAAYs2=gQvkhTeioMmqRDVGjdtNF_vhB+vm_1dHJxPNi75YDQ_Q@mail.gmail.com/ [1] Reported-by: JeeHeng Sia Link: https://groups.google.com/a/groups.riscv.org/g/sw-dev/c/ITXwaKfA6z8 [2] Signed-off-by: Conor Dooley Link: https://lore.kernel.org/r/20230526-astride-detonator-9ae120051159@wendy Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 348c0fa1fc8c7..2bb0c38419ff5 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -799,8 +799,11 @@ menu "Power management options" source "kernel/power/Kconfig" +# Hibernation is only possible on systems where the SBI implementation has +# marked its reserved memory as not accessible from, or does not run +# from the same memory as, Linux config ARCH_HIBERNATION_POSSIBLE - def_bool y + def_bool NONPORTABLE config ARCH_HIBERNATION_HEADER def_bool HIBERNATION From a6e766dea0a22918735176e4af862d535962f11e Mon Sep 17 00:00:00 2001 From: Ekansh Gupta Date: Tue, 23 May 2023 16:25:47 +0100 Subject: [PATCH 156/303] misc: fastrpc: Pass proper scm arguments for secure map request If a map request is made with securemap attribute, the memory ownership needs to be reassigned to new VMID to allow access from protection domain. Currently only DSP VMID is passed to the reassign call which is incorrect as only a combination of HLOS and DSP VMID is allowed for memory ownership reassignment and passing only DSP VMID will cause assign call failure. Also pass proper restoring permissions to HLOS as the source permission will now carry both HLOS and DSP VMID permission. Change is also made to get valid physical address from scatter/gather for this allocation request. 
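As a rough sketch of the calling convention relied on here (illustrative only, with dsp_vmid, phys, size and err standing in for the driver's actual variables), qcom_scm_assign_mem() takes the current owners as a VMID bitmask and the new owners as an array of vmperm entries:

        u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
        struct qcom_scm_vmperm dst_perms[2] = {
                { .vmid = QCOM_SCM_VMID_HLOS, .perm = QCOM_SCM_PERM_RW  },
                { .vmid = dsp_vmid,           .perm = QCOM_SCM_PERM_RWX },
        };

        /* reassign the buffer from HLOS-only to HLOS + DSP */
        err = qcom_scm_assign_mem(phys, size, &src_perms,
                                  dst_perms, ARRAY_SIZE(dst_perms));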
Fixes: e90d91190619 ("misc: fastrpc: Add support to secure memory map") Cc: stable Tested-by: Ekansh Gupta Signed-off-by: Ekansh Gupta Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20230523152550.438363-2-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/misc/fastrpc.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index f48466960f1b9..32a5415624bf5 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -316,12 +316,14 @@ static void fastrpc_free_map(struct kref *ref) if (map->table) { if (map->attr & FASTRPC_ATTR_SECUREMAP) { struct qcom_scm_vmperm perm; + int vmid = map->fl->cctx->vmperms[0].vmid; + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS) | BIT(vmid); int err = 0; perm.vmid = QCOM_SCM_VMID_HLOS; perm.perm = QCOM_SCM_PERM_RWX; err = qcom_scm_assign_mem(map->phys, map->size, - &map->fl->cctx->perms, &perm, 1); + &src_perms, &perm, 1); if (err) { dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", map->phys, map->size, err); @@ -787,8 +789,12 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, goto map_err; } - map->phys = sg_dma_address(map->table->sgl); - map->phys += ((u64)fl->sctx->sid << 32); + if (attr & FASTRPC_ATTR_SECUREMAP) { + map->phys = sg_phys(map->table->sgl); + } else { + map->phys = sg_dma_address(map->table->sgl); + map->phys += ((u64)fl->sctx->sid << 32); + } map->size = len; map->va = sg_virt(map->table->sgl); map->len = len; @@ -798,9 +804,15 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, * If subsystem VMIDs are defined in DTSI, then do * hyp_assign from HLOS to those VM(s) */ + u64 src_perms = BIT(QCOM_SCM_VMID_HLOS); + struct qcom_scm_vmperm dst_perms[2] = {0}; + + dst_perms[0].vmid = QCOM_SCM_VMID_HLOS; + dst_perms[0].perm = QCOM_SCM_PERM_RW; + dst_perms[1].vmid = fl->cctx->vmperms[0].vmid; + dst_perms[1].perm = QCOM_SCM_PERM_RWX; map->attr = attr; - err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms, - fl->cctx->vmperms, fl->cctx->vmcount); + err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2); if (err) { dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d", map->phys, map->size, err); From 3c7d0079a1831118ef232bd9c2f34d058a1f31c2 Mon Sep 17 00:00:00 2001 From: Ekansh Gupta Date: Tue, 23 May 2023 16:25:48 +0100 Subject: [PATCH 157/303] misc: fastrpc: Reassign memory ownership only for remote heap The userspace map request for remote heap allocates CMA memory. The ownership of this memory needs to be reassigned to proper owners to allow access from the protection domain running on DSP. This reassigning of ownership is not correct if done for any other supported flags. When any other flag is requested from userspace, fastrpc is trying to reassign the ownership of memory and this reassignment is getting skipped for remote heap request which is incorrect. Add proper flag check to reassign the memory only if remote heap is requested. 
Fixes: 532ad70c6d44 ("misc: fastrpc: Add mmap request assigning for static PD pool") Cc: stable Tested-by: Ekansh Gupta Signed-off-by: Ekansh Gupta Reviewed-by: Srinivas Kandagatla Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20230523152550.438363-3-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/misc/fastrpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 32a5415624bf5..a654dc416480b 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -1904,7 +1904,7 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp) req.vaddrout = rsp_msg.vaddr; /* Add memory to static PD pool, protection thru hypervisor */ - if (req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { + if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { struct qcom_scm_vmperm perm; perm.vmid = QCOM_SCM_VMID_HLOS; From b6a062853ddf6b4f653af2d8b75ba45bb9a036ad Mon Sep 17 00:00:00 2001 From: Richard Acayan Date: Tue, 23 May 2023 16:25:49 +0100 Subject: [PATCH 158/303] misc: fastrpc: return -EPIPE to invocations on device removal The return value is initialized as -1, or -EPERM. The completion of an invocation implies that the return value is set appropriately, but "Permission denied" does not accurately describe the outcome of the invocation. Set the invocation's return value to a more appropriate "Broken pipe", as the cleanup breaks the driver's connection with rpmsg. Fixes: c68cfb718c8f ("misc: fastrpc: Add support for context Invoke method") Cc: stable Signed-off-by: Richard Acayan Reviewed-by: Srinivas Kandagatla Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20230523152550.438363-4-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/misc/fastrpc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index a654dc416480b..964f67dad2f9f 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -2349,8 +2349,10 @@ static void fastrpc_notify_users(struct fastrpc_user *user) struct fastrpc_invoke_ctx *ctx; spin_lock(&user->lock); - list_for_each_entry(ctx, &user->pending, node) + list_for_each_entry(ctx, &user->pending, node) { + ctx->retval = -EPIPE; complete(&ctx->work); + } spin_unlock(&user->lock); } From 46248400d81e2aa0b65cd659d6f40188192a58b6 Mon Sep 17 00:00:00 2001 From: Richard Acayan Date: Tue, 23 May 2023 16:25:50 +0100 Subject: [PATCH 159/303] misc: fastrpc: reject new invocations during device removal The channel's rpmsg object allows new invocations to be made. After old invocations are already interrupted, the driver shouldn't try to invoke anymore. Invalidating the rpmsg at the end of the driver removal function makes it easy to cause a race condition in userspace. Even closing a file descriptor before the driver finishes its cleanup can cause an invocation via fastrpc_release_current_dsp_process() and subsequent timeout. Invalidate the channel before the invocations are interrupted to make sure that no invocations can be created to hang after the device closes. 
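The ordering works because the invoke path refuses to start once the channel is gone, along the lines of the guard sketched below (simplified); clearing cctx->rpdev before waking the waiters therefore guarantees that no new invocation can be queued afterwards:

        /* early in the invocation path */
        if (!fl->cctx->rpdev)
                return -EPIPE;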
Fixes: c68cfb718c8f ("misc: fastrpc: Add support for context Invoke method") Cc: stable Signed-off-by: Richard Acayan Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20230523152550.438363-5-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/misc/fastrpc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 964f67dad2f9f..30d4d0476248f 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -2363,7 +2363,9 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev) struct fastrpc_user *user; unsigned long flags; + /* No invocations past this point */ spin_lock_irqsave(&cctx->lock, flags); + cctx->rpdev = NULL; list_for_each_entry(user, &cctx->users, user) fastrpc_notify_users(user); spin_unlock_irqrestore(&cctx->lock, flags); @@ -2382,7 +2384,6 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev) of_platform_depopulate(&rpdev->dev); - cctx->rpdev = NULL; fastrpc_channel_ctx_put(cctx); } From dbe678f6192f27879ac9ff6bc7a1036aad85aae9 Mon Sep 17 00:00:00 2001 From: Frank Li Date: Thu, 18 May 2023 11:49:45 -0400 Subject: [PATCH 160/303] usb: cdns3: fix NCM gadget RX speed 20x slow than expection at iMX8QM At iMX8QM platform, enable NCM gadget and run 'iperf3 -s'. At host, run 'iperf3 -V -c fe80::6863:98ff:feef:3e0%enxc6e147509498' [ 5] 0.00-1.00 sec 1.55 MBytes 13.0 Mbits/sec 90 4.18 KBytes [ 5] 1.00-2.00 sec 1.44 MBytes 12.0 Mbits/sec 75 4.18 KBytes [ 5] 2.00-3.00 sec 1.48 MBytes 12.4 Mbits/sec 75 4.18 KBytes Expected speed should be bigger than 300Mbits/sec. The root cause of this performance drop was found to be data corruption happening at 4K borders in some Ethernet packets, leading to TCP checksum errors. This corruption occurs from the position (4K - (address & 0x7F)) to 4K. The u_ether function's allocation of skb_buff reserves 64B, meaning all RX addresses resemble 0xXXXX0040. Force trb_burst_size to 16 can fix this problem. Cc: stable@vger.kernel.org Fixes: 7733f6c32e36 ("usb: cdns3: Add Cadence USB3 DRD Driver") Signed-off-by: Frank Li Link: https://lore.kernel.org/r/20230518154946.3666662-1-Frank.Li@nxp.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/cdns3/cdns3-gadget.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index ccfaebca6faa7..1dcadef933e3a 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -2097,6 +2097,19 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) else priv_ep->trb_burst_size = 16; + /* + * In versions preceding DEV_VER_V2, for example, iMX8QM, there exit the bugs + * in the DMA. These bugs occur when the trb_burst_size exceeds 16 and the + * address is not aligned to 128 Bytes (which is a product of the 64-bit AXI + * and AXI maximum burst length of 16 or 0xF+1, dma_axi_ctrl0[3:0]). This + * results in data corruption when it crosses the 4K border. The corruption + * specifically occurs from the position (4K - (address & 0x7F)) to 4K. + * + * So force trb_burst_size to 16 at such platform. 
+ */ + if (priv_dev->dev_ver < DEV_VER_V2) + priv_ep->trb_burst_size = 16; + mult = min_t(u8, mult, EP_CFG_MULT_MAX); buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX); maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX); From efb6b535207395a5c7317993602e2503ca8cb4b3 Mon Sep 17 00:00:00 2001 From: Uttkarsh Aggarwal Date: Thu, 25 May 2023 14:58:54 +0530 Subject: [PATCH 161/303] usb: gadget: f_fs: Add unbind event before functionfs_unbind While exercising the unbind path, with the current implementation the functionfs_unbind would be calling which waits for the ffs->mutex to be available, however within the same time ffs_ep0_read is invoked & if no setup packets are pending, it will invoke function wait_event_interruptible_exclusive_locked_irq which by definition waits for the ev.count to be increased inside the same mutex for which functionfs_unbind is waiting. This creates deadlock situation because the functionfs_unbind won't get the lock until ev.count is increased which can only happen if the caller ffs_func_unbind can proceed further. Following is the illustration: CPU1 CPU2 ffs_func_unbind() ffs_ep0_read() mutex_lock(ffs->mutex) wait_event(ffs->ev.count) functionfs_unbind() mutex_lock(ffs->mutex) mutex_unlock(ffs->mutex) ffs_event_add() Fix this by moving the event unbind before functionfs_unbind to ensure the ev.count is incrased properly. Fixes: 6a19da111057 ("usb: gadget: f_fs: Prevent race during ffs_ep0_queue_wait") Cc: stable Signed-off-by: Uttkarsh Aggarwal Link: https://lore.kernel.org/r/20230525092854.7992-1-quic_uaggarwa@quicinc.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_fs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index a13c946e0663c..f41a385a5c421 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -3535,6 +3535,7 @@ static void ffs_func_unbind(struct usb_configuration *c, /* Drain any pending AIO completions */ drain_workqueue(ffs->io_completion_wq); + ffs_event_add(ffs, FUNCTIONFS_UNBIND); if (!--opts->refcnt) functionfs_unbind(ffs); @@ -3559,7 +3560,6 @@ static void ffs_func_unbind(struct usb_configuration *c, func->function.ssp_descriptors = NULL; func->interfaces_nums = NULL; - ffs_event_add(ffs, FUNCTIONFS_UNBIND); } static struct usb_function *ffs_alloc(struct usb_function_instance *fi) From 016da9c65fec9f0e78c4909ed9a0f2d567af6775 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 25 May 2023 18:38:37 +0300 Subject: [PATCH 162/303] usb: gadget: udc: fix NULL dereference in remove() The "udc" pointer was never set in the probe() function so it will lead to a NULL dereference in udc_pci_remove() when we do: usb_del_gadget_udc(&udc->gadget); Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/ZG+A/dNpFWAlCChk@kili Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/udc/amd5536udc_pci.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c index c80f9bd51b750..a36913ae31f9e 100644 --- a/drivers/usb/gadget/udc/amd5536udc_pci.c +++ b/drivers/usb/gadget/udc/amd5536udc_pci.c @@ -170,6 +170,9 @@ static int udc_pci_probe( retval = -ENODEV; goto err_probe; } + + udc = dev; + return 0; err_probe: From 47fe1c3064c6bc1bfa3c032ff78e603e5dd6e5bc Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 29 May 2023 16:32:37 +0900 Subject: [PATCH 163/303] block: fix revalidate performance regression The scsi driver 
function sd_read_block_characteristics() always calls disk_set_zoned() to a disk zoned model correctly, in case the device model changed. This is done even for regular disks to set the zoned model to BLK_ZONED_NONE and free any zone related resources if the drive previously was zoned. This behavior significantly impact the time it takes to revalidate disks on a large system as the call to disk_clear_zone_settings() done from disk_set_zoned() for the BLK_ZONED_NONE case results in the device request queued to be frozen, even if there are no zone resources to free. Avoid this overhead for non-zoned devices by not calling disk_clear_zone_settings() in disk_set_zoned() if the device model was already set to BLK_ZONED_NONE, which is always the case for regular devices. Reported by: Brian Bunker Fixes: 508aebb80527 ("block: introduce blk_queue_clear_zone_settings()") Cc: stable@vger.kernel.org Signed-off-by: Damien Le Moal Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/20230529073237.1339862-1-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/blk-settings.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/block/blk-settings.c b/block/blk-settings.c index 896b4654ab00f..4dd59059b788e 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -915,6 +915,7 @@ static bool disk_has_partitions(struct gendisk *disk) void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model) { struct request_queue *q = disk->queue; + unsigned int old_model = q->limits.zoned; switch (model) { case BLK_ZONED_HM: @@ -952,7 +953,7 @@ void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model) */ blk_queue_zone_write_granularity(q, queue_logical_block_size(q)); - } else { + } else if (old_model != BLK_ZONED_NONE) { disk_clear_zone_settings(disk); } } From 7b32040f6d7f885ffc09a6df7c17992d56d2eab8 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Mon, 15 May 2023 19:24:56 +0200 Subject: [PATCH 164/303] dt-bindings: usb: snps,dwc3: Fix "snps,hsphy_interface" type The "snps,hsphy_interface" is string, not u8. Fix the type. Fixes: 389d77658801 ("dt-bindings: usb: Convert DWC USB3 bindings to DT schema") Cc: stable Reviewed-by: Krzysztof Kozlowski Signed-off-by: Marek Vasut Link: https://lore.kernel.org/r/20230515172456.179049-1-marex@denx.de Signed-off-by: Greg Kroah-Hartman --- Documentation/devicetree/bindings/usb/snps,dwc3.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml index 50edc4da780e9..4f7625955cccc 100644 --- a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml +++ b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml @@ -287,7 +287,7 @@ properties: description: High-Speed PHY interface selection between UTMI+ and ULPI when the DWC_USB3_HSPHY_INTERFACE has value 3. - $ref: /schemas/types.yaml#/definitions/uint8 + $ref: /schemas/types.yaml#/definitions/string enum: [utmi, ulpi] snps,quirk-frame-length-adjustment: From 0143d148d1e882fb1538dc9974c94d63961719b9 Mon Sep 17 00:00:00 2001 From: Ruihan Li Date: Mon, 15 May 2023 21:09:55 +0800 Subject: [PATCH 165/303] usb: usbfs: Enforce page requirements for mmap The current implementation of usbdev_mmap uses usb_alloc_coherent to allocate memory pages that will later be mapped into the user space. 
Meanwhile, usb_alloc_coherent employs three different methods to allocate memory, as outlined below: * If hcd->localmem_pool is non-null, it uses gen_pool_dma_alloc to allocate memory; * If DMA is not available, it uses kmalloc to allocate memory; * Otherwise, it uses dma_alloc_coherent. However, it should be noted that gen_pool_dma_alloc does not guarantee that the resulting memory will be page-aligned. Furthermore, trying to map slab pages (i.e., memory allocated by kmalloc) into the user space is not resonable and can lead to problems, such as a type confusion bug when PAGE_TABLE_CHECK=y [1]. To address these issues, this patch introduces hcd_alloc_coherent_pages, which addresses the above two problems. Specifically, hcd_alloc_coherent_pages uses gen_pool_dma_alloc_align instead of gen_pool_dma_alloc to ensure that the memory is page-aligned. To replace kmalloc, hcd_alloc_coherent_pages directly allocates pages by calling __get_free_pages. Reported-by: syzbot+fcf1a817ceb50935ce99@syzkaller.appspotmail.comm Closes: https://lore.kernel.org/lkml/000000000000258e5e05fae79fc1@google.com/ [1] Fixes: f7d34b445abc ("USB: Add support for usbfs zerocopy.") Fixes: ff2437befd8f ("usb: host: Fix excessive alignment restriction for local memory allocations") Cc: stable@vger.kernel.org Signed-off-by: Ruihan Li Acked-by: Alan Stern Link: https://lore.kernel.org/r/20230515130958.32471-2-lrh2000@pku.edu.cn Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/buffer.c | 41 +++++++++++++++++++++++++++++++++++++++ drivers/usb/core/devio.c | 9 +++++---- include/linux/usb/hcd.h | 5 +++++ 3 files changed, 51 insertions(+), 4 deletions(-) diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c index fbb087b728dc9..268ccbec88f95 100644 --- a/drivers/usb/core/buffer.c +++ b/drivers/usb/core/buffer.c @@ -172,3 +172,44 @@ void hcd_buffer_free( } dma_free_coherent(hcd->self.sysdev, size, addr, dma); } + +void *hcd_buffer_alloc_pages(struct usb_hcd *hcd, + size_t size, gfp_t mem_flags, dma_addr_t *dma) +{ + if (size == 0) + return NULL; + + if (hcd->localmem_pool) + return gen_pool_dma_alloc_align(hcd->localmem_pool, + size, dma, PAGE_SIZE); + + /* some USB hosts just use PIO */ + if (!hcd_uses_dma(hcd)) { + *dma = DMA_MAPPING_ERROR; + return (void *)__get_free_pages(mem_flags, + get_order(size)); + } + + return dma_alloc_coherent(hcd->self.sysdev, + size, dma, mem_flags); +} + +void hcd_buffer_free_pages(struct usb_hcd *hcd, + size_t size, void *addr, dma_addr_t dma) +{ + if (!addr) + return; + + if (hcd->localmem_pool) { + gen_pool_free(hcd->localmem_pool, + (unsigned long)addr, size); + return; + } + + if (!hcd_uses_dma(hcd)) { + free_pages((unsigned long)addr, get_order(size)); + return; + } + + dma_free_coherent(hcd->self.sysdev, size, addr, dma); +} diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e501a03d6c700..3936ca7f7d2fc 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -186,6 +186,7 @@ static int connected(struct usb_dev_state *ps) static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count) { struct usb_dev_state *ps = usbm->ps; + struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); unsigned long flags; spin_lock_irqsave(&ps->lock, flags); @@ -194,8 +195,8 @@ static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count) list_del(&usbm->memlist); spin_unlock_irqrestore(&ps->lock, flags); - usb_free_coherent(ps->dev, usbm->size, usbm->mem, - usbm->dma_handle); + hcd_buffer_free_pages(hcd, usbm->size, + usbm->mem, 
usbm->dma_handle); usbfs_decrease_memory_usage( usbm->size + sizeof(struct usb_memory)); kfree(usbm); @@ -247,8 +248,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) goto error_decrease_mem; } - mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN, - &dma_handle); + mem = hcd_buffer_alloc_pages(hcd, + size, GFP_USER | __GFP_NOWARN, &dma_handle); if (!mem) { ret = -ENOMEM; goto error_free_usbm; diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 094c77eaf4551..0c7eff91adf4e 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -501,6 +501,11 @@ void *hcd_buffer_alloc(struct usb_bus *bus, size_t size, void hcd_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma); +void *hcd_buffer_alloc_pages(struct usb_hcd *hcd, + size_t size, gfp_t mem_flags, dma_addr_t *dma); +void hcd_buffer_free_pages(struct usb_hcd *hcd, + size_t size, void *addr, dma_addr_t dma); + /* generic bus glue, needed for host controllers that don't use PCI */ extern irqreturn_t usb_hcd_irq(int irq, void *__hcd); From d0b861653f8c16839c3035875b556afc4472f941 Mon Sep 17 00:00:00 2001 From: Ruihan Li Date: Mon, 15 May 2023 21:09:56 +0800 Subject: [PATCH 166/303] usb: usbfs: Use consistent mmap functions When hcd->localmem_pool is non-null, localmem_pool is used to allocate DMA memory. In this case, the dma address will be properly returned (in dma_handle), and dma_mmap_coherent should be used to map this memory into the user space. However, the current implementation uses pfn_remap_range, which is supposed to map normal pages. Instead of repeating the logic in the memory allocation function, this patch introduces a more robust solution. Here, the type of allocated memory is checked by testing whether dma_handle is properly set. If dma_handle is properly returned, it means some DMA pages are allocated and dma_mmap_coherent should be used to map them. Otherwise, normal pages are allocated and pfn_remap_range should be called. This ensures that the correct mmap functions are used consistently, independently with logic details that determine which type of memory gets allocated. Fixes: a0e710a7def4 ("USB: usbfs: fix mmap dma mismatch") Cc: stable@vger.kernel.org Signed-off-by: Ruihan Li Link: https://lore.kernel.org/r/20230515130958.32471-3-lrh2000@pku.edu.cn Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/devio.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 3936ca7f7d2fc..fcf68818e9992 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -235,7 +235,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) size_t size = vma->vm_end - vma->vm_start; void *mem; unsigned long flags; - dma_addr_t dma_handle; + dma_addr_t dma_handle = DMA_MAPPING_ERROR; int ret; ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory)); @@ -265,7 +265,14 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) usbm->vma_use_count = 1; INIT_LIST_HEAD(&usbm->memlist); - if (hcd->localmem_pool || !hcd_uses_dma(hcd)) { + /* + * In DMA-unavailable cases, hcd_buffer_alloc_pages allocates + * normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check + * whether we are in such cases, and then use remap_pfn_range (or + * dma_mmap_coherent) to map normal (or DMA) pages into the user + * space, respectively. 
+ */ + if (dma_handle == DMA_MAPPING_ERROR) { if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(usbm->mem) >> PAGE_SHIFT, size, vma->vm_page_prot) < 0) { From 81a31a860bb61d54eb688af2568d9332ed9b8942 Mon Sep 17 00:00:00 2001 From: Ruihan Li Date: Mon, 15 May 2023 21:09:57 +0800 Subject: [PATCH 167/303] mm: page_table_check: Make it dependent on EXCLUSIVE_SYSTEM_RAM Without EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory regions into the userspace via /dev/mem. At the same time, pages may change their properties (e.g., from anonymous pages to named pages) while they are still being mapped in the userspace, leading to "corruption" detected by the page table check. To avoid these false positives, this patch makes PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. This dependency is understandable because PAGE_TABLE_CHECK is a hardening technique but /dev/mem without STRICT_DEVMEM (i.e., !EXCLUSIVE_SYSTEM_RAM) is itself a security problem. Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be mapped via /dev/mem. However, these pages are always considered as named pages, so they won't break the logic used in the page table check. Cc: # 5.17 Signed-off-by: Ruihan Li Acked-by: David Hildenbrand Acked-by: Pasha Tatashin Link: https://lore.kernel.org/r/20230515130958.32471-4-lrh2000@pku.edu.cn Signed-off-by: Greg Kroah-Hartman --- Documentation/mm/page_table_check.rst | 19 +++++++++++++++++++ mm/Kconfig.debug | 1 + 2 files changed, 20 insertions(+) diff --git a/Documentation/mm/page_table_check.rst b/Documentation/mm/page_table_check.rst index cfd8f4117cf3e..c12838ce6b8de 100644 --- a/Documentation/mm/page_table_check.rst +++ b/Documentation/mm/page_table_check.rst @@ -52,3 +52,22 @@ Build kernel with: Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page table support without extra kernel parameter. + +Implementation notes +==================== + +We specifically decided not to use VMA information in order to avoid relying on +MM states (except for limited "struct page" info). The page table check is a +separate from Linux-MM state machine that verifies that the user accessible +pages are not falsely shared. + +PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without +EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory +regions into the userspace via /dev/mem. At the same time, pages may change +their properties (e.g., from anonymous pages to named pages) while they are +still being mapped in the userspace, leading to "corruption" detected by the +page table check. + +Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be mapped via +/dev/mem. However, these pages are always considered as named pages, so they +won't break the logic used in the page table check. 
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index a925415b4d109..018a5bd2f576d 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug @@ -98,6 +98,7 @@ config PAGE_OWNER config PAGE_TABLE_CHECK bool "Check for invalid mappings in user page tables" depends on ARCH_SUPPORTS_PAGE_TABLE_CHECK + depends on EXCLUSIVE_SYSTEM_RAM select PAGE_EXTENSION help Check that anonymous page is not being mapped twice with read write From 44d0fb387b53e56c8a050bac5c7d460e21eb226f Mon Sep 17 00:00:00 2001 From: Ruihan Li Date: Mon, 15 May 2023 21:09:58 +0800 Subject: [PATCH 168/303] mm: page_table_check: Ensure user pages are not slab pages The current uses of PageAnon in page table check functions can lead to type confusion bugs between struct page and slab [1], if slab pages are accidentally mapped into the user space. This is because slab reuses the bits in struct page to store its internal states, which renders PageAnon ineffective on slab pages. Since slab pages are not expected to be mapped into the user space, this patch adds BUG_ON(PageSlab(page)) checks to make sure that slab pages are not inadvertently mapped. Otherwise, there must be some bugs in the kernel. Reported-by: syzbot+fcf1a817ceb50935ce99@syzkaller.appspotmail.com Closes: https://lore.kernel.org/lkml/000000000000258e5e05fae79fc1@google.com/ [1] Fixes: df4e817b7108 ("mm: page table check") Cc: # 5.17 Signed-off-by: Ruihan Li Acked-by: Pasha Tatashin Link: https://lore.kernel.org/r/20230515130958.32471-5-lrh2000@pku.edu.cn Signed-off-by: Greg Kroah-Hartman --- include/linux/page-flags.h | 6 ++++++ mm/page_table_check.c | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 1c68d67b832ff..92a2063a0a232 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -617,6 +617,12 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) * Please note that, confusingly, "page_mapping" refers to the inode * address_space which maps the page from disk; whereas "page_mapped" * refers to user virtual address space into which the page is mapped. + * + * For slab pages, since slab reuses the bits in struct page to store its + * internal states, the page->mapping does not exist as such, nor do these + * flags below. So in order to avoid testing non-existent bits, please + * make sure that PageSlab(page) actually evaluates to false before calling + * the following functions (e.g., PageAnon). See mm/slab.h. 
*/ #define PAGE_MAPPING_ANON 0x1 #define PAGE_MAPPING_MOVABLE 0x2 diff --git a/mm/page_table_check.c b/mm/page_table_check.c index 25d8610c00429..f2baf97d5f389 100644 --- a/mm/page_table_check.c +++ b/mm/page_table_check.c @@ -71,6 +71,8 @@ static void page_table_check_clear(struct mm_struct *mm, unsigned long addr, page = pfn_to_page(pfn); page_ext = page_ext_get(page); + + BUG_ON(PageSlab(page)); anon = PageAnon(page); for (i = 0; i < pgcnt; i++) { @@ -107,6 +109,8 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr, page = pfn_to_page(pfn); page_ext = page_ext_get(page); + + BUG_ON(PageSlab(page)); anon = PageAnon(page); for (i = 0; i < pgcnt; i++) { @@ -133,6 +137,8 @@ void __page_table_check_zero(struct page *page, unsigned int order) struct page_ext *page_ext; unsigned long i; + BUG_ON(PageSlab(page)); + page_ext = page_ext_get(page); BUG_ON(!page_ext); for (i = 0; i < (1ul << order); i++) { From ffe14de983252862c91ad23bd5ca72fd9398d0e6 Mon Sep 17 00:00:00 2001 From: Michael Margolin Date: Thu, 25 May 2023 09:44:44 +0000 Subject: [PATCH 169/303] MAINTAINERS: Update maintainer of Amazon EFA driver Change EFA driver maintainer from Gal Pressman to myself. Keep Gal as a reviewer at his request. Link: https://lore.kernel.org/r/20230525094444.12570-1-mrgolin@amazon.com Signed-off-by: Michael Margolin Acked-by: Gal Pressman Signed-off-by: Jason Gunthorpe --- MAINTAINERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index c560437378483..dc8c4708733b6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -956,7 +956,8 @@ F: Documentation/networking/device_drivers/ethernet/amazon/ena.rst F: drivers/net/ethernet/amazon/ AMAZON RDMA EFA DRIVER -M: Gal Pressman +M: Michael Margolin +R: Gal Pressman R: Yossi Leybovich L: linux-rdma@vger.kernel.org S: Supported From c8f304d75f6c6cc679a73f89591f9a915da38f09 Mon Sep 17 00:00:00 2001 From: Mustafa Ismail Date: Mon, 22 May 2023 10:56:53 -0500 Subject: [PATCH 170/303] RDMA/irdma: Prevent QP use after free There is a window where the poll cq may use a QP that has been freed. This can happen if a CQE is polled before irdma_clean_cqes() can clear the CQE's related to the QP and the destroy QP races to free the QP memory. then the QP structures are used in irdma_poll_cq. Fix this by moving the clearing of CQE's before the reference is removed and the QP is destroyed. 
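In outline, the destroy path after this change is ordered as sketched below (simplified, not the literal function body; see the diff):

        /* irdma_destroy_qp(), simplified */
        if (!iwqp->user_mode) {
                irdma_clean_cqes(iwqp, iwqp->iwscq);    /* clear CQEs while the QP is still valid */
                irdma_clean_cqes(iwqp, iwqp->iwrcq);
        }

        irdma_qp_rem_ref(&iwqp->ibqp);                  /* only now drop the reference */
        wait_for_completion(&iwqp->free_qp);
        /* ... destroy the HW QP and free the remaining resources ... */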
Fixes: b48c24c2d710 ("RDMA/irdma: Implement device supported verb APIs") Link: https://lore.kernel.org/r/20230522155654.1309-3-shiraz.saleem@intel.com Signed-off-by: Mustafa Ismail Signed-off-by: Shiraz Saleem Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/irdma/verbs.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index ab5cdf7827852..68ce3cd400029 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -522,11 +522,6 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) if (!iwqp->user_mode) cancel_delayed_work_sync(&iwqp->dwork_flush); - irdma_qp_rem_ref(&iwqp->ibqp); - wait_for_completion(&iwqp->free_qp); - irdma_free_lsmm_rsrc(iwqp); - irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp); - if (!iwqp->user_mode) { if (iwqp->iwscq) { irdma_clean_cqes(iwqp, iwqp->iwscq); @@ -534,6 +529,12 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) irdma_clean_cqes(iwqp, iwqp->iwrcq); } } + + irdma_qp_rem_ref(&iwqp->ibqp); + wait_for_completion(&iwqp->free_qp); + irdma_free_lsmm_rsrc(iwqp); + irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp); + irdma_remove_push_mmap_entries(iwqp); irdma_free_qp_rsrc(iwqp); From 5842d1d9c1b0d17e0c29eae65ae1f245f83682dd Mon Sep 17 00:00:00 2001 From: Mustafa Ismail Date: Mon, 22 May 2023 10:56:54 -0500 Subject: [PATCH 171/303] RDMA/irdma: Fix Local Invalidate fencing If the local invalidate fence is indicated in the WR, only the read fence is currently being set in WQE. Fix this to set both the read and local fence in the WQE. Fixes: b48c24c2d710 ("RDMA/irdma: Implement device supported verb APIs") Link: https://lore.kernel.org/r/20230522155654.1309-4-shiraz.saleem@intel.com Signed-off-by: Mustafa Ismail Signed-off-by: Shiraz Saleem Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/irdma/verbs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 68ce3cd400029..eaa12c1245982 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -3292,6 +3292,7 @@ static int irdma_post_send(struct ib_qp *ibqp, break; case IB_WR_LOCAL_INV: info.op_type = IRDMA_OP_TYPE_INV_STAG; + info.local_fence = info.read_fence; info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey; err = irdma_uk_stag_local_invalidate(ukqp, &info, true); break; From 7f875850f20a42f488840c9df7af91ef7db2d576 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 22 May 2023 20:09:57 +0900 Subject: [PATCH 172/303] ata: libata-scsi: Use correct device no in ata_find_dev() For devices not attached to a port multiplier and managed directly by libata, the device number passed to ata_find_dev() must always be lower than the maximum number of devices returned by ata_link_max_devices(). That is 1 for SATA devices or 2 for an IDE link with master+slave devices. This device number is the SCSI device ID which matches these constraints as the IDs are generated per port and so never exceed the maximum number of devices for the link being used. However, for libsas managed devices, SCSI device IDs are assigned per struct scsi_host, leading to device IDs for SATA devices that can be well in excess of libata per-link maximum number of devices. 
This results in ata_find_dev() to always return NULL for libsas managed devices except for the first device of the target scsi_host with ID (device number) equal to 0. This issue is visible by executing the hdparm utility, which fails. E.g.: hdparm -i /dev/sdX /dev/sdX: HDIO_GET_IDENTITY failed: No message of desired type Fix this by rewriting ata_find_dev() to ignore the device number for non-PMP attached devices with a link with at most 1 device, that is SATA devices. For these, the device number 0 is always used to return the correct pointer to the struct ata_device of the port link. This change excludes IDE master/slave setups (maximum number of devices per link is 2) and port-multiplier attached devices. Also, to be consistant with the fact that SCSI device IDs and channel numbers used as device numbers are both unsigned int, change the devno argument of ata_find_dev() to unsigned int. Reported-by: Xingui Yang Fixes: 41bda9c98035 ("libata-link: update hotplug to handle PMP links") Cc: stable@vger.kernel.org Signed-off-by: Damien Le Moal Reviewed-by: Jason Yan --- drivers/ata/libata-scsi.c | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 7bb12deab70c4..8ce90284eb344 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2694,18 +2694,36 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) return 0; } -static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) +static struct ata_device *ata_find_dev(struct ata_port *ap, unsigned int devno) { - if (!sata_pmp_attached(ap)) { - if (likely(devno >= 0 && - devno < ata_link_max_devices(&ap->link))) + /* + * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case), + * or 2 (IDE master + slave case). However, the former case includes + * libsas hosted devices which are numbered per scsi host, leading + * to devno potentially being larger than 0 but with each struct + * ata_device having its own struct ata_port and struct ata_link. + * To accommodate these, ignore devno and always use device number 0. + */ + if (likely(!sata_pmp_attached(ap))) { + int link_max_devices = ata_link_max_devices(&ap->link); + + if (link_max_devices == 1) + return &ap->link.device[0]; + + if (devno < link_max_devices) return &ap->link.device[devno]; - } else { - if (likely(devno >= 0 && - devno < ap->nr_pmp_links)) - return &ap->pmp_link[devno].device[0]; + + return NULL; } + /* + * For PMP-attached devices, the device number corresponds to C + * (channel) of SCSI [H:C:I:L], indicating the port pmp link + * for the device. + */ + if (devno < ap->nr_pmp_links) + return &ap->pmp_link[devno].device[0]; + return NULL; } From 36936a56e1814f6c526fe71fbf980beab4f5577a Mon Sep 17 00:00:00 2001 From: Sebastian Krzyszkowiak Date: Fri, 26 May 2023 16:38:11 +0200 Subject: [PATCH 173/303] net: usb: qmi_wwan: Set DTR quirk for BroadMobi BM818 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit BM818 is based on Qualcomm MDM9607 chipset. 
Fixes: 9a07406b00cd ("net: usb: qmi_wwan: Add the BroadMobi BM818 card") Cc: stable@vger.kernel.org Signed-off-by: Sebastian Krzyszkowiak Acked-by: Bjørn Mork Link: https://lore.kernel.org/r/20230526-bm818-dtr-v1-1-64bbfa6ba8af@puri.sm Signed-off-by: Jakub Kicinski --- drivers/net/usb/qmi_wwan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 571e37e67f9ce..f1865d047971b 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1325,7 +1325,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ - {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */ + {QMI_QUIRK_SET_DTR(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ From 0684f29a89e58944a9bfae3a8b5c1a217e44c960 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 26 May 2023 15:06:53 -0700 Subject: [PATCH 174/303] netlink: specs: correct types of legacy arrays ethtool has some attrs which dump multiple scalars into an attribute. The spec currently expects one attr per entry. Fixes: a353318ebf24 ("tools: ynl: populate most of the ethtool spec") Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20230526220653.65538-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- Documentation/netlink/specs/ethtool.yaml | 32 ++++++------------------ 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml index 129f413ea3496..3abc576ff7976 100644 --- a/Documentation/netlink/specs/ethtool.yaml +++ b/Documentation/netlink/specs/ethtool.yaml @@ -60,22 +60,6 @@ attribute-sets: type: nest nested-attributes: bitset-bits - - - name: u64-array - attributes: - - - name: u64 - type: nest - multi-attr: true - nested-attributes: u64 - - - name: s32-array - attributes: - - - name: s32 - type: nest - multi-attr: true - nested-attributes: s32 - name: string attributes: @@ -705,16 +689,16 @@ attribute-sets: type: u8 - name: corrected - type: nest - nested-attributes: u64-array + type: binary + sub-type: u64 - name: uncorr - type: nest - nested-attributes: u64-array + type: binary + sub-type: u64 - name: corr-bits - type: nest - nested-attributes: u64-array + type: binary + sub-type: u64 - name: fec attributes: @@ -827,8 +811,8 @@ attribute-sets: type: u32 - name: index - type: nest - nested-attributes: s32-array + type: binary + sub-type: s32 - name: module attributes: From 6ffc57ea004234d9373c57b204fd10370a69f392 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 26 May 2023 15:43:42 +0000 Subject: [PATCH 175/303] af_packet: do not use READ_ONCE() in packet_bind() A recent patch added READ_ONCE() in packet_bind() and packet_bind_spkt() This is better handled by reading pkt_sk(sk)->num later in packet_do_bind() while appropriate lock is held. READ_ONCE() in writers are often an evidence of something being wrong. 
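Sketched out, the default protocol is now resolved inside packet_do_bind() with the lock held, rather than sampled in the callers (simplified, see the diff below):

        /* packet_do_bind() */
        spin_lock(&po->bind_lock);
        if (!proto)
                proto = po->num;        /* po->num is only updated under bind_lock */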
Fixes: 822b5a1c17df ("af_packet: Fix data-races of pkt_sk(sk)->num.") Signed-off-by: Eric Dumazet Reviewed-by: Willem de Bruijn Reviewed-by: Jiri Pirko Reviewed-by: Kuniyuki Iwashima Link: https://lore.kernel.org/r/20230526154342.2533026-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/packet/af_packet.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index a1f9a0e9f3c8a..a2dbeb264f260 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3201,6 +3201,9 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, lock_sock(sk); spin_lock(&po->bind_lock); + if (!proto) + proto = po->num; + rcu_read_lock(); if (po->fanout) { @@ -3299,7 +3302,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min)); name[sizeof(uaddr->sa_data_min)] = 0; - return packet_do_bind(sk, name, 0, READ_ONCE(pkt_sk(sk)->num)); + return packet_do_bind(sk, name, 0, 0); } static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) @@ -3316,8 +3319,7 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len if (sll->sll_family != AF_PACKET) return -EINVAL; - return packet_do_bind(sk, NULL, sll->sll_ifindex, - sll->sll_protocol ? : READ_ONCE(pkt_sk(sk)->num)); + return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol); } static struct proto packet_proto = { From 4faeee0cf8a5d88d63cdbc3bab124fb0e6aed08c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 26 May 2023 16:34:58 +0000 Subject: [PATCH 176/303] tcp: deny tcp_disconnect() when threads are waiting Historically connect(AF_UNSPEC) has been abused by syzkaller and other fuzzers to trigger various bugs. A recent one triggers a divide-by-zero [1], and Paolo Abeni was able to diagnose the issue. tcp_recvmsg_locked() has tests about sk_state being not TCP_LISTEN and TCP REPAIR mode being not used. Then later if socket lock is released in sk_wait_data(), another thread can call connect(AF_UNSPEC), then make this socket a TCP listener. When recvmsg() is resumed, it can eventually call tcp_cleanup_rbuf() and attempt a divide by 0 in tcp_rcv_space_adjust() [1] This patch adds a new socket field, counting number of threads blocked in sk_wait_event() and inet_wait_for_connect(). If this counter is not zero, tcp_disconnect() returns an error. This patch adds code in blocking socket system calls, thus should not hurt performance of non blocking ones. 
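For context, the connect(AF_UNSPEC) idiom involved looks roughly like the userspace sketch below; with this change it is expected to fail with EBUSY while another thread is blocked on the same socket in, say, recvmsg():

        struct sockaddr unspec = { .sa_family = AF_UNSPEC };

        /* thread B, while thread A is blocked in recvmsg(fd, ...) */
        if (connect(fd, &unspec, sizeof(unspec)) == -1)
                perror("connect(AF_UNSPEC)");   /* now EBUSY instead of tearing
                                                 * the socket down under thread A */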
Note that we probably could revert commit 499350a5a6e7 ("tcp: initialize rcv_mss to TCP_MIN_MSS instead of 0") to restore original tcpi_rcv_mss meaning (was 0 if no payload was ever received on a socket) [1] divide error: 0000 [#1] PREEMPT SMP KASAN CPU: 0 PID: 13832 Comm: syz-executor.5 Not tainted 6.3.0-rc4-syzkaller-00224-g00c7b5f4ddc5 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/02/2023 RIP: 0010:tcp_rcv_space_adjust+0x36e/0x9d0 net/ipv4/tcp_input.c:740 Code: 00 00 00 00 fc ff df 4c 89 64 24 48 8b 44 24 04 44 89 f9 41 81 c7 80 03 00 00 c1 e1 04 44 29 f0 48 63 c9 48 01 e9 48 0f af c1 <49> f7 f6 48 8d 04 41 48 89 44 24 40 48 8b 44 24 30 48 c1 e8 03 48 RSP: 0018:ffffc900033af660 EFLAGS: 00010206 RAX: 4a66b76cbade2c48 RBX: ffff888076640cc0 RCX: 00000000c334e4ac RDX: 0000000000000000 RSI: dffffc0000000000 RDI: 0000000000000001 RBP: 00000000c324e86c R08: 0000000000000001 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: ffff8880766417f8 R13: ffff888028fbb980 R14: 0000000000000000 R15: 0000000000010344 FS: 00007f5bffbfe700(0000) GS:ffff8880b9800000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000001b32f25000 CR3: 000000007ced0000 CR4: 00000000003506f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: tcp_recvmsg_locked+0x100e/0x22e0 net/ipv4/tcp.c:2616 tcp_recvmsg+0x117/0x620 net/ipv4/tcp.c:2681 inet6_recvmsg+0x114/0x640 net/ipv6/af_inet6.c:670 sock_recvmsg_nosec net/socket.c:1017 [inline] sock_recvmsg+0xe2/0x160 net/socket.c:1038 ____sys_recvmsg+0x210/0x5a0 net/socket.c:2720 ___sys_recvmsg+0xf2/0x180 net/socket.c:2762 do_recvmmsg+0x25e/0x6e0 net/socket.c:2856 __sys_recvmmsg net/socket.c:2935 [inline] __do_sys_recvmmsg net/socket.c:2958 [inline] __se_sys_recvmmsg net/socket.c:2951 [inline] __x64_sys_recvmmsg+0x20f/0x260 net/socket.c:2951 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x39/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd RIP: 0033:0x7f5c0108c0f9 Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 f1 19 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007f5bffbfe168 EFLAGS: 00000246 ORIG_RAX: 000000000000012b RAX: ffffffffffffffda RBX: 00007f5c011ac050 RCX: 00007f5c0108c0f9 RDX: 0000000000000001 RSI: 0000000020000bc0 RDI: 0000000000000003 RBP: 00007f5c010e7b39 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000122 R11: 0000000000000246 R12: 0000000000000000 R13: 00007f5c012cfb1f R14: 00007f5bffbfe300 R15: 0000000000022000 Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: syzbot Reported-by: Paolo Abeni Diagnosed-by: Paolo Abeni Signed-off-by: Eric Dumazet Tested-by: Paolo Abeni Link: https://lore.kernel.org/r/20230526163458.2880232-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- include/net/sock.h | 4 ++++ net/ipv4/af_inet.c | 2 ++ net/ipv4/inet_connection_sock.c | 1 + net/ipv4/tcp.c | 6 ++++++ 4 files changed, 13 insertions(+) diff --git a/include/net/sock.h b/include/net/sock.h index 656ea89f60ff9..b418425d7230c 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -336,6 +336,7 @@ struct sk_filter; * @sk_cgrp_data: cgroup data for this cgroup * @sk_memcg: this socket's memory cgroup association * @sk_write_pending: a write to stream socket waits to start + * @sk_wait_pending: number of threads blocked on this 
socket * @sk_state_change: callback to indicate change in the state of the sock * @sk_data_ready: callback to indicate there is data to be processed * @sk_write_space: callback to indicate there is bf sending space available @@ -428,6 +429,7 @@ struct sock { unsigned int sk_napi_id; #endif int sk_rcvbuf; + int sk_wait_pending; struct sk_filter __rcu *sk_filter; union { @@ -1174,6 +1176,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk) #define sk_wait_event(__sk, __timeo, __condition, __wait) \ ({ int __rc; \ + __sk->sk_wait_pending++; \ release_sock(__sk); \ __rc = __condition; \ if (!__rc) { \ @@ -1183,6 +1186,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk) } \ sched_annotate_sleep(); \ lock_sock(__sk); \ + __sk->sk_wait_pending--; \ __rc = __condition; \ __rc; \ }) diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index c4aab3aacbd89..4a76ebf793b85 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -586,6 +586,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) add_wait_queue(sk_sleep(sk), &wait); sk->sk_write_pending += writebias; + sk->sk_wait_pending++; /* Basic assumption: if someone sets sk->sk_err, he _must_ * change state of the socket from TCP_SYN_*. @@ -601,6 +602,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) } remove_wait_queue(sk_sleep(sk), &wait); sk->sk_write_pending -= writebias; + sk->sk_wait_pending--; return timeo; } diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 65ad4251f6fd8..1386787eaf1a5 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -1142,6 +1142,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, if (newsk) { struct inet_connection_sock *newicsk = inet_csk(newsk); + newsk->sk_wait_pending = 0; inet_sk_set_state(newsk, TCP_SYN_RECV); newicsk->icsk_bind_hash = NULL; newicsk->icsk_bind2_hash = NULL; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index a60f6f4e7cd91..635607ac37e45 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3081,6 +3081,12 @@ int tcp_disconnect(struct sock *sk, int flags) int old_state = sk->sk_state; u32 seq; + /* Deny disconnect if other threads are blocked in sk_wait_event() + * or inet_wait_for_connect(). + */ + if (sk->sk_wait_pending) + return -EBUSY; + if (old_state != TCP_CLOSE) tcp_set_state(sk, TCP_CLOSE); From 34dfde4ad87b84d21278a7e19d92b5b2c68e6c4d Mon Sep 17 00:00:00 2001 From: Cambda Zhu Date: Sat, 27 May 2023 12:03:17 +0800 Subject: [PATCH 177/303] tcp: Return user_mss for TCP_MAXSEG in CLOSE/LISTEN state if user_mss set This patch replaces the tp->mss_cache check in getting TCP_MAXSEG with tp->rx_opt.user_mss check for CLOSE/LISTEN sock. Since tp->mss_cache is initialized with TCP_MSS_DEFAULT, checking if it's zero is probably a bug. With this change, getting TCP_MAXSEG before connecting will return default MSS normally, and return user_mss if user_mss is set. 
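A userspace sketch of the visible change, assuming a freshly created, unconnected TCP socket:

        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int mss = 1400;
        socklen_t len = sizeof(mss);

        setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss));    /* sets user_mss */
        getsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, &len);
        /* before: mss == 536 (TCP_MSS_DEFAULT, i.e. the initial mss_cache)
         * after:  mss == 1400 (rx_opt.user_mss)                          */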
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: Jack Yang Suggested-by: Eric Dumazet Link: https://lore.kernel.org/netdev/CANn89i+3kL9pYtkxkwxwNMzvC_w3LNUum_2=3u+UyLBmGmifHA@mail.gmail.com/#t Signed-off-by: Cambda Zhu Link: https://lore.kernel.org/netdev/14D45862-36EA-4076-974C-EA67513C92F6@linux.alibaba.com/ Reviewed-by: Jason Xing Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20230527040317.68247-1-cambda@linux.alibaba.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 635607ac37e45..8d20d9221238c 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4078,7 +4078,8 @@ int do_tcp_getsockopt(struct sock *sk, int level, switch (optname) { case TCP_MAXSEG: val = tp->mss_cache; - if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) + if (tp->rx_opt.user_mss && + ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) val = tp->rx_opt.user_mss; if (tp->repair) val = tp->rx_opt.mss_clamp; From 81d358b118dc364bd147432db569d4d400a5a4f2 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 25 May 2023 12:43:21 +1000 Subject: [PATCH 178/303] powerpc/crypto: Fix aes-gcm-p10 link errors The recently added P10 AES/GCM code added some files containing CRYPTOGAMS perl-asm code which are near duplicates of the p8 files found in drivers/crypto/vmx. In particular the newly added files produce functions with identical names to the existing code. When the kernel is built with CONFIG_CRYPTO_AES_GCM_P10=y and CONFIG_CRYPTO_DEV_VMX_ENCRYPT=y that leads to link errors, eg: ld: drivers/crypto/vmx/aesp8-ppc.o: in function `aes_p8_set_encrypt_key': (.text+0xa0): multiple definition of `aes_p8_set_encrypt_key'; arch/powerpc/crypto/aesp8-ppc.o:(.text+0xa0): first defined here ... ld: drivers/crypto/vmx/ghashp8-ppc.o: in function `gcm_ghash_p8': (.text+0x140): multiple definition of `gcm_ghash_p8'; arch/powerpc/crypto/ghashp8-ppc.o:(.text+0x2e4): first defined here Fix it for now by renaming the newly added files and functions to use "p10" instead of "p8" in the names. 
Fixes: 45a4672b9a6e ("crypto: p10-aes-gcm - Update Kconfig and Makefile") Tested-by: Vishal Chourasia Signed-off-by: Michael Ellerman Link: https://msgid.link/20230525150501.37081-1-mpe@ellerman.id.au --- arch/powerpc/crypto/Makefile | 10 +++++----- arch/powerpc/crypto/aes-gcm-p10-glue.c | 18 +++++++++--------- .../crypto/{aesp8-ppc.pl => aesp10-ppc.pl} | 2 +- .../crypto/{ghashp8-ppc.pl => ghashp10-ppc.pl} | 12 ++++++------ 4 files changed, 21 insertions(+), 21 deletions(-) rename arch/powerpc/crypto/{aesp8-ppc.pl => aesp10-ppc.pl} (99%) rename arch/powerpc/crypto/{ghashp8-ppc.pl => ghashp10-ppc.pl} (97%) diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile index 05c7486f42c58..7b4f516abec1d 100644 --- a/arch/powerpc/crypto/Makefile +++ b/arch/powerpc/crypto/Makefile @@ -22,15 +22,15 @@ sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o -aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp8-ppc.o aesp8-ppc.o +aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o quiet_cmd_perl = PERL $@ cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@ -targets += aesp8-ppc.S ghashp8-ppc.S +targets += aesp10-ppc.S ghashp10-ppc.S -$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE +$(obj)/aesp10-ppc.S $(obj)/ghashp10-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE $(call if_changed,perl) -OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y -OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y +OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y +OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c index bd3475f5348d9..4b6e899895e7b 100644 --- a/arch/powerpc/crypto/aes-gcm-p10-glue.c +++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c @@ -30,15 +30,15 @@ MODULE_AUTHOR("Danny Tsen aadLen = alen; i = alen & ~0xf; if (i) { - gcm_ghash_p8(nXi, hash->Htable+32, aad, i); + gcm_ghash_p10(nXi, hash->Htable+32, aad, i); aad += i; alen -= i; } @@ -102,7 +102,7 @@ static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash, nXi[i] ^= aad[i]; memset(gctx->aad_hash, 0, 16); - gcm_ghash_p8(gctx->aad_hash, hash->Htable+32, nXi, 16); + gcm_ghash_p10(gctx->aad_hash, hash->Htable+32, nXi, 16); } else { memcpy(gctx->aad_hash, nXi, 16); } @@ -115,7 +115,7 @@ static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey, { __be32 counter = cpu_to_be32(1); - aes_p8_encrypt(hash->H, hash->H, rdkey); + aes_p10_encrypt(hash->H, hash->H, rdkey); set_subkey(hash->H); gcm_init_htable(hash->Htable+32, hash->H); @@ -126,7 +126,7 @@ static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey, /* * Encrypt counter vector as iv tag and increment counter. 
*/ - aes_p8_encrypt(iv, gctx->ivtag, rdkey); + aes_p10_encrypt(iv, gctx->ivtag, rdkey); counter = cpu_to_be32(2); *((__be32 *)(iv+12)) = counter; @@ -160,7 +160,7 @@ static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len) /* * hash (AAD len and len) */ - gcm_ghash_p8(hash->Htable, hash->Htable+32, aclen, 16); + gcm_ghash_p10(hash->Htable, hash->Htable+32, aclen, 16); for (i = 0; i < 16; i++) hash->Htable[i] ^= gctx->ivtag[i]; @@ -192,7 +192,7 @@ static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, int ret; vsx_begin(); - ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + ret = aes_p10_set_encrypt_key(key, keylen * 8, &ctx->enc_key); vsx_end(); return ret ? -EINVAL : 0; diff --git a/arch/powerpc/crypto/aesp8-ppc.pl b/arch/powerpc/crypto/aesp10-ppc.pl similarity index 99% rename from arch/powerpc/crypto/aesp8-ppc.pl rename to arch/powerpc/crypto/aesp10-ppc.pl index 1f22aec27d799..2c06ce2a2c7c0 100644 --- a/arch/powerpc/crypto/aesp8-ppc.pl +++ b/arch/powerpc/crypto/aesp10-ppc.pl @@ -110,7 +110,7 @@ open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!"; $FRAME=8*$SIZE_T; -$prefix="aes_p8"; +$prefix="aes_p10"; $sp="r1"; $vrsave="r12"; diff --git a/arch/powerpc/crypto/ghashp8-ppc.pl b/arch/powerpc/crypto/ghashp10-ppc.pl similarity index 97% rename from arch/powerpc/crypto/ghashp8-ppc.pl rename to arch/powerpc/crypto/ghashp10-ppc.pl index b56603b4a893c..27a6b0bec645f 100644 --- a/arch/powerpc/crypto/ghashp8-ppc.pl +++ b/arch/powerpc/crypto/ghashp10-ppc.pl @@ -64,7 +64,7 @@ .text -.globl .gcm_init_p8 +.globl .gcm_init_p10 lis r0,0xfff0 li r8,0x10 mfspr $vrsave,256 @@ -110,7 +110,7 @@ .long 0 .byte 0,12,0x14,0,0,0,2,0 .long 0 -.size .gcm_init_p8,.-.gcm_init_p8 +.size .gcm_init_p10,.-.gcm_init_p10 .globl .gcm_init_htable lis r0,0xfff0 @@ -237,7 +237,7 @@ .long 0 .size .gcm_init_htable,.-.gcm_init_htable -.globl .gcm_gmult_p8 +.globl .gcm_gmult_p10 lis r0,0xfff8 li r8,0x10 mfspr $vrsave,256 @@ -283,9 +283,9 @@ .long 0 .byte 0,12,0x14,0,0,0,2,0 .long 0 -.size .gcm_gmult_p8,.-.gcm_gmult_p8 +.size .gcm_gmult_p10,.-.gcm_gmult_p10 -.globl .gcm_ghash_p8 +.globl .gcm_ghash_p10 lis r0,0xfff8 li r8,0x10 mfspr $vrsave,256 @@ -350,7 +350,7 @@ .long 0 .byte 0,12,0x14,0,0,0,4,0 .long 0 -.size .gcm_ghash_p8,.-.gcm_ghash_p8 +.size .gcm_ghash_p10,.-.gcm_ghash_p10 .asciz "GHASH for PowerISA 2.07, CRYPTOGAMS by " .align 2 From 9d2ccf00bddc268045e3d65a8108d61ada0e4b4e Mon Sep 17 00:00:00 2001 From: Gaurav Batra Date: Thu, 25 May 2023 09:34:54 -0500 Subject: [PATCH 179/303] powerpc/iommu: Limit number of TCEs to 512 for H_STUFF_TCE hcall MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently in tce_freemulti_pSeriesLP() there is no limit on how many TCEs are passed to the H_STUFF_TCE hcall. This has not caused an issue until now, but newer firmware releases have started enforcing a limit of 512 TCEs per call. The limit is correct per the specification (PAPR v2.12 § 14.5.4.2.3). The code has been in it's current form since it was initially merged. 
Cc: stable@vger.kernel.org Signed-off-by: Gaurav Batra Reviewed-by: Brian King [mpe: Tweak change log wording & add PAPR reference] Signed-off-by: Michael Ellerman Link: https://msgid.link/20230525143454.56878-1-gbatra@linux.vnet.ibm.com --- arch/powerpc/platforms/pseries/iommu.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 918f511837db4..d59e8a98a2008 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -317,13 +317,22 @@ static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift, static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) { u64 rc; + long rpages = npages; + unsigned long limit; if (!firmware_has_feature(FW_FEATURE_STUFF_TCE)) return tce_free_pSeriesLP(tbl->it_index, tcenum, tbl->it_page_shift, npages); - rc = plpar_tce_stuff((u64)tbl->it_index, - (u64)tcenum << tbl->it_page_shift, 0, npages); + do { + limit = min_t(unsigned long, rpages, 512); + + rc = plpar_tce_stuff((u64)tbl->it_index, + (u64)tcenum << tbl->it_page_shift, 0, limit); + + rpages -= limit; + tcenum += limit; + } while (rpages > 0 && !rc); if (rc && printk_ratelimit()) { printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n"); From 719dfd5925e186e09a2a6f23016936ac436f3d78 Mon Sep 17 00:00:00 2001 From: Maninder Singh Date: Mon, 29 May 2023 16:43:37 +0530 Subject: [PATCH 180/303] powerpc/xmon: Use KSYM_NAME_LEN in array size kallsyms_lookup() which in turn calls kallsyms_lookup_buildid() writes to index "KSYM_NAME_LEN - 1". Thus the array passed as namebuf to kallsyms_lookup() should be KSYM_NAME_LEN in size. In xmon.c the array was defined to be "128" bytes directly, without using KSYM_NAME_LEN. Commit b8a94bfb3395 ("kallsyms: increase maximum kernel symbol length to 512") changed the value to 512, but missed updating the xmon code. Fixes: b8a94bfb3395 ("kallsyms: increase maximum kernel symbol length to 512") Cc: stable@vger.kernel.org # v6.1+ Co-developed-by: Onkarnath Signed-off-by: Onkarnath Signed-off-by: Maninder Singh [mpe: Tweak change log wording and fix commit reference] Signed-off-by: Michael Ellerman Link: https://msgid.link/20230529111337.352990-2-maninder1.s@samsung.com --- arch/powerpc/xmon/xmon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 728d3c257e4a3..70c4c59a1a8f4 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -88,7 +88,7 @@ static unsigned long ndump = 64; static unsigned long nidump = 16; static unsigned long ncsum = 4096; static int termch; -static char tmpstr[128]; +static char tmpstr[KSYM_NAME_LEN]; static int tracing_enabled; static long bus_error_jmp[JMP_BUF_LEN]; From 811154e234db72f0a11557a84ba9640f8b3bc823 Mon Sep 17 00:00:00 2001 From: Akihiko Odaki Date: Tue, 30 May 2023 11:46:51 +0900 Subject: [PATCH 181/303] KVM: arm64: Populate fault info for watchpoint When handling ESR_ELx_EC_WATCHPT_LOW, far_el2 member of struct kvm_vcpu_fault_info will be copied to far member of struct kvm_debug_exit_arch and exposed to the userspace. The userspace will see stale values from older faults if the fault info does not get populated. 
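The mechanics of the fix are easiest to see as a dispatch table keyed by exception class, where several classes alias the same fault-info handler. The sketch below is standalone userspace C with made-up EC indices; it only illustrates the table-plus-alias pattern, not the real ESR_ELx_EC_* values or the hyp code.

#include <stdio.h>
#include <stdbool.h>

enum ec { EC_IABT_LOW, EC_DABT_LOW, EC_WATCHPT_LOW, EC_MAX };

typedef bool (*exit_handler_fn)(int ec);

static bool handle_memory_fault(int ec)
{
    printf("populate fault info for EC %d\n", ec); /* far_el2 and friends */
    return true;
}

static const exit_handler_fn exit_handlers[EC_MAX] = {
    [EC_IABT_LOW]    = handle_memory_fault,
    [EC_DABT_LOW]    = handle_memory_fault,
    [EC_WATCHPT_LOW] = handle_memory_fault, /* the entry the fix adds */
};

int main(void)
{
    for (int ec = 0; ec < EC_MAX; ec++)
        if (exit_handlers[ec])
            exit_handlers[ec](ec);
    return 0;
}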
Fixes: 8fb2046180a0 ("KVM: arm64: Move early handlers to per-EC handlers") Suggested-by: Marc Zyngier Signed-off-by: Akihiko Odaki Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230530024651.10014-1-akihiko.odaki@daynix.com Cc: stable@vger.kernel.org --- arch/arm64/kvm/hyp/include/hyp/switch.h | 8 ++++++-- arch/arm64/kvm/hyp/nvhe/switch.c | 2 ++ arch/arm64/kvm/hyp/vhe/switch.c | 1 + 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index e78a08a72a3c9..5c15c58f90cce 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -412,17 +412,21 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code) return false; } -static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) +static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code) { if (!__populate_fault_info(vcpu)) return true; return false; } +static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) + __alias(kvm_hyp_handle_memory_fault); +static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code) + __alias(kvm_hyp_handle_memory_fault); static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) { - if (!__populate_fault_info(vcpu)) + if (kvm_hyp_handle_memory_fault(vcpu, exit_code)) return true; if (static_branch_unlikely(&vgic_v2_cpuif_trap)) { diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 71fa16a0dc775..77791495c9951 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -186,6 +186,7 @@ static const exit_handler_fn hyp_exit_handlers[] = { [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, + [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; @@ -196,6 +197,7 @@ static const exit_handler_fn pvm_exit_handlers[] = { [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, + [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 3d868e84c7a0a..7a1aa511e7da6 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -110,6 +110,7 @@ static const exit_handler_fn hyp_exit_handlers[] = { [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, + [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; From 020c69c1a793ed29d28793808eddd75210c858dd Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 26 May 2023 12:34:54 +0100 Subject: [PATCH 182/303] rxrpc: Truncate UTS_RELEASE for rxrpc version UTS_RELEASE has a maximum length of 64 which can cause rxrpc_version to exceed the 65 byte message limit. Per the rx spec[1]: "If a server receives a packet with a type value of 13, and the client-initiated flag set, it should respond with a 65-byte payload containing a string that identifies the version of AFS software it is running." The current implementation causes a compile error when WERROR is turned on and/or UTS_RELEASE exceeds the length of 49 (making the version string more than 64 characters). 
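The fix described next builds the string once at module init with a bounded format. A userspace sketch of that approach, with an artificially long stand-in for UTS_RELEASE, shows how the %.49s precision bounds the release segment ("linux-" is 6 characters, " AF_RXRPC" is 9, so 6 + 49 + 9 = 64 plus the NUL fits the 65-byte buffer) while keeping the suffix intact:

#include <stdio.h>
#include <string.h>

/* stand-in for UTS_RELEASE, deliberately longer than 49 characters */
#define UTS_RELEASE "6.4.0-rc3-with-a-deliberately-long-local-version-string-appended"

static char rxrpc_version_string[65];

static void rxrpc_gen_version_string(void)
{
    snprintf(rxrpc_version_string, sizeof(rxrpc_version_string),
             "linux-%.49s AF_RXRPC", UTS_RELEASE);
}

int main(void)
{
    rxrpc_gen_version_string();
    printf("%zu bytes used: \"%s\"\n",
           strlen(rxrpc_version_string) + 1, rxrpc_version_string);
    return 0;
}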
Fix this by generating the string during module initialisation and limiting the UTS_RELEASE segment of the string does not exceed 49 chars. We need to make sure that the 64 bytes includes "linux-" at the front and " AF_RXRPC" at the back as this may be used in pattern matching. Fixes: 44ba06987c0b ("RxRPC: Handle VERSION Rx protocol packets") Reported-by: Kenny Ho Link: https://lore.kernel.org/r/20230523223944.691076-1-Kenny.Ho@amd.com/ Signed-off-by: David Howells Acked-by: Kenny Ho cc: Marc Dionne cc: Andrew Lunn cc: David Laight cc: "David S. Miller" cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: linux-afs@lists.infradead.org cc: netdev@vger.kernel.org Link: https://web.mit.edu/kolya/afs/rx/rx-spec [1] Reviewed-by: Simon Horman Reviewed-by: Jeffrey Altman Link: https://lore.kernel.org/r/654974.1685100894@warthog.procyon.org.uk Signed-off-by: Paolo Abeni --- net/rxrpc/af_rxrpc.c | 1 + net/rxrpc/ar-internal.h | 1 + net/rxrpc/local_event.c | 11 ++++++++++- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 31f738d65f1c6..da0b3b5157d5f 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -980,6 +980,7 @@ static int __init af_rxrpc_init(void) BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb)); ret = -ENOMEM; + rxrpc_gen_version_string(); rxrpc_call_jar = kmem_cache_create( "rxrpc_call_jar", sizeof(struct rxrpc_call), 0, SLAB_HWCACHE_ALIGN, NULL); diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 5d44dc08f66d0..e8e14c6f904d9 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -1068,6 +1068,7 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t, /* * local_event.c */ +void rxrpc_gen_version_string(void); void rxrpc_send_version_request(struct rxrpc_local *local, struct rxrpc_host_header *hdr, struct sk_buff *skb); diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c index 5e69ea6b233da..993c69f97488c 100644 --- a/net/rxrpc/local_event.c +++ b/net/rxrpc/local_event.c @@ -16,7 +16,16 @@ #include #include "ar-internal.h" -static const char rxrpc_version_string[65] = "linux-" UTS_RELEASE " AF_RXRPC"; +static char rxrpc_version_string[65]; // "linux-" UTS_RELEASE " AF_RXRPC"; + +/* + * Generate the VERSION packet string. + */ +void rxrpc_gen_version_string(void) +{ + snprintf(rxrpc_version_string, sizeof(rxrpc_version_string), + "linux-%.49s AF_RXRPC", UTS_RELEASE); +} /* * Reply to a version request From b24aa141c2ff26c919237aee61ea1818fc6780d9 Mon Sep 17 00:00:00 2001 From: Wen Gu Date: Fri, 26 May 2023 19:49:00 +0800 Subject: [PATCH 183/303] net/smc: Scan from current RMB list when no position specified When finding the first RMB of link group, it should start from the current RMB list whose index is 0. So fix it. 
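A toy illustration of the iteration bug, with arrays of arrays standing in for the link group's RMB lists: when no current position is supplied, the scan must stay on the list the index already points at (index 0 on the first call) instead of advancing to the next list, which is what made the first list get skipped. All names below are invented for the sketch.

#include <stdio.h>

#define NLISTS 2
#define NBUFS  2

static const char *rmbs[NLISTS][NBUFS] = {
    { "rmb0.0", "rmb0.1" },
    { "rmb1.0", "rmb1.1" },
};

/* return the next buffer, tracking the position via (*lst, *idx) */
static const char *next_rmb(int *lst, int *idx, int have_pos)
{
    if (!have_pos)
        return rmbs[*lst][*idx];        /* no position: stay on current list */

    if (*idx == NBUFS - 1) {            /* end of this list: move on */
        (*lst)++;
        *idx = 0;
        return *lst < NLISTS ? rmbs[*lst][*idx] : NULL;
    }
    (*idx)++;
    return rmbs[*lst][*idx];
}

int main(void)
{
    int lst = 0, idx = 0;
    const char *pos = next_rmb(&lst, &idx, 0);  /* first call: "rmb0.0" */

    while (pos) {
        printf("%s\n", pos);
        pos = next_rmb(&lst, &idx, 1);
    }
    return 0;
}

Before the fix, the no-position case fell into the same branch as the end-of-list case and advanced the list index, so the walk effectively started at the second list.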
Fixes: b4ba4652b3f8 ("net/smc: extend LLC layer for SMC-Rv2") Signed-off-by: Wen Gu Signed-off-by: Paolo Abeni --- net/smc/smc_llc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index a0840b8c935b8..8423e8e0063f4 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -578,7 +578,10 @@ static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr, { struct smc_buf_desc *buf_next; - if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) { + if (!buf_pos) + return _smc_llc_get_next_rmb(lgr, buf_lst); + + if (list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) { (*buf_lst)++; return _smc_llc_get_next_rmb(lgr, buf_lst); } From 71c6aa0305e3d2365d3bfd0134b4025d9e7ba388 Mon Sep 17 00:00:00 2001 From: Wen Gu Date: Fri, 26 May 2023 19:49:01 +0800 Subject: [PATCH 184/303] net/smc: Don't use RMBs not mapped to new link in SMCRv2 ADD LINK We encountered a crash when using SMCRv2. It is caused by a logical error in smc_llc_fill_ext_v2(). BUG: kernel NULL pointer dereference, address: 0000000000000014 #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page PGD 0 P4D 0 Oops: 0000 [#1] PREEMPT SMP PTI CPU: 7 PID: 453 Comm: kworker/7:4 Kdump: loaded Tainted: G W E 6.4.0-rc3+ #44 Workqueue: events smc_llc_add_link_work [smc] RIP: 0010:smc_llc_fill_ext_v2+0x117/0x280 [smc] RSP: 0018:ffffacb5c064bd88 EFLAGS: 00010282 RAX: ffff9a6bc1c3c02c RBX: ffff9a6be3558000 RCX: 0000000000000000 RDX: 0000000000000002 RSI: 0000000000000002 RDI: 000000000000000a RBP: ffffacb5c064bdb8 R08: 0000000000000040 R09: 000000000000000c R10: ffff9a6bc0910300 R11: 0000000000000002 R12: 0000000000000000 R13: 0000000000000002 R14: ffff9a6bc1c3c02c R15: ffff9a6be3558250 FS: 0000000000000000(0000) GS:ffff9a6eefdc0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000014 CR3: 000000010b078003 CR4: 00000000003706e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: smc_llc_send_add_link+0x1ae/0x2f0 [smc] smc_llc_srv_add_link+0x2c9/0x5a0 [smc] ? cc_mkenc+0x40/0x60 smc_llc_add_link_work+0xb8/0x140 [smc] process_one_work+0x1e5/0x3f0 worker_thread+0x4d/0x2f0 ? __pfx_worker_thread+0x10/0x10 kthread+0xe5/0x120 ? __pfx_kthread+0x10/0x10 ret_from_fork+0x2c/0x50 When an alernate RNIC is available in system, SMC will try to add a new link based on the RNIC for resilience. All the RMBs in use will be mapped to the new link. Then the RMBs' MRs corresponding to the new link will be filled into SMCRv2 LLC ADD LINK messages. However, smc_llc_fill_ext_v2() mistakenly accesses to unused RMBs which haven't been mapped to the new link and have no valid MRs, thus causing a crash. So this patch fixes the logic. 
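The corrected ordering, skip first and dereference afterwards, can be shown with a small standalone example. The struct below is a stand-in for the driver's RMB descriptor, not the real smc_buf_desc; the point is that entries not marked used are skipped before any of their per-link state is read.

#include <stdio.h>
#include <stddef.h>

struct rmb {
    int used;           /* buffer mapped/registered on the link */
    int rkey;           /* only meaningful when used != 0 */
    struct rmb *next;
};

static struct rmb *next_rmb(struct rmb *pos)
{
    return pos ? pos->next : NULL;
}

static void fill_rkeys(struct rmb *pos, int max)
{
    for (int i = 0; i < max; i++) {
        while (pos && !pos->used)   /* skip unused entries first */
            pos = next_rmb(pos);
        if (!pos)
            break;
        printf("rkey[%d] = %d\n", i, pos->rkey);
        pos = next_rmb(pos);
    }
}

int main(void)
{
    struct rmb c = { 1, 30, NULL };
    struct rmb b = { 0, -1, &c };   /* unused: must never be read */
    struct rmb a = { 1, 10, &b };

    fill_rkeys(&a, 8);  /* prints rkeys 10 and 30, never touches b */
    return 0;
}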
Fixes: b4ba4652b3f8 ("net/smc: extend LLC layer for SMC-Rv2") Signed-off-by: Wen Gu Signed-off-by: Paolo Abeni --- net/smc/smc_llc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index 8423e8e0063f4..7a8d9163d186e 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -617,6 +617,8 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext, goto out; buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst); for (i = 0; i < ext->num_rkeys; i++) { + while (buf_pos && !(buf_pos)->used) + buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos); if (!buf_pos) break; rmb = buf_pos; @@ -626,8 +628,6 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext, cpu_to_be64((uintptr_t)rmb->cpu_addr) : cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl)); buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos); - while (buf_pos && !(buf_pos)->used) - buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos); } len += i * sizeof(ext->rt[0]); out: From 929f4e7c06ca40c8e58b418c94708d880518d9b4 Mon Sep 17 00:00:00 2001 From: Ivan Bornyakov Date: Tue, 30 May 2023 21:49:35 +0800 Subject: [PATCH 185/303] MAINTAINERS: update Microchip MPF FPGA reviewers As I'm leaving Metrotek, hand over reviewing duty of Microchip MPF FPGA driver to Vladimir. Signed-off-by: Ivan Bornyakov Acked-by: Conor Dooley Acked-by: Vladimir Georgiev Acked-by: Xu Yilun Link: https://lore.kernel.org/r/20230429104838.5064-2-i.bornyakov@metrotek.ru Signed-off-by: Xu Yilun Link: https://lore.kernel.org/r/20230530134936.634370-2-yilun.xu@intel.com Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 250518fc70ff5..205b545d0a4a0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13836,7 +13836,7 @@ F: drivers/tty/serial/8250/8250_pci1xxxx.c MICROCHIP POLARFIRE FPGA DRIVERS M: Conor Dooley -R: Ivan Bornyakov +R: Vladimir Georgiev L: linux-fpga@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml From 9da6225bc7377941acff4476493d515b9fdfea48 Mon Sep 17 00:00:00 2001 From: Ivan Bornyakov Date: Tue, 30 May 2023 21:49:36 +0800 Subject: [PATCH 186/303] dt-bindings: fpga: replace Ivan Bornyakov maintainership As I'm leaving Metrotek, hand over Lattice Slave SPI sysCONFIG FPGA manager and Microchip Polarfire FPGA manager maintainership duties to Vladimir. 
Signed-off-by: Ivan Bornyakov Acked-by: Conor Dooley Acked-by: Vladimir Georgiev Acked-by: Rob Herring Link: https://lore.kernel.org/r/20230429104838.5064-3-i.bornyakov@metrotek.ru Signed-off-by: Xu Yilun Link: https://lore.kernel.org/r/20230530134936.634370-3-yilun.xu@intel.com Signed-off-by: Greg Kroah-Hartman --- Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml | 2 +- .../devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml b/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml index 4fb05eb84e2af..164331eb62750 100644 --- a/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml +++ b/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Lattice Slave SPI sysCONFIG FPGA manager maintainers: - - Ivan Bornyakov + - Vladimir Georgiev description: | Lattice sysCONFIG port, which is used for FPGA configuration, among others, diff --git a/Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml b/Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml index 527532f039cec..a157eecfb5fc3 100644 --- a/Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml +++ b/Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Microchip Polarfire FPGA manager. maintainers: - - Ivan Bornyakov + - Vladimir Georgiev description: Device Tree Bindings for Microchip Polarfire FPGA Manager using slave SPI to From 71698da37ce70c457751baea507a122851a2d481 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Samuel=20Iglesias=20Gons=C3=A1lvez?= Date: Tue, 30 May 2023 10:35:46 +0200 Subject: [PATCH 187/303] MAINTAINERS: Vaibhav Gupta is the new ipack maintainer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I have no longer access to the HW, nor time to properly maintain it. Adding Vaibhav as maintainer as he currently has access to the HW, he is working at CERN (user of these drivers) and he is maintaining them internally there. Signed-off-by: Samuel Iglesias Gonsálvez Acked-by: Vaibhav Gupta Link: https://lore.kernel.org/r/20230530083546.4831-1-vaibhavgupta40@gmail.com Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 205b545d0a4a0..4d623e78ac2fd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10114,7 +10114,7 @@ S: Maintained F: Documentation/process/kernel-docs.rst INDUSTRY PACK SUBSYSTEM (IPACK) -M: Samuel Iglesias Gonsalvez +M: Vaibhav Gupta M: Jens Taprogge M: Greg Kroah-Hartman L: industrypack-devel@lists.sourceforge.net From 91539341a3b6e9c868024a4292455dae36e6f58c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 30 May 2023 11:01:22 +0100 Subject: [PATCH 188/303] irqchip/gic: Correctly validate OF quirk descriptors When checking for OF quirks, make sure either 'compatible' or 'property' is set, and give up otherwise. This avoids non-OF quirks being randomly applied as they don't have any of the OF data that need checking. 
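The added guard amounts to: a table entry that defines neither an OF compatible string nor an OF property is not an OF quirk at all and must never be applied from this path. A standalone sketch of the loop, with plain string comparison standing in for of_device_is_compatible(), property matching omitted, and invented quirk names:

#include <stdio.h>
#include <string.h>

struct quirk {
    const char *desc;
    const char *compatible; /* OF compatible match, may be NULL */
    const char *property;   /* OF property match, may be NULL */
};

static void apply_of_quirks(const struct quirk *q, const char *node_compat)
{
    for (; q->desc; q++) {
        if (!q->compatible && !q->property)
            continue;       /* not an OF quirk: skip it here */
        if (q->compatible && strcmp(q->compatible, node_compat))
            continue;
        printf("applying quirk: %s\n", q->desc);
    }
}

int main(void)
{
    const struct quirk quirks[] = {
        { "iidr-based quirk (non-OF)", NULL, NULL },
        { "firmware pseudo-NMI quirk", "vendor,some-soc", NULL },
        { NULL, NULL, NULL },
    };

    apply_of_quirks(quirks, "vendor,some-soc");
    return 0;
}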
Cc: Douglas Anderson Reported-by: Geert Uytterhoeven Tested-by: Geert Uytterhoeven Fixes: 44bd78dd2b88 ("irqchip/gic-v3: Disable pseudo NMIs on Mediatek devices w/ firmware issues") Signed-off-by: Marc Zyngier --- drivers/irqchip/irq-gic-common.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index de47b51cdadbe..afd6a1841715a 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c @@ -16,6 +16,8 @@ void gic_enable_of_quirks(const struct device_node *np, const struct gic_quirk *quirks, void *data) { for (; quirks->desc; quirks++) { + if (!quirks->compatible && !quirks->property) + continue; if (quirks->compatible && !of_device_is_compatible(np, quirks->compatible)) continue; From 1919b39fc6eabb9a6f9a51706ff6d03865f5df29 Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Fri, 26 May 2023 08:38:57 -0700 Subject: [PATCH 189/303] net: mana: Fix perf regression: remove rx_cqes, tx_cqes counters The apc->eth_stats.rx_cqes is one per NIC (vport), and it's on the frequent and parallel code path of all queues. So, r/w into this single shared variable by many threads on different CPUs creates a lot caching and memory overhead, hence perf regression. And, it's not accurate due to the high volume concurrent r/w. For example, a workload is iperf with 128 threads, and with RPS enabled. We saw perf regression of 25% with the previous patch adding the counters. And this patch eliminates the regression. Since the error path of mana_poll_rx_cq() already has warnings, so keeping the counter and convert it to a per-queue variable is not necessary. So, just remove this counter from this high frequency code path. Also, remove the tx_cqes counter for the same reason. We have warnings & other counters for errors on that path, and don't need to count every normal cqe processing. 
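For background on the per-queue alternative mentioned above (which this patch deliberately does not take, it simply drops the counters): the usual pattern is one counter per queue, cacheline-aligned so the hot path only dirties its own line, with totals summed only when statistics are read. A userspace sketch, with names invented for the example:

#include <stdio.h>
#include <stdint.h>

#define NUM_QUEUES 16

struct queue_stats {
    uint64_t rx_cqes;
} __attribute__((aligned(64)));     /* avoid false sharing between queues */

static struct queue_stats stats[NUM_QUEUES];

static void rx_path(int queue, int cqes)    /* hot path: own cacheline only */
{
    stats[queue].rx_cqes += cqes;
}

static uint64_t total_rx_cqes(void)         /* slow path: ethtool-style readout */
{
    uint64_t sum = 0;

    for (int i = 0; i < NUM_QUEUES; i++)
        sum += stats[i].rx_cqes;
    return sum;
}

int main(void)
{
    rx_path(0, 8);
    rx_path(3, 4);
    printf("rx_cqes total: %llu\n", (unsigned long long)total_rx_cqes());
    return 0;
}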
Cc: stable@vger.kernel.org Fixes: bd7fc6e1957c ("net: mana: Add new MANA VF performance counters for easier troubleshooting") Signed-off-by: Haiyang Zhang Reviewed-by: Horatiu Vultur Reviewed-by: Jiri Pirko Link: https://lore.kernel.org/r/1685115537-31675-1-git-send-email-haiyangz@microsoft.com Signed-off-by: Paolo Abeni --- drivers/net/ethernet/microsoft/mana/mana_en.c | 10 ---------- drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 2 -- include/net/mana/mana.h | 2 -- 3 files changed, 14 deletions(-) diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 06d6292e09b33..d907727c7b7a5 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1279,8 +1279,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq) if (comp_read < 1) return; - apc->eth_stats.tx_cqes = comp_read; - for (i = 0; i < comp_read; i++) { struct mana_tx_comp_oob *cqe_oob; @@ -1363,8 +1361,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq) WARN_ON_ONCE(1); cq->work_done = pkt_transmitted; - - apc->eth_stats.tx_cqes -= pkt_transmitted; } static void mana_post_pkt_rxq(struct mana_rxq *rxq) @@ -1626,15 +1622,11 @@ static void mana_poll_rx_cq(struct mana_cq *cq) { struct gdma_comp *comp = cq->gdma_comp_buf; struct mana_rxq *rxq = cq->rxq; - struct mana_port_context *apc; int comp_read, i; - apc = netdev_priv(rxq->ndev); - comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER); - apc->eth_stats.rx_cqes = comp_read; rxq->xdp_flush = false; for (i = 0; i < comp_read; i++) { @@ -1646,8 +1638,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq) return; mana_process_rx_cqe(rxq, cq, &comp[i]); - - apc->eth_stats.rx_cqes--; } if (rxq->xdp_flush) diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index a64c81410dc14..0dc78679f6209 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -13,11 +13,9 @@ static const struct { } mana_eth_stats[] = { {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)}, {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)}, - {"tx_cqes", offsetof(struct mana_ethtool_stats, tx_cqes)}, {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)}, {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats, tx_cqe_unknown_type)}, - {"rx_cqes", offsetof(struct mana_ethtool_stats, rx_cqes)}, {"rx_coalesced_err", offsetof(struct mana_ethtool_stats, rx_coalesced_err)}, {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats, diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index cd386aa7c7cc3..9eef199728454 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -347,10 +347,8 @@ struct mana_tx_qp { struct mana_ethtool_stats { u64 stop_queue; u64 wake_queue; - u64 tx_cqes; u64 tx_cqe_err; u64 tx_cqe_unknown_type; - u64 rx_cqes; u64 rx_coalesced_err; u64 rx_cqe_unknown_type; }; From d328fe87067480cf2bd0b58dab428a98d31dbb7e Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Sun, 28 May 2023 19:35:26 +0200 Subject: [PATCH 190/303] selftests: mptcp: join: avoid using 'cmp --bytes' BusyBox's 'cmp' command doesn't support the '--bytes' parameter. Some CIs -- i.e. 
LKFT -- use BusyBox and have the mptcp_join.sh test failing [1] because their 'cmp' command doesn't support this '--bytes' option: cmp: unrecognized option '--bytes=1024' BusyBox v1.35.0 () multi-call binary. Usage: cmp [-ls] [-n NUM] FILE1 [FILE2] Instead, 'head --bytes' can be used as this option is supported by BusyBox. A temporary file is needed for this operation. Because it is apparently quite common to use BusyBox, it is certainly better to backport this fix to impacted kernels. Fixes: 6bf41020b72b ("selftests: mptcp: update and extend fastclose test-cases") Cc: stable@vger.kernel.org Link: https://qa-reports.linaro.org/lkft/linux-mainline-master/build/v6.3-rc5-5-g148341f0a2f5/testrun/16088933/suite/kselftest-net-mptcp/test/net_mptcp_userspace_pm_sh/log [1] Suggested-by: Paolo Abeni Reviewed-by: Mat Martineau Signed-off-by: Matthieu Baerts Signed-off-by: Paolo Abeni --- tools/testing/selftests/net/mptcp/mptcp_join.sh | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index 26310c17b4c6b..4f3fe45f8f715 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -17,6 +17,7 @@ sout="" cin="" cinfail="" cinsent="" +tmpfile="" cout="" capout="" ns1="" @@ -175,6 +176,7 @@ cleanup() { rm -f "$cin" "$cout" "$sinfail" rm -f "$sin" "$sout" "$cinsent" "$cinfail" + rm -f "$tmpfile" rm -rf $evts_ns1 $evts_ns2 cleanup_partial } @@ -383,9 +385,16 @@ check_transfer() fail_test return 1 fi - bytes="--bytes=${bytes}" + + # note: BusyBox's "cmp" command doesn't support --bytes + tmpfile=$(mktemp) + head --bytes="$bytes" "$in" > "$tmpfile" + mv "$tmpfile" "$in" + head --bytes="$bytes" "$out" > "$tmpfile" + mv "$tmpfile" "$out" + tmpfile="" fi - cmp -l "$in" "$out" ${bytes} | while read -r i a b; do + cmp -l "$in" "$out" | while read -r i a b; do local sum=$((0${a} + 0${b})) if [ $check_invert -eq 0 ] || [ $sum -ne $((0xff)) ]; then echo "[ FAIL ] $what does not match (in, out):" From d83013bdf90a7994a474b0e650a7fc94b0d4ded6 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Sun, 28 May 2023 19:35:27 +0200 Subject: [PATCH 191/303] selftests: mptcp: connect: skip if MPTCP is not supported Selftests are supposed to run on any kernels, including the old ones not supporting MPTCP. A new check is then added to make sure MPTCP is supported. If not, the test stops and is marked as "skipped". Note that this check can also mark the test as failed if 'SELFTESTS_MPTCP_LIB_EXPECT_ALL_FEATURES' env var is set to 1: by doing that, we can make sure a test is not being skipped by mistake. A new shared file is added here to be able to re-used the same check in the different selftests we have. 
Link: https://github.com/multipath-tcp/mptcp_net-next/issues/368 Fixes: 048d19d444be ("mptcp: add basic kselftest for mptcp") Cc: stable@vger.kernel.org Acked-by: Paolo Abeni Signed-off-by: Matthieu Baerts Signed-off-by: Paolo Abeni --- tools/testing/selftests/net/mptcp/Makefile | 2 +- .../selftests/net/mptcp/mptcp_connect.sh | 4 ++ .../testing/selftests/net/mptcp/mptcp_lib.sh | 40 +++++++++++++++++++ 3 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/net/mptcp/mptcp_lib.sh diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile index 43a7236261261..7b936a9268594 100644 --- a/tools/testing/selftests/net/mptcp/Makefile +++ b/tools/testing/selftests/net/mptcp/Makefile @@ -9,7 +9,7 @@ TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \ TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq -TEST_FILES := settings +TEST_FILES := mptcp_lib.sh settings EXTRA_CLEAN := *.pcap diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh index a43d3e2f59bbe..c1f7bac199423 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh @@ -1,6 +1,8 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +. "$(dirname "${0}")/mptcp_lib.sh" + time_start=$(date +%s) optstring="S:R:d:e:l:r:h4cm:f:tC" @@ -141,6 +143,8 @@ cleanup() done } +mptcp_lib_check_mptcp + ip -Version > /dev/null 2>&1 if [ $? -ne 0 ];then echo "SKIP: Could not run test without ip tool" diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh new file mode 100644 index 0000000000000..3286536b79d55 --- /dev/null +++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh @@ -0,0 +1,40 @@ +#! /bin/bash +# SPDX-License-Identifier: GPL-2.0 + +readonly KSFT_FAIL=1 +readonly KSFT_SKIP=4 + +# SELFTESTS_MPTCP_LIB_EXPECT_ALL_FEATURES env var can be set when validating all +# features using the last version of the kernel and the selftests to make sure +# a test is not being skipped by mistake. +mptcp_lib_expect_all_features() { + [ "${SELFTESTS_MPTCP_LIB_EXPECT_ALL_FEATURES:-}" = "1" ] +} + +# $1: msg +mptcp_lib_fail_if_expected_feature() { + if mptcp_lib_expect_all_features; then + echo "ERROR: missing feature: ${*}" + exit ${KSFT_FAIL} + fi + + return 1 +} + +# $1: file +mptcp_lib_has_file() { + local f="${1}" + + if [ -f "${f}" ]; then + return 0 + fi + + mptcp_lib_fail_if_expected_feature "${f} file not found" +} + +mptcp_lib_check_mptcp() { + if ! mptcp_lib_has_file "/proc/sys/net/mptcp/enabled"; then + echo "SKIP: MPTCP support is not available" + exit ${KSFT_SKIP} + fi +} From 0f4955a40dafe18a1122e3714d8173e4b018e869 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Sun, 28 May 2023 19:35:28 +0200 Subject: [PATCH 192/303] selftests: mptcp: pm nl: skip if MPTCP is not supported Selftests are supposed to run on any kernels, including the old ones not supporting MPTCP. A new check is then added to make sure MPTCP is supported. If not, the test stops and is marked as "skipped". 
Link: https://github.com/multipath-tcp/mptcp_net-next/issues/368 Fixes: eedbc685321b ("selftests: add PM netlink functional tests") Cc: stable@vger.kernel.org Acked-by: Paolo Abeni Signed-off-by: Matthieu Baerts Signed-off-by: Paolo Abeni --- tools/testing/selftests/net/mptcp/pm_netlink.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/net/mptcp/pm_netlink.sh b/tools/testing/selftests/net/mptcp/pm_netlink.sh index 89839d1ff9d83..32f7533e0919a 100755 --- a/tools/testing/selftests/net/mptcp/pm_netlink.sh +++ b/tools/testing/selftests/net/mptcp/pm_netlink.sh @@ -1,6 +1,8 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +. "$(dirname "${0}")/mptcp_lib.sh" + ksft_skip=4 ret=0 @@ -34,6 +36,8 @@ cleanup() ip netns del $ns1 } +mptcp_lib_check_mptcp + ip -Version > /dev/null 2>&1 if [ $? -ne 0 ];then echo "SKIP: Could not run test without ip tool" From 715c78a82e00f848f99ef76e6f6b89216ccba268 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Sun, 28 May 2023 19:35:29 +0200 Subject: [PATCH 193/303] selftests: mptcp: join: skip if MPTCP is not supported Selftests are supposed to run on any kernels, including the old ones not supporting MPTCP. A new check is then added to make sure MPTCP is supported. If not, the test stops and is marked as "skipped". Link: https://github.com/multipath-tcp/mptcp_net-next/issues/368 Fixes: b08fbf241064 ("selftests: add test-cases for MPTCP MP_JOIN") Cc: stable@vger.kernel.org Acked-by: Paolo Abeni Signed-off-by: Matthieu Baerts Signed-off-by: Paolo Abeni --- tools/testing/selftests/net/mptcp/mptcp_join.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index 4f3fe45f8f715..96f63172b8fee 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -10,6 +10,8 @@ # because it's invoked by variable name, see how the "tests" array is used #shellcheck disable=SC2317 +. "$(dirname "${0}")/mptcp_lib.sh" + ret=0 sin="" sinfail="" @@ -137,6 +139,8 @@ cleanup_partial() check_tools() { + mptcp_lib_check_mptcp + if ! ip -Version &> /dev/null; then echo "SKIP: Could not run test without ip tool" exit $ksft_skip From 46565acdd29facbf418a11e4a3791b3c8967308d Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Sun, 28 May 2023 19:35:30 +0200 Subject: [PATCH 194/303] selftests: mptcp: diag: skip if MPTCP is not supported Selftests are supposed to run on any kernels, including the old ones not supporting MPTCP. A new check is then added to make sure MPTCP is supported. If not, the test stops and is marked as "skipped". Link: https://github.com/multipath-tcp/mptcp_net-next/issues/368 Fixes: df62f2ec3df6 ("selftests/mptcp: add diag interface tests") Cc: stable@vger.kernel.org Acked-by: Paolo Abeni Signed-off-by: Matthieu Baerts Signed-off-by: Paolo Abeni --- tools/testing/selftests/net/mptcp/diag.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh index ef628b16fe9b4..4eacdb1ab9628 100755 --- a/tools/testing/selftests/net/mptcp/diag.sh +++ b/tools/testing/selftests/net/mptcp/diag.sh @@ -1,6 +1,8 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +. "$(dirname "${0}")/mptcp_lib.sh" + sec=$(date +%s) rndh=$(printf %x $sec)-$(mktemp -u XXXXXX) ns="ns1-$rndh" @@ -31,6 +33,8 @@ cleanup() ip netns del $ns } +mptcp_lib_check_mptcp + ip -Version > /dev/null 2>&1 if [ $? 
-ne 0 ];then echo "SKIP: Could not run test without ip tool" From 9161f21c74a1a0e7bb39eb84ea0c86b23c92fc87 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Sun, 28 May 2023 19:35:31 +0200 Subject: [PATCH 195/303] selftests: mptcp: simult flows: skip if MPTCP is not supported Selftests are supposed to run on any kernels, including the old ones not supporting MPTCP. A new check is then added to make sure MPTCP is supported. If not, the test stops and is marked as "skipped". Link: https://github.com/multipath-tcp/mptcp_net-next/issues/368 Fixes: 1a418cb8e888 ("mptcp: simult flow self-tests") Cc: stable@vger.kernel.org Acked-by: Paolo Abeni Signed-off-by: Matthieu Baerts Signed-off-by: Paolo Abeni --- tools/testing/selftests/net/mptcp/simult_flows.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh index 9f22f7e5027df..36a3c9d92e205 100755 --- a/tools/testing/selftests/net/mptcp/simult_flows.sh +++ b/tools/testing/selftests/net/mptcp/simult_flows.sh @@ -1,6 +1,8 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +. "$(dirname "${0}")/mptcp_lib.sh" + sec=$(date +%s) rndh=$(printf %x $sec)-$(mktemp -u XXXXXX) ns1="ns1-$rndh" @@ -34,6 +36,8 @@ cleanup() done } +mptcp_lib_check_mptcp + ip -Version > /dev/null 2>&1 if [ $? -ne 0 ];then echo "SKIP: Could not run test without ip tool" From cf6f0fda7af7e8e016070bfee6b189e671a0c776 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Sun, 28 May 2023 19:35:32 +0200 Subject: [PATCH 196/303] selftests: mptcp: sockopt: skip if MPTCP is not supported Selftests are supposed to run on any kernels, including the old ones not supporting MPTCP. A new check is then added to make sure MPTCP is supported. If not, the test stops and is marked as "skipped". Link: https://github.com/multipath-tcp/mptcp_net-next/issues/368 Fixes: dc65fe82fb07 ("selftests: mptcp: add packet mark test case") Cc: stable@vger.kernel.org Acked-by: Paolo Abeni Signed-off-by: Matthieu Baerts Signed-off-by: Paolo Abeni --- tools/testing/selftests/net/mptcp/mptcp_sockopt.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh index 1b70c0a304cef..ff5adbb9c7f2b 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh @@ -1,6 +1,8 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +. "$(dirname "${0}")/mptcp_lib.sh" + ret=0 sin="" sout="" @@ -84,6 +86,8 @@ cleanup() rm -f "$sin" "$sout" } +mptcp_lib_check_mptcp + ip -Version > /dev/null 2>&1 if [ $? -ne 0 ];then echo "SKIP: Could not run test without ip tool" From 63212608a92a1ff10ae56dbb14e9fb685f7e4ffa Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Sun, 28 May 2023 19:35:33 +0200 Subject: [PATCH 197/303] selftests: mptcp: userspace pm: skip if MPTCP is not supported Selftests are supposed to run on any kernels, including the old ones not supporting MPTCP. A new check is then added to make sure MPTCP is supported. If not, the test stops and is marked as "skipped". 
Link: https://github.com/multipath-tcp/mptcp_net-next/issues/368 Fixes: 259a834fadda ("selftests: mptcp: functional tests for the userspace PM type") Cc: stable@vger.kernel.org Acked-by: Paolo Abeni Signed-off-by: Matthieu Baerts Signed-off-by: Paolo Abeni --- tools/testing/selftests/net/mptcp/userspace_pm.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh index b1eb7bce599dc..8092399d911f1 100755 --- a/tools/testing/selftests/net/mptcp/userspace_pm.sh +++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh @@ -1,6 +1,10 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +. "$(dirname "${0}")/mptcp_lib.sh" + +mptcp_lib_check_mptcp + ip -Version > /dev/null 2>&1 if [ $? -ne 0 ];then echo "SKIP: Cannot not run test without ip tool" From 134f49dec0b6aca3259cd8259de4c572048bd207 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 14 May 2023 13:25:42 +0200 Subject: [PATCH 198/303] serial: 8250_tegra: Fix an error handling path in tegra_uart_probe() If an error occurs after reset_control_deassert(), it must be re-asserted, as already done in the .remove() function. Fixes: c6825c6395b7 ("serial: 8250_tegra: Create Tegra specific 8250 driver") Cc: stable Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/f8130f35339cc80edc6b9aac4bb2a60b60a226bf.1684063511.git.christophe.jaillet@wanadoo.fr Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_tegra.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/tty/serial/8250/8250_tegra.c b/drivers/tty/serial/8250/8250_tegra.c index 2509e7f74ccf2..89956bbf34d9e 100644 --- a/drivers/tty/serial/8250/8250_tegra.c +++ b/drivers/tty/serial/8250/8250_tegra.c @@ -113,13 +113,15 @@ static int tegra_uart_probe(struct platform_device *pdev) ret = serial8250_register_8250_port(&port8250); if (ret < 0) - goto err_clkdisable; + goto err_ctrl_assert; platform_set_drvdata(pdev, uart); uart->line = ret; return 0; +err_ctrl_assert: + reset_control_assert(uart->rst); err_clkdisable: clk_disable_unprepare(uart->clk); From 2474e05467c00f7d51af3039b664de6886325257 Mon Sep 17 00:00:00 2001 From: Sherry Sun Date: Fri, 19 May 2023 17:47:51 +0800 Subject: [PATCH 199/303] tty: serial: fsl_lpuart: use UARTCTRL_TXINV to send break instead of UARTCTRL_SBK LPUART IP now has two known bugs, one is that CTS has higher priority than the break signal, which causes the break signal sending through UARTCTRL_SBK may impacted by the CTS input if the HW flow control is enabled. It exists on all platforms we support in this driver. So we add a workaround patch for this issue: commit c4c81db5cf8b ("tty: serial: fsl_lpuart: disable the CTS when send break signal"). Another IP bug is i.MX8QM LPUART may have an additional break character being sent after SBK was cleared. It may need to add some delay between clearing SBK and re-enabling CTS to ensure that the SBK latch are completely cleared. But we found that during the delay period before CTS is enabled, there is still a risk that Bluetooth data in TX FIFO may be sent out during this period because of break off and CTS disabled(even if BT sets CTS line deasserted, data is still sent to BT). Due to this risk, we have to drop the CTS-disabling workaround for SBK bugs, use TXINV seems to be a better way to replace SBK feature and avoid above risk. Also need to disable the transmitter to prevent any data from being sent out during break, then invert the TX line to send break. 
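The resulting sequence can be sketched against an emulated control register. The bit positions below are made up rather than the real UARTCTRL layout; what matters is the ordering, transmitter off before TXINV on to start the break, and TXINV off before transmitter on to end it.

#include <stdio.h>
#include <stdint.h>

#define CTRL_TE    (1u << 19)   /* transmitter enable (illustrative) */
#define CTRL_TXINV (1u << 28)   /* transmit data inversion (illustrative) */

static uint32_t uartctrl = CTRL_TE;

static void write_ctrl(uint32_t val)
{
    uartctrl = val;
    printf("UARTCTRL <- 0x%08x\n", (unsigned int)val);
}

static void break_ctl(int break_state)
{
    uint32_t temp = uartctrl;

    if (break_state) {
        temp &= ~CTRL_TE;       /* 1. stop any transmission */
        write_ctrl(temp);
        temp |= CTRL_TXINV;     /* 2. inverted idle line is a break */
        write_ctrl(temp);
    } else {
        temp &= ~CTRL_TXINV;    /* 1. release the line */
        write_ctrl(temp);
        temp |= CTRL_TE;        /* 2. resume transmission */
        write_ctrl(temp);
    }
}

int main(void)
{
    break_ctl(1);
    break_ctl(0);
    return 0;
}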
Then disable the TXINV when turn off break and re-enable transmitter. Fixes: c4c81db5cf8b ("tty: serial: fsl_lpuart: disable the CTS when send break signal") Cc: stable Signed-off-by: Sherry Sun Link: https://lore.kernel.org/r/20230519094751.28948-1-sherry.sun@nxp.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/fsl_lpuart.c | 44 +++++++++++++++++---------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index c91916e136483..7486a2b8556c8 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1495,34 +1495,36 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state) static void lpuart32_break_ctl(struct uart_port *port, int break_state) { - unsigned long temp, modem; - struct tty_struct *tty; - unsigned int cflag = 0; - - tty = tty_port_tty_get(&port->state->port); - if (tty) { - cflag = tty->termios.c_cflag; - tty_kref_put(tty); - } + unsigned long temp; - temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK; - modem = lpuart32_read(port, UARTMODIR); + temp = lpuart32_read(port, UARTCTRL); + /* + * LPUART IP now has two known bugs, one is CTS has higher priority than the + * break signal, which causes the break signal sending through UARTCTRL_SBK + * may impacted by the CTS input if the HW flow control is enabled. It + * exists on all platforms we support in this driver. + * Another bug is i.MX8QM LPUART may have an additional break character + * being sent after SBK was cleared. + * To avoid above two bugs, we use Transmit Data Inversion function to send + * the break signal instead of UARTCTRL_SBK. + */ if (break_state != 0) { - temp |= UARTCTRL_SBK; /* - * LPUART CTS has higher priority than SBK, need to disable CTS before - * asserting SBK to avoid any interference if flow control is enabled. + * Disable the transmitter to prevent any data from being sent out + * during break, then invert the TX line to send break. */ - if (cflag & CRTSCTS && modem & UARTMODIR_TXCTSE) - lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR); + temp &= ~UARTCTRL_TE; + lpuart32_write(port, temp, UARTCTRL); + temp |= UARTCTRL_TXINV; + lpuart32_write(port, temp, UARTCTRL); } else { - /* Re-enable the CTS when break off. */ - if (cflag & CRTSCTS && !(modem & UARTMODIR_TXCTSE)) - lpuart32_write(port, modem | UARTMODIR_TXCTSE, UARTMODIR); + /* Disable the TXINV to turn off break and re-enable transmitter. */ + temp &= ~UARTCTRL_TXINV; + lpuart32_write(port, temp, UARTCTRL); + temp |= UARTCTRL_TE; + lpuart32_write(port, temp, UARTCTRL); } - - lpuart32_write(port, temp, UARTCTRL); } static void lpuart_setup_watermark(struct lpuart_port *sport) From 192d43e91e0fdbd51b64ba36e773cbdf38dda505 Mon Sep 17 00:00:00 2001 From: Herve Codina Date: Tue, 23 May 2023 10:59:01 +0200 Subject: [PATCH 200/303] soc: fsl: cpm1: Fix TSA and QMC dependencies in case of COMPILE_TEST In order to compile tsa.c and qmc.c, CONFIG_CPM must be set. Without this dependency, the linker fails with some missing symbols for COMPILE_TEST configurations that need QMC without enabling CPM. 
Signed-off-by: Herve Codina Reported-by: kernel test robot Link: https://lore.kernel.org/oe-kbuild-all/202305160221.9XgweObz-lkp@intel.com/ Acked-by: Randy Dunlap Tested-by: Randy Dunlap # build-tested Link: https://lore.kernel.org/r/20230523085902.75837-2-herve.codina@bootlin.com Signed-off-by: Greg Kroah-Hartman --- drivers/soc/fsl/qe/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/fsl/qe/Kconfig b/drivers/soc/fsl/qe/Kconfig index 7268c2fbcbc1d..e0d096607fefb 100644 --- a/drivers/soc/fsl/qe/Kconfig +++ b/drivers/soc/fsl/qe/Kconfig @@ -36,7 +36,7 @@ config UCC config CPM_TSA tristate "CPM TSA support" depends on OF && HAS_IOMEM - depends on CPM1 || COMPILE_TEST + depends on CPM1 || (CPM && COMPILE_TEST) help Freescale CPM Time Slot Assigner (TSA) controller. @@ -47,7 +47,7 @@ config CPM_TSA config CPM_QMC tristate "CPM QMC support" depends on OF && HAS_IOMEM - depends on CPM1 || (FSL_SOC && COMPILE_TEST) + depends on CPM1 || (FSL_SOC && CPM && COMPILE_TEST) depends on CPM_TSA help Freescale CPM QUICC Multichannel Controller From 7183c37fd53eee1e795206e625da12a5d7ec1e1a Mon Sep 17 00:00:00 2001 From: Herve Codina Date: Tue, 23 May 2023 10:59:02 +0200 Subject: [PATCH 201/303] serial: cpm_uart: Fix a COMPILE_TEST dependency In a COMPILE_TEST configuration, the cpm_uart driver uses symbols from the cpm_uart_cpm2.c file. This file is compiled only when CONFIG_CPM2 is set. Without this dependency, the linker fails with some missing symbols for COMPILE_TEST configuration that needs SERIAL_CPM without enabling CPM2. This lead to: depends on CPM2 || CPM1 || (PPC32 && CPM2 && COMPILE_TEST) This dependency does not make sense anymore and can be simplified removing all the COMPILE_TEST part. Signed-off-by: Herve Codina Reported-by: kernel test robot Link: https://lore.kernel.org/oe-kbuild-all/202305160221.9XgweObz-lkp@intel.com/ Fixes: e3e7b13bffae ("serial: allow COMPILE_TEST for some drivers") Cc: stable Link: https://lore.kernel.org/r/20230523085902.75837-3-herve.codina@bootlin.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/Kconfig | 2 +- drivers/tty/serial/cpm_uart/cpm_uart.h | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 398e5aac2e77a..3e3fb377d90d4 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -762,7 +762,7 @@ config SERIAL_PMACZILOG_CONSOLE config SERIAL_CPM tristate "CPM SCC/SMC serial port support" - depends on CPM2 || CPM1 || (PPC32 && COMPILE_TEST) + depends on CPM2 || CPM1 select SERIAL_CORE help This driver supports the SCC and SMC serial ports on Motorola diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h index 0577618e78c04..46c03ed71c31b 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart.h +++ b/drivers/tty/serial/cpm_uart/cpm_uart.h @@ -19,8 +19,6 @@ struct gpio_desc; #include "cpm_uart_cpm2.h" #elif defined(CONFIG_CPM1) #include "cpm_uart_cpm1.h" -#elif defined(CONFIG_COMPILE_TEST) -#include "cpm_uart_cpm2.h" #endif #define SERIAL_CPM_MAJOR 204 From fcfe84236ec0974fe92f0578d1d58ed805c4b70f Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Tue, 30 May 2023 09:59:26 +0300 Subject: [PATCH 202/303] usb: typec: tps6598x: Fix broken polling mode after system suspend/resume During system resume we need to resume the polling workqueue if client->irq is not set else polling will no longer work. 
Fixes: 0d6a119cecd7 ("usb: typec: tps6598x: Add support for polling interrupts status") Signed-off-by: Roger Quadros Reviewed-by: Heikki Krogerus Link: https://lore.kernel.org/r/20230530065926.6161-1-rogerq@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/typec/tipd/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index 438cc40660a1b..603dbd44deba9 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -920,7 +920,7 @@ static int __maybe_unused tps6598x_resume(struct device *dev) enable_irq(client->irq); } - if (client->irq) + if (!client->irq) queue_delayed_work(system_power_efficient_wq, &tps->wq_poll, msecs_to_jiffies(POLL_INTERVAL)); From 31a5978243d24d77be4bacca56c78a0fbc43b00d Mon Sep 17 00:00:00 2001 From: "min15.li" Date: Fri, 26 May 2023 17:06:56 +0000 Subject: [PATCH 203/303] nvme: fix miss command type check In the function nvme_passthru_end(), only the value of the command opcode is checked, without checking the command type (IO command or Admin command). When we send a Dataset Management command (The opcode of the Dataset Management command is the same as the Set Feature command), kernel thinks it is a set feature command, then sets the controller's keep alive interval, and calls nvme_keep_alive_work(). Signed-off-by: min15.li Reviewed-by: Kanchan Joshi Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/core.c | 4 +++- drivers/nvme/host/ioctl.c | 2 +- drivers/nvme/host/nvme.h | 2 +- drivers/nvme/target/passthru.c | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 1f0cbb77b2493..c6cba1cc101ca 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1115,7 +1115,7 @@ u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) } EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU); -void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects, +void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, struct nvme_command *cmd, int status) { if (effects & NVME_CMD_EFFECTS_CSE_MASK) { @@ -1132,6 +1132,8 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects, nvme_queue_scan(ctrl); flush_work(&ctrl->scan_work); } + if (ns) + return; switch (cmd->common.opcode) { case nvme_admin_set_features: diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 81c5c9e384774..f15e7330b75ac 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -254,7 +254,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, blk_mq_free_request(req); if (effects) - nvme_passthru_end(ctrl, effects, cmd, ret); + nvme_passthru_end(ctrl, ns, effects, cmd, ret); return ret; } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index a2d4f59e0535a..2dd52739236e1 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -1077,7 +1077,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode); u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode); int nvme_execute_rq(struct request *rq, bool at_head); -void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects, +void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, struct nvme_command *cmd, int status); struct nvme_ctrl *nvme_ctrl_from_file(struct file *file); struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned 
nsid); diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 511c980d538df..71a9c1cc57f59 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -243,7 +243,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w) blk_mq_free_request(rq); if (effects) - nvme_passthru_end(ctrl, effects, req->cmd, status); + nvme_passthru_end(ctrl, ns, effects, req->cmd, status); } static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq, From ea4d453b9ec9ea279c39744cd0ecb47ef48ede35 Mon Sep 17 00:00:00 2001 From: Uday Shankar Date: Thu, 25 May 2023 12:22:02 -0600 Subject: [PATCH 204/303] nvme: double KA polling frequency to avoid KATO with TBKAS on MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With TBKAS on, the completion of one command can defer sending a keep alive for up to twice the delay between successive runs of nvme_keep_alive_work. The current delay of KATO / 2 thus makes it possible for one command to defer sending a keep alive for up to KATO, which can result in the controller detecting a KATO. The following trace demonstrates the issue, taking KATO = 8 for simplicity: 1. t = 0: run nvme_keep_alive_work, no keep-alive sent 2. t = ε: I/O completion seen, set comp_seen = true 3. t = 4: run nvme_keep_alive_work, see comp_seen == true, skip sending keep-alive, set comp_seen = false 4. t = 8: run nvme_keep_alive_work, see comp_seen == false, send a keep-alive command. Here, there is a delay of 8 - ε between receiving a command completion and sending the next command. With ε small, the controller is likely to detect a keep alive timeout. Fix this by running nvme_keep_alive_work with a delay of KATO / 4 whenever TBKAS is on. Going through the above trace now gives us a worst-case delay of 4 - ε, which is in line with the recommendation of sending a command every KATO / 2 in the NVMe specification. Reported-by: Costa Sapuntzakis Reported-by: Randy Jennings Signed-off-by: Uday Shankar Reviewed-by: Hannes Reinecke Reviewed-by: Sagi Grimberg Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/core.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c6cba1cc101ca..6417c7928fa30 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1163,9 +1163,25 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU); * The host should send Keep Alive commands at half of the Keep Alive Timeout * accounting for transport roundtrip times [..]. */ +static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl) +{ + unsigned long delay = ctrl->kato * HZ / 2; + + /* + * When using Traffic Based Keep Alive, we need to run + * nvme_keep_alive_work at twice the normal frequency, as one + * command completion can postpone sending a keep alive command + * by up to twice the delay between runs. 
+ */ + if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) + delay /= 2; + return delay; +} + static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl) { - queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2); + queue_delayed_work(nvme_wq, &ctrl->ka_work, + nvme_keep_alive_work_period(ctrl)); } static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, From 774a9636514764ddc0d072ae0d1d1c01a47e6ddd Mon Sep 17 00:00:00 2001 From: Uday Shankar Date: Thu, 25 May 2023 12:22:03 -0600 Subject: [PATCH 205/303] nvme: check IO start time when deciding to defer KA When a command completes, we set a flag which will skip sending a keep alive at the next run of nvme_keep_alive_work when TBKAS is on. However, if the command was submitted long ago, it's possible that the controller may have also restarted its keep alive timer (as a result of receiving the command) long ago. The following trace demonstrates the issue, assuming TBKAS is on and KATO = 8 for simplicity: 1. t = 0: submit I/O commands A, B, C, D, E 2. t = 0.5: commands A, B, C, D, E reach controller, restart its keep alive timer 3. t = 1: A completes 4. t = 2: run nvme_keep_alive_work, see recent completion, do nothing 5. t = 3: B completes 6. t = 4: run nvme_keep_alive_work, see recent completion, do nothing 7. t = 5: C completes 8. t = 6: run nvme_keep_alive_work, see recent completion, do nothing 9. t = 7: D completes 10. t = 8: run nvme_keep_alive_work, see recent completion, do nothing 11. t = 9: E completes At this point, 8.5 seconds have passed without restarting the controller's keep alive timer, so the controller will detect a keep alive timeout. Fix this by checking the IO start time when deciding to defer sending a keep alive command. Only set comp_seen if the command started after the most recent run of nvme_keep_alive_work. With this change, the completions of B, C, and D will not set comp_seen and the run of nvme_keep_alive_work at t = 4 will send a keep alive. Reported-by: Costa Sapuntzakis Reported-by: Randy Jennings Signed-off-by: Uday Shankar Reviewed-by: Hannes Reinecke Reviewed-by: Sagi Grimberg Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/core.c | 14 +++++++++++++- drivers/nvme/host/nvme.h | 1 + 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 6417c7928fa30..dbfca1b925f8a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -397,7 +397,16 @@ void nvme_complete_rq(struct request *req) trace_nvme_complete_rq(req); nvme_cleanup_cmd(req); - if (ctrl->kas) + /* + * Completions of long-running commands should not be able to + * defer sending of periodic keep alives, since the controller + * may have completed processing such commands a long time ago + * (arbitrarily close to command submission time). + * req->deadline - req->timeout is the command submission time + * in jiffies. 
+ */ + if (ctrl->kas && + req->deadline - req->timeout >= ctrl->ka_last_check_time) ctrl->comp_seen = true; switch (nvme_decide_disposition(req)) { @@ -1200,6 +1209,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, return RQ_END_IO_NONE; } + ctrl->ka_last_check_time = jiffies; ctrl->comp_seen = false; spin_lock_irqsave(&ctrl->lock, flags); if (ctrl->state == NVME_CTRL_LIVE || @@ -1218,6 +1228,8 @@ static void nvme_keep_alive_work(struct work_struct *work) bool comp_seen = ctrl->comp_seen; struct request *rq; + ctrl->ka_last_check_time = jiffies; + if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { dev_dbg(ctrl->device, "reschedule traffic based keep-alive timer\n"); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 2dd52739236e1..8657811f8b887 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -328,6 +328,7 @@ struct nvme_ctrl { struct delayed_work ka_work; struct delayed_work failfast_work; struct nvme_command ka_cmd; + unsigned long ka_last_check_time; struct work_struct fw_act_work; unsigned long events; From db3e33dd8bd956f165436afdbdbf1c653fb3c8e6 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Sun, 28 May 2023 16:00:41 -0700 Subject: [PATCH 206/303] module: fix module load for ia64 Frank reported boot regression in ia64 as: ELILO v3.16 for EFI/IA-64 .. Uncompressing Linux... done Loading file AC100221.initrd.img...done [ 0.000000] Linux version 6.4.0-rc3 (root@x4270) (ia64-linux-gcc (GCC) 12.2.0, GNU ld (GNU Binutils) 2.39) #1 SMP Thu May 25 15:52:20 CEST 2023 [ 0.000000] efi: EFI v1.1 by HP [ 0.000000] efi: SALsystab=0x3ee7a000 ACPI 2.0=0x3fe2a000 ESI=0x3ee7b000 SMBIOS=0x3ee7c000 HCDP=0x3fe28000 [ 0.000000] PCDP: v3 at 0x3fe28000 [ 0.000000] earlycon: uart8250 at MMIO 0x00000000f4050000 (options '9600n8') [ 0.000000] printk: bootconsole [uart8250] enabled [ 0.000000] ACPI: Early table checksum verification disabled [ 0.000000] ACPI: RSDP 0x000000003FE2A000 000028 (v02 HP ) [ 0.000000] ACPI: XSDT 0x000000003FE2A02C 0000CC (v01 HP rx2620 00000000 HP 00000000) [...] [ 3.793350] Run /init as init process Loading, please wait... Starting systemd-udevd version 252.6-1 [ 3.951100] ------------[ cut here ]------------ [ 3.951100] WARNING: CPU: 6 PID: 140 at kernel/module/main.c:1547 __layout_sections+0x370/0x3c0 [ 3.949512] Unable to handle kernel paging request at virtual address 1000000000000000 [ 3.951100] Modules linked in: [ 3.951100] CPU: 6 PID: 140 Comm: (udev-worker) Not tainted 6.4.0-rc3 #1 [ 3.956161] (udev-worker)[142]: Oops 11003706212352 [1] [ 3.951774] Hardware name: hp server rx2620 , BIOS 04.29 11/30/2007 [ 3.951774] [ 3.951774] Call Trace: [ 3.958339] Unable to handle kernel paging request at virtual address 1000000000000000 [ 3.956161] Modules linked in: [ 3.951774] [] show_stack.part.0+0x30/0x60 [ 3.951774] sp=e000000183a67b20 bsp=e000000183a61628 [ 3.956161] [ 3.956161] which bisect to module_memory change [1]. Debug showed that ia64 uses some special sections: __layout_sections: section .got (sh_flags 10000002) matched to MOD_INVALID __layout_sections: section .sdata (sh_flags 10000003) matched to MOD_INVALID __layout_sections: section .sbss (sh_flags 10000003) matched to MOD_INVALID All these sections are loaded to module core memory before [1]. Fix ia64 boot by loading these sections to MOD_DATA (core rw data). 
[1] commit ac3b43283923 ("module: replace module_layout with module_memory") Fixes: ac3b43283923 ("module: replace module_layout with module_memory") Reported-by: Frank Scheiner Closes: https://lists.debian.org/debian-ia64/2023/05/msg00010.html Closes: https://marc.info/?l=linux-ia64&m=168509859125505 Cc: Linus Torvalds Signed-off-by: Song Liu Tested-by: John Paul Adrian Glaubitz Signed-off-by: Luis Chamberlain --- kernel/module/main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/module/main.c b/kernel/module/main.c index 044aa2c9e3cb0..4e2cf784cf8ce 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1521,14 +1521,14 @@ static void __layout_sections(struct module *mod, struct load_info *info, bool i MOD_RODATA, MOD_RO_AFTER_INIT, MOD_DATA, - MOD_INVALID, /* This is needed to match the masks array */ + MOD_DATA, }; static const int init_m_to_mem_type[] = { MOD_INIT_TEXT, MOD_INIT_RODATA, MOD_INVALID, MOD_INIT_DATA, - MOD_INVALID, /* This is needed to match the masks array */ + MOD_INIT_DATA, }; for (m = 0; m < ARRAY_SIZE(masks); ++m) { From d00394e1395be9e27f58d58a61642a2a5df10405 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Sun, 19 Mar 2023 00:53:38 +0100 Subject: [PATCH 207/303] fbdev: au1100fb: Drop if with an always false condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The driver core never calls a remove callback with the platform_device pointer being NULL. So the check for this condition can just be dropped. Signed-off-by: Uwe Kleine-König Signed-off-by: Helge Deller --- drivers/video/fbdev/au1100fb.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c index 519313b8bb004..cd27e3b81bb82 100644 --- a/drivers/video/fbdev/au1100fb.c +++ b/drivers/video/fbdev/au1100fb.c @@ -524,9 +524,6 @@ int au1100fb_drv_remove(struct platform_device *dev) { struct au1100fb_device *fbdev = NULL; - if (!dev) - return -ENODEV; - fbdev = platform_get_drvdata(dev); #if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO) From 4369e38a6f1978b48f9200d2a69ce4658db76047 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Sun, 19 Mar 2023 00:53:39 +0100 Subject: [PATCH 208/303] fbdev: arcfb: Convert to platform remove callback returning void MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The .remove() callback for a platform driver returns an int which makes many driver authors wrongly assume it's possible to do error handling by returning an error code. However the value returned is (mostly) ignored and this typically results in resource leaks. To improve here there is a quest to make the remove callback return void. In the first step of this quest all drivers are converted to .remove_new() which already returns void. Trivially convert this driver from always returning zero in the remove callback to the void returning variant. 
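The conversion pattern used by this and the following fbdev patches can be sketched with a minimal, hypothetical platform driver (illustrative only; the names are made up and this is not code from any of these patches):

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	/* acquire resources, ideally via devm_* so remove stays trivial */
	return 0;
}

/*
 * New-style remove callback: returns void, so there is no temptation to
 * "return an error" that the driver core would ignore anyway.
 */
static void demo_remove(struct platform_device *pdev)
{
	/* release anything not covered by devm_* */
}

static struct platform_driver demo_driver = {
	.probe		= demo_probe,
	.remove_new	= demo_remove,
	.driver		= {
		.name	= "demo",
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");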
Signed-off-by: Uwe Kleine-König Signed-off-by: Helge Deller --- drivers/video/fbdev/arcfb.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c index 024d0ee4f04f9..08d15e4084130 100644 --- a/drivers/video/fbdev/arcfb.c +++ b/drivers/video/fbdev/arcfb.c @@ -590,7 +590,7 @@ static int arcfb_probe(struct platform_device *dev) return retval; } -static int arcfb_remove(struct platform_device *dev) +static void arcfb_remove(struct platform_device *dev) { struct fb_info *info = platform_get_drvdata(dev); @@ -601,12 +601,11 @@ static int arcfb_remove(struct platform_device *dev) vfree((void __force *)info->screen_base); framebuffer_release(info); } - return 0; } static struct platform_driver arcfb_driver = { .probe = arcfb_probe, - .remove = arcfb_remove, + .remove_new = arcfb_remove, .driver = { .name = "arcfb", }, From bf76544d4515ac543cb9e5c666eebf3866e662a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Sun, 19 Mar 2023 00:53:40 +0100 Subject: [PATCH 209/303] fbdev: au1100fb: Convert to platform remove callback returning void MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The .remove() callback for a platform driver returns an int which makes many driver authors wrongly assume it's possible to do error handling by returning an error code. However the value returned is (mostly) ignored and this typically results in resource leaks. To improve here there is a quest to make the remove callback return void. In the first step of this quest all drivers are converted to .remove_new() which already returns void. Trivially convert this driver from always returning zero in the remove callback to the void returning variant. Signed-off-by: Uwe Kleine-König Signed-off-by: Helge Deller --- drivers/video/fbdev/au1100fb.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c index cd27e3b81bb82..648d6cac86e8f 100644 --- a/drivers/video/fbdev/au1100fb.c +++ b/drivers/video/fbdev/au1100fb.c @@ -520,7 +520,7 @@ static int au1100fb_drv_probe(struct platform_device *dev) return -ENODEV; } -int au1100fb_drv_remove(struct platform_device *dev) +void au1100fb_drv_remove(struct platform_device *dev) { struct au1100fb_device *fbdev = NULL; @@ -540,8 +540,6 @@ int au1100fb_drv_remove(struct platform_device *dev) clk_disable_unprepare(fbdev->lcdclk); clk_put(fbdev->lcdclk); } - - return 0; } #ifdef CONFIG_PM @@ -590,9 +588,9 @@ static struct platform_driver au1100fb_driver = { .name = "au1100-lcd", }, .probe = au1100fb_drv_probe, - .remove = au1100fb_drv_remove, + .remove_new = au1100fb_drv_remove, .suspend = au1100fb_drv_suspend, - .resume = au1100fb_drv_resume, + .resume = au1100fb_drv_resume, }; module_platform_driver(au1100fb_driver); From 4b88b6e1c4ec832b02a65b51489692b58d2c6f84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Sun, 19 Mar 2023 00:53:41 +0100 Subject: [PATCH 210/303] fbdev: au1200fb: Convert to platform remove callback returning void MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The .remove() callback for a platform driver returns an int which makes many driver authors wrongly assume it's possible to do error handling by returning an error code. However the value returned is (mostly) ignored and this typically results in resource leaks. 
To improve here there is a quest to make the remove callback return void. In the first step of this quest all drivers are converted to .remove_new() which already returns void. Trivially convert this driver from always returning zero in the remove callback to the void returning variant. Signed-off-by: Uwe Kleine-König Signed-off-by: Helge Deller --- drivers/video/fbdev/au1200fb.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index b6b22fa4a8a01..aed88ce45bf09 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -1765,7 +1765,7 @@ static int au1200fb_drv_probe(struct platform_device *dev) return ret; } -static int au1200fb_drv_remove(struct platform_device *dev) +static void au1200fb_drv_remove(struct platform_device *dev) { struct au1200fb_platdata *pd = platform_get_drvdata(dev); struct fb_info *fbi; @@ -1788,8 +1788,6 @@ static int au1200fb_drv_remove(struct platform_device *dev) } free_irq(platform_get_irq(dev, 0), (void *)dev); - - return 0; } #ifdef CONFIG_PM @@ -1840,7 +1838,7 @@ static struct platform_driver au1200fb_driver = { .pm = AU1200FB_PMOPS, }, .probe = au1200fb_drv_probe, - .remove = au1200fb_drv_remove, + .remove_new = au1200fb_drv_remove, }; module_platform_driver(au1200fb_driver); From a3ce8c8ffb6fdee842b703da78bd81eac0f16d71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Sun, 19 Mar 2023 00:53:42 +0100 Subject: [PATCH 211/303] fbdev: broadsheetfb: Convert to platform remove callback returning void MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The .remove() callback for a platform driver returns an int which makes many driver authors wrongly assume it's possible to do error handling by returning an error code. However the value returned is (mostly) ignored and this typically results in resource leaks. To improve here there is a quest to make the remove callback return void. In the first step of this quest all drivers are converted to .remove_new() which already returns void. Trivially convert this driver from always returning zero in the remove callback to the void returning variant. 
Signed-off-by: Uwe Kleine-König Signed-off-by: Helge Deller --- drivers/video/fbdev/broadsheetfb.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c index 55e62dd96f9be..b518cacbf7cd9 100644 --- a/drivers/video/fbdev/broadsheetfb.c +++ b/drivers/video/fbdev/broadsheetfb.c @@ -1193,7 +1193,7 @@ static int broadsheetfb_probe(struct platform_device *dev) } -static int broadsheetfb_remove(struct platform_device *dev) +static void broadsheetfb_remove(struct platform_device *dev) { struct fb_info *info = platform_get_drvdata(dev); @@ -1209,12 +1209,11 @@ static int broadsheetfb_remove(struct platform_device *dev) module_put(par->board->owner); framebuffer_release(info); } - return 0; } static struct platform_driver broadsheetfb_driver = { .probe = broadsheetfb_probe, - .remove = broadsheetfb_remove, + .remove_new = broadsheetfb_remove, .driver = { .name = "broadsheetfb", }, From d19663edc91de65ae85eea9902addc9d04b0ceb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Sun, 19 Mar 2023 00:53:43 +0100 Subject: [PATCH 212/303] fbdev: bw2: Convert to platform remove callback returning void MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The .remove() callback for a platform driver returns an int which makes many driver authors wrongly assume it's possible to do error handling by returning an error code. However the value returned is (mostly) ignored and this typically results in resource leaks. To improve here there is a quest to make the remove callback return void. In the first step of this quest all drivers are converted to .remove_new() which already returns void. Trivially convert this driver from always returning zero in the remove callback to the void returning variant. Signed-off-by: Uwe Kleine-König Signed-off-by: Helge Deller --- drivers/video/fbdev/bw2.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/video/fbdev/bw2.c b/drivers/video/fbdev/bw2.c index 9cbadcd18b256..025d663dc6fdc 100644 --- a/drivers/video/fbdev/bw2.c +++ b/drivers/video/fbdev/bw2.c @@ -352,7 +352,7 @@ static int bw2_probe(struct platform_device *op) return err; } -static int bw2_remove(struct platform_device *op) +static void bw2_remove(struct platform_device *op) { struct fb_info *info = dev_get_drvdata(&op->dev); struct bw2_par *par = info->par; @@ -363,8 +363,6 @@ static int bw2_remove(struct platform_device *op) of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len); framebuffer_release(info); - - return 0; } static const struct of_device_id bw2_match[] = { @@ -381,7 +379,7 @@ static struct platform_driver bw2_driver = { .of_match_table = bw2_match, }, .probe = bw2_probe, - .remove = bw2_remove, + .remove_new = bw2_remove, }; static int __init bw2_init(void) From b928dfdcb27d8fa59917b794cfba53052a2f050f Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Tue, 23 May 2023 23:49:49 -0400 Subject: [PATCH 213/303] ext4: set lockdep subclass for the ea_inode in ext4_xattr_inode_cache_find() If the ea_inode has been pushed out of the inode cache while there is still a reference in the mb_cache, the lockdep subclass will not be set on the inode, which can lead to some lockdep false positives. 
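For readers unfamiliar with lockdep subclasses (editor's sketch, not ext4 code): lockdep tracks locks by class, so legitimately nesting two locks of the same class looks like a self-deadlock unless one acquisition carries a distinct subclass, either via a *_nested() call or by retagging the lock object, as the ea_inode path does here.

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct item {
	struct mutex lock;
};

/* Both mutexes belong to the same lock class as far as lockdep knows. */
static void lock_parent_then_child(struct item *parent, struct item *child)
{
	mutex_lock(&parent->lock);
	/*
	 * Without the _nested annotation (or lockdep_set_subclass() on one
	 * of the objects), lockdep reports "possible recursive locking
	 * detected" even though parent != child.
	 */
	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

	mutex_unlock(&child->lock);
	mutex_unlock(&parent->lock);
}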
Fixes: 33d201e0277b ("ext4: fix lockdep warning about recursive inode locking") Cc: stable@kernel.org Reported-by: syzbot+d4b971e744b1f5439336@syzkaller.appspotmail.com Signed-off-by: Theodore Ts'o Link: https://lore.kernel.org/r/20230524034951.779531-3-tytso@mit.edu Signed-off-by: Theodore Ts'o --- fs/ext4/xattr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index a27208129a800..ff7ab63c5b4f0 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1539,6 +1539,7 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value, EXT4_IGET_EA_INODE); if (IS_ERR(ea_inode)) goto next_entry; + ext4_xattr_inode_set_class(ea_inode); if (i_size_read(ea_inode) == value_len && !ext4_xattr_inode_read(ea_inode, ea_data, value_len) && !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data, From 2bc7e7c1a3bc9bd0cbf0f71006f6fe7ef24a00c2 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Tue, 23 May 2023 23:49:50 -0400 Subject: [PATCH 214/303] ext4: disallow ea_inodes with extended attributes An ea_inode stores the value of an extended attribute; it can not have extended attributes itself, or this will cause recursive nightmares. Add a check in ext4_iget() to make sure this is the case. Cc: stable@kernel.org Reported-by: syzbot+e44749b6ba4d0434cd47@syzkaller.appspotmail.com Signed-off-by: Theodore Ts'o Link: https://lore.kernel.org/r/20230524034951.779531-4-tytso@mit.edu Signed-off-by: Theodore Ts'o --- fs/ext4/inode.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 258f3cbed347c..02de439bf1f04 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4647,6 +4647,9 @@ static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags) if (flags & EXT4_IGET_EA_INODE) { if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) return "missing EA_INODE flag"; + if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) || + EXT4_I(inode)->i_file_acl) + return "ea_inode with extended attributes"; } else { if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) return "unexpected EA_INODE flag"; From aff3bea95388299eec63440389b4545c8041b357 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Tue, 23 May 2023 23:49:51 -0400 Subject: [PATCH 215/303] ext4: add lockdep annotations for i_data_sem for ea_inode's Treat i_data_sem for ea_inodes as being in their own lockdep class to avoid lockdep complaints about ext4_setattr's use of inode_lock() on normal inodes potentially causing lock ordering with i_data_sem on ea_inodes in ext4_xattr_inode_write(). However, ea_inodes will be operated on by ext4_setattr(), so this isn't a problem. 
Cc: stable@kernel.org
Link: https://syzkaller.appspot.com/bug?extid=298c5d8fb4a128bc27b0
Reported-by: syzbot+298c5d8fb4a128bc27b0@syzkaller.appspotmail.com
Signed-off-by: Theodore Ts'o
Link: https://lore.kernel.org/r/20230524034951.779531-5-tytso@mit.edu
Signed-off-by: Theodore Ts'o
---
 fs/ext4/ext4.h  | 2 ++
 fs/ext4/xattr.c | 4 ++++
 2 files changed, 6 insertions(+)

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 9525c52b78dc5..8104a21b001ae 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -918,11 +918,13 @@ do {								\
  * where the second inode has larger inode number
  * than the first
  * I_DATA_SEM_QUOTA - Used for quota inodes only
+ * I_DATA_SEM_EA - Used for ea_inodes only
  */
 enum {
 	I_DATA_SEM_NORMAL = 0,
 	I_DATA_SEM_OTHER,
 	I_DATA_SEM_QUOTA,
+	I_DATA_SEM_EA
 };

diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index ff7ab63c5b4f0..13d7f17a9c8cc 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -121,7 +121,11 @@ ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
 #ifdef CONFIG_LOCKDEP
 void ext4_xattr_inode_set_class(struct inode *ea_inode)
 {
+	struct ext4_inode_info *ei = EXT4_I(ea_inode);
+
 	lockdep_set_subclass(&ea_inode->i_rwsem, 1);
+	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
+	lockdep_set_subclass(&ei->i_data_sem, I_DATA_SEM_EA);
 }
 #endif

From 1077b2d53ef53629c14106aecf633bebd286c04c Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Wed, 24 May 2023 12:44:53 +0200
Subject: [PATCH 216/303] ext4: fix fsync for non-directories

Commit e360c6ed7274 ("ext4: Drop special handling of journalled data
from ext4_sync_file()") simplified ext4_sync_file() by dropping special
handling of journalled data mode as it was not needed anymore. However
that branch was also used for directories and symlinks and since the
fastcommit code does not track metadata changes to non-regular files,
the change has caused e.g. fsync(2) on directories to not commit
transaction as it should.

Fix the problem by adding handling for non-regular files.

Fixes: e360c6ed7274 ("ext4: Drop special handling of journalled data from ext4_sync_file()")
Reported-by: Eric Whitney
Link: https://lore.kernel.org/all/ZFqO3xVnmhL7zv1x@debian-BULLSEYE-live-builder-AMD64
Signed-off-by: Jan Kara
Tested-by: Eric Whitney
Link: https://lore.kernel.org/r/20230524104453.8734-1-jack@suse.cz
Signed-off-by: Theodore Ts'o
---
 fs/ext4/fsync.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index f65fdb27ce141..2a143209aa0cb 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -108,6 +108,13 @@ static int ext4_fsync_journal(struct inode *inode, bool datasync,
 	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
 	tid_t commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;

+	/*
+	 * Fastcommit does not really support fsync on directories or other
+	 * special files. Force a full commit.
+	 */
+	if (!S_ISREG(inode->i_mode))
+		return ext4_force_commit(inode->i_sb);
+
 	if (journal->j_flags & JBD2_BARRIER &&
 	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
 		*needs_barrier = true;

From eb1f822c76beeaa76ab8b6737ab9dc9f9798408c Mon Sep 17 00:00:00 2001
From: Theodore Ts'o
Date: Fri, 26 May 2023 23:57:29 -0400
Subject: [PATCH 217/303] ext4: enable the lazy init thread when remounting read/write

In commit a44be64bbecb ("ext4: don't clear SB_RDONLY when remounting
r/w until quota is re-enabled") we defer clearing the SB_RDONLY flag in
struct super.
However, we didn't defer when we checked sb_rdonly() to determine
whether the lazy itable init thread should be enabled, with the net
result that the lazy inode table initialization would not be properly
started. This can cause generic/231 to fail in ext4's nojournal mode.

Fix this by moving when we decide to start or stop the lazy itable init
thread to after we clear the SB_RDONLY flag when we are remounting the
file system read/write.

Fixes: a44be64bbecb ("ext4: don't clear SB_RDONLY when remounting r/w until...")
Signed-off-by: Theodore Ts'o
Link: https://lore.kernel.org/r/20230527035729.1001605-1-tytso@mit.edu
Signed-off-by: Theodore Ts'o
---
 fs/ext4/super.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9680fe753e599..56a5d1c469fc9 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -6588,18 +6588,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
 		}
 	}

-	/*
-	 * Reinitialize lazy itable initialization thread based on
-	 * current settings
-	 */
-	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
-		ext4_unregister_li_request(sb);
-	else {
-		ext4_group_t first_not_zeroed;
-		first_not_zeroed = ext4_has_uninit_itable(sb);
-		ext4_register_li_request(sb, first_not_zeroed);
-	}
-
 	/*
 	 * Handle creation of system zone data early because it can fail.
 	 * Releasing of existing data is done when we are sure remount will
@@ -6637,6 +6625,18 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
 	if (enable_rw)
 		sb->s_flags &= ~SB_RDONLY;

+	/*
+	 * Reinitialize lazy itable initialization thread based on
+	 * current settings
+	 */
+	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
+		ext4_unregister_li_request(sb);
+	else {
+		ext4_group_t first_not_zeroed;
+		first_not_zeroed = ext4_has_uninit_itable(sb);
+		ext4_register_li_request(sb, first_not_zeroed);
+	}
+
 	if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
 		ext4_stop_mmpd(sbi);

From c7275ce6a5fd32ca9f5a6294ed89cf0523181af9 Mon Sep 17 00:00:00 2001
From: Uday Shankar
Date: Thu, 25 May 2023 12:22:04 -0600
Subject: [PATCH 218/303] nvme: improve handling of long keep alives
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Upon keep alive completion, nvme_keep_alive_work is scheduled with the
same delay every time. If keep alive commands are completing slowly,
this may cause a keep alive timeout. The following trace illustrates
the issue, taking KATO = 8 and TBKAS off for simplicity:

1. t = 0: run nvme_keep_alive_work, send keep alive
2. t = ε: keep alive reaches controller, controller restarts its keep
   alive timer
3. t = 4: host receives keep alive completion, schedules
   nvme_keep_alive_work with delay 4
4. t = 8: run nvme_keep_alive_work, send keep alive

Here, a keep alive having RTT of 4 causes a delay of at least 8 - ε
between the controller receiving successive keep alives. With ε small,
the controller is likely to detect a keep alive timeout.

Fix this by calculating the RTT of the keep alive command, and
adjusting the scheduling delay of the next keep alive work accordingly.
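For illustration only (not part of this patch), the adjusted scheduling rule amounts to subtracting the observed RTT from the keep alive period, clamped at zero. A small user-space sketch of the arithmetic, using the KATO = 8 example above, where the gap between successive sends is RTT plus the scheduled delay:

#include <stdio.h>

/* Scheduling rule in plain seconds: wait (period - rtt), clamped at 0. */
static unsigned int next_delay(unsigned int period, unsigned int rtt)
{
	return rtt <= period ? period - rtt : 0;
}

int main(void)
{
	const unsigned int kato = 8, period = kato / 2;
	unsigned int rtt;

	for (rtt = 0; rtt <= 6; rtt += 2) {
		unsigned int fixed = rtt + period;		/* old behaviour */
		unsigned int adjusted = rtt + next_delay(period, rtt);

		printf("rtt=%u  fixed-delay gap=%u  rtt-adjusted gap=%u\n",
		       rtt, fixed, adjusted);
	}
	return 0;
}

For rtt = 4 the adjusted gap stays at 4 seconds (KATO/2) instead of growing to 8, so the controller keeps seeing keep alives well inside KATO.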
Reported-by: Costa Sapuntzakis Reported-by: Randy Jennings Signed-off-by: Uday Shankar Reviewed-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/core.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index dbfca1b925f8a..3ec38e2b91732 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1199,6 +1199,20 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, struct nvme_ctrl *ctrl = rq->end_io_data; unsigned long flags; bool startka = false; + unsigned long rtt = jiffies - (rq->deadline - rq->timeout); + unsigned long delay = nvme_keep_alive_work_period(ctrl); + + /* + * Subtract off the keepalive RTT so nvme_keep_alive_work runs + * at the desired frequency. + */ + if (rtt <= delay) { + delay -= rtt; + } else { + dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n", + jiffies_to_msecs(rtt)); + delay = 0; + } blk_mq_free_request(rq); @@ -1217,7 +1231,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, startka = true; spin_unlock_irqrestore(&ctrl->lock, flags); if (startka) - nvme_queue_keep_alive_work(ctrl); + queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); return RQ_END_IO_NONE; } From c7cfbd115001f94de9e4053657946a383147e803 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Mon, 29 May 2023 12:52:55 -0700 Subject: [PATCH 219/303] net/sched: sch_ingress: Only create under TC_H_INGRESS ingress Qdiscs are only supposed to be created under TC_H_INGRESS. Return -EOPNOTSUPP if 'parent' is not TC_H_INGRESS, similar to mq_init(). Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: syzbot+b53a9c0d1ea4ad62da8b@syzkaller.appspotmail.com Closes: https://lore.kernel.org/r/0000000000006cf87705f79acf1a@google.com/ Tested-by: Pedro Tammela Acked-by: Jamal Hadi Salim Reviewed-by: Jamal Hadi Salim Reviewed-by: Vlad Buslov Signed-off-by: Peilin Ye Signed-off-by: Jakub Kicinski --- net/sched/sch_ingress.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 84838128b9c5b..f9ef6deb27709 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -80,6 +80,9 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt, struct net_device *dev = qdisc_dev(sch); int err; + if (sch->parent != TC_H_INGRESS) + return -EOPNOTSUPP; + net_inc_ingress_queue(); mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress); @@ -101,6 +104,9 @@ static void ingress_destroy(struct Qdisc *sch) { struct ingress_sched_data *q = qdisc_priv(sch); + if (sch->parent != TC_H_INGRESS) + return; + tcf_block_put_ext(q->block, sch, &q->block_info); net_dec_ingress_queue(); } From 5eeebfe6c493192b10d516abfd72742900f2a162 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Mon, 29 May 2023 12:53:21 -0700 Subject: [PATCH 220/303] net/sched: sch_clsact: Only create under TC_H_CLSACT clsact Qdiscs are only supposed to be created under TC_H_CLSACT (which equals TC_H_INGRESS). Return -EOPNOTSUPP if 'parent' is not TC_H_CLSACT. 
Fixes: 1f211a1b929c ("net, sched: add clsact qdisc") Tested-by: Pedro Tammela Acked-by: Jamal Hadi Salim Reviewed-by: Jamal Hadi Salim Reviewed-by: Vlad Buslov Signed-off-by: Peilin Ye Signed-off-by: Jakub Kicinski --- net/sched/sch_ingress.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index f9ef6deb27709..35963929e1178 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -225,6 +225,9 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt, struct net_device *dev = qdisc_dev(sch); int err; + if (sch->parent != TC_H_CLSACT) + return -EOPNOTSUPP; + net_inc_ingress_queue(); net_inc_egress_queue(); @@ -254,6 +257,9 @@ static void clsact_destroy(struct Qdisc *sch) { struct clsact_sched_data *q = qdisc_priv(sch); + if (sch->parent != TC_H_CLSACT) + return; + tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info); tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info); From f85fa45d4a9408d98c46c8fa45ba2e3b2f4bf219 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Mon, 29 May 2023 12:54:03 -0700 Subject: [PATCH 221/303] net/sched: Reserve TC_H_INGRESS (TC_H_CLSACT) for ingress (clsact) Qdiscs Currently it is possible to add e.g. an HTB Qdisc under ffff:fff1 (TC_H_INGRESS, TC_H_CLSACT): $ ip link add name ifb0 type ifb $ tc qdisc add dev ifb0 parent ffff:fff1 htb $ tc qdisc add dev ifb0 clsact Error: Exclusivity flag on, cannot modify. $ drgn ... >>> ifb0 = netdev_get_by_name(prog, "ifb0") >>> qdisc = ifb0.ingress_queue.qdisc_sleeping >>> print(qdisc.ops.id.string_().decode()) htb >>> qdisc.flags.value_() # TCQ_F_INGRESS 2 Only allow ingress and clsact Qdiscs under ffff:fff1. Return -EINVAL for everything else. Make TCQ_F_INGRESS a static flag of ingress and clsact Qdiscs. 
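For reference, "ffff:fff1" in the tc output above is simply the textual major:minor form of TC_H_INGRESS/TC_H_CLSACT. A user-space sketch of the handle encoding (constants copied from include/uapi/linux/pkt_sched.h; illustrative only):

#include <stdio.h>

/* From include/uapi/linux/pkt_sched.h */
#define TC_H_MAJ_MASK	0xFFFF0000U
#define TC_H_MIN_MASK	0x0000FFFFU
#define TC_H_MAJ(h)	((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h)	((h) & TC_H_MIN_MASK)
#define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

#define TC_H_INGRESS	 0xFFFFFFF1U
#define TC_H_CLSACT	 TC_H_INGRESS
#define TC_H_MIN_INGRESS 0xFFF2U
#define TC_H_MIN_EGRESS	 0xFFF3U

int main(void)
{
	/* "parent ffff:fff1" in tc syntax is major ffff, minor fff1. */
	unsigned int parent = TC_H_MAKE(0xFFFF0000U, 0xFFF1U);

	printf("parent ffff:fff1 = %#x, TC_H_INGRESS = %#x, equal: %d\n",
	       parent, TC_H_INGRESS, parent == TC_H_INGRESS);
	printf("clsact ingress block = %x:%x, egress block = %x:%x\n",
	       TC_H_MAJ(TC_H_CLSACT) >> 16, TC_H_MIN_INGRESS,
	       TC_H_MAJ(TC_H_CLSACT) >> 16, TC_H_MIN_EGRESS);
	return 0;
}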
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Fixes: 1f211a1b929c ("net, sched: add clsact qdisc") Tested-by: Pedro Tammela Acked-by: Jamal Hadi Salim Reviewed-by: Jamal Hadi Salim Reviewed-by: Vlad Buslov Signed-off-by: Peilin Ye Signed-off-by: Jakub Kicinski --- net/sched/sch_api.c | 7 ++++++- net/sched/sch_ingress.c | 4 ++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index fdb8f429333d2..383195955b7d2 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1252,7 +1252,12 @@ static struct Qdisc *qdisc_create(struct net_device *dev, sch->parent = parent; if (handle == TC_H_INGRESS) { - sch->flags |= TCQ_F_INGRESS; + if (!(sch->flags & TCQ_F_INGRESS)) { + NL_SET_ERR_MSG(extack, + "Specified parent ID is reserved for ingress and clsact Qdiscs"); + err = -EINVAL; + goto err_out3; + } handle = TC_H_MAKE(TC_H_INGRESS, 0); } else { if (handle == 0) { diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 35963929e1178..e43a454993723 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -140,7 +140,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = { .cl_ops = &ingress_class_ops, .id = "ingress", .priv_size = sizeof(struct ingress_sched_data), - .static_flags = TCQ_F_CPUSTATS, + .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS, .init = ingress_init, .destroy = ingress_destroy, .dump = ingress_dump, @@ -281,7 +281,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = { .cl_ops = &clsact_class_ops, .id = "clsact", .priv_size = sizeof(struct clsact_sched_data), - .static_flags = TCQ_F_CPUSTATS, + .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS, .init = clsact_init, .destroy = clsact_destroy, .dump = ingress_dump, From 9de95df5d15baa956c2b70b9e794842e790a8a13 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Mon, 29 May 2023 12:54:26 -0700 Subject: [PATCH 222/303] net/sched: Prohibit regrafting ingress or clsact Qdiscs Currently, after creating an ingress (or clsact) Qdisc and grafting it under TC_H_INGRESS (TC_H_CLSACT), it is possible to graft it again under e.g. a TBF Qdisc: $ ip link add ifb0 type ifb $ tc qdisc add dev ifb0 handle 1: root tbf rate 20kbit buffer 1600 limit 3000 $ tc qdisc add dev ifb0 clsact $ tc qdisc link dev ifb0 handle ffff: parent 1:1 $ tc qdisc show dev ifb0 qdisc tbf 1: root refcnt 2 rate 20Kbit burst 1600b lat 560.0ms qdisc clsact ffff: parent ffff:fff1 refcnt 2 ^^^^^^^^ clsact's refcount has increased: it is now grafted under both TC_H_CLSACT and 1:1. ingress and clsact Qdiscs should only be used under TC_H_INGRESS (TC_H_CLSACT). Prohibit regrafting them. 
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Fixes: 1f211a1b929c ("net, sched: add clsact qdisc") Tested-by: Pedro Tammela Acked-by: Jamal Hadi Salim Reviewed-by: Jamal Hadi Salim Reviewed-by: Vlad Buslov Signed-off-by: Peilin Ye Signed-off-by: Jakub Kicinski --- net/sched/sch_api.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 383195955b7d2..49b9c1bbfdd97 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1596,6 +1596,11 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, NL_SET_ERR_MSG(extack, "Invalid qdisc name"); return -EINVAL; } + if (q->flags & TCQ_F_INGRESS) { + NL_SET_ERR_MSG(extack, + "Cannot regraft ingress or clsact Qdiscs"); + return -EINVAL; + } if (q == p || (p && check_loop(q, p, 0))) { NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected"); From 36eec020fab668719b541f34d97f44e232ffa165 Mon Sep 17 00:00:00 2001 From: Zhengchao Shao Date: Sat, 27 May 2023 17:37:47 +0800 Subject: [PATCH 223/303] net: sched: fix NULL pointer dereference in mq_attach When use the following command to test: 1)ip link add bond0 type bond 2)ip link set bond0 up 3)tc qdisc add dev bond0 root handle ffff: mq 4)tc qdisc replace dev bond0 parent ffff:fff1 handle ffff: mq The kernel reports NULL pointer dereference issue. The stack information is as follows: Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000 Internal error: Oops: 0000000096000006 [#1] SMP Modules linked in: pstate: 20000005 (nzCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) pc : mq_attach+0x44/0xa0 lr : qdisc_graft+0x20c/0x5cc sp : ffff80000e2236a0 x29: ffff80000e2236a0 x28: ffff0000c0e59d80 x27: ffff0000c0be19c0 x26: ffff0000cae3e800 x25: 0000000000000010 x24: 00000000fffffff1 x23: 0000000000000000 x22: ffff0000cae3e800 x21: ffff0000c9df4000 x20: ffff0000c9df4000 x19: 0000000000000000 x18: ffff80000a934000 x17: ffff8000f5b56000 x16: ffff80000bb08000 x15: 0000000000000000 x14: 0000000000000000 x13: 6b6b6b6b6b6b6b6b x12: 6b6b6b6b00000001 x11: 0000000000000000 x10: 0000000000000000 x9 : 0000000000000000 x8 : ffff0000c0be0730 x7 : bbbbbbbbbbbbbbbb x6 : 0000000000000008 x5 : ffff0000cae3e864 x4 : 0000000000000000 x3 : 0000000000000001 x2 : 0000000000000001 x1 : ffff8000090bc23c x0 : 0000000000000000 Call trace: mq_attach+0x44/0xa0 qdisc_graft+0x20c/0x5cc tc_modify_qdisc+0x1c4/0x664 rtnetlink_rcv_msg+0x354/0x440 netlink_rcv_skb+0x64/0x144 rtnetlink_rcv+0x28/0x34 netlink_unicast+0x1e8/0x2a4 netlink_sendmsg+0x308/0x4a0 sock_sendmsg+0x64/0xac ____sys_sendmsg+0x29c/0x358 ___sys_sendmsg+0x90/0xd0 __sys_sendmsg+0x7c/0xd0 __arm64_sys_sendmsg+0x2c/0x38 invoke_syscall+0x54/0x114 el0_svc_common.constprop.1+0x90/0x174 do_el0_svc+0x3c/0xb0 el0_svc+0x24/0xec el0t_64_sync_handler+0x90/0xb4 el0t_64_sync+0x174/0x178 This is because when mq is added for the first time, qdiscs in mq is set to NULL in mq_attach(). Therefore, when replacing mq after adding mq, we need to initialize qdiscs in the mq before continuing to graft. Otherwise, it will couse NULL pointer dereference issue in mq_attach(). And the same issue will occur in the attach functions of mqprio, taprio and htb. ffff:fff1 means that the repalce qdisc is ingress. Ingress does not allow any qdisc to be attached. Therefore, ffff:fff1 is incorrectly used, and the command should be dropped. 
Fixes: 6ec1c69a8f64 ("net_sched: add classful multiqueue dummy scheduler") Signed-off-by: Zhengchao Shao Tested-by: Peilin Ye Acked-by: Jamal Hadi Salim Link: https://lore.kernel.org/r/20230527093747.3583502-1-shaozhengchao@huawei.com Signed-off-by: Jakub Kicinski --- net/sched/sch_api.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 49b9c1bbfdd97..014209b1dd584 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1606,6 +1606,10 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected"); return -ELOOP; } + if (clid == TC_H_INGRESS) { + NL_SET_ERR_MSG(extack, "Ingress cannot graft directly"); + return -EINVAL; + } qdisc_refcount_inc(q); goto graft; } else { From f6a27d6dc51b288106adaf053cff9c9b9cc12c4e Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 30 May 2023 19:32:13 +0000 Subject: [PATCH 224/303] KVM: arm64: Drop last page ref in kvm_pgtable_stage2_free_removed() The reference count on page table allocations is increased for every 'counted' PTE (valid or donated) in the table in addition to the initial reference from ->zalloc_page(). kvm_pgtable_stage2_free_removed() fails to drop the last reference on the root of the table walk, meaning we leak memory. Fix it by dropping the last reference after the free walker returns, at which point all references for 'counted' PTEs have been released. Cc: stable@vger.kernel.org Fixes: 5c359cca1faf ("KVM: arm64: Tear down unlinked stage-2 subtree after break-before-make") Reported-by: Yu Zhao Signed-off-by: Oliver Upton Tested-by: Yu Zhao Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230530193213.1663411-1-oliver.upton@linux.dev --- arch/arm64/kvm/hyp/pgtable.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index e1eacffbc41f4..95dae02ccc2e6 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -1332,4 +1332,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg }; WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1)); + + WARN_ON(mm_ops->page_count(pgtable) != 1); + mm_ops->put_page(pgtable); } From f4e4534850a9d18c250a93f8d7fbb51310828110 Mon Sep 17 00:00:00 2001 From: Pedro Tammela Date: Mon, 29 May 2023 12:33:35 -0300 Subject: [PATCH 225/303] net/netlink: fix NETLINK_LIST_MEMBERSHIPS length report The current code for the length calculation wrongly truncates the reported length of the groups array, causing an under report of the subscribed groups. To fix this, use 'BITS_TO_BYTES()' which rounds up the division by 8. 
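The rounding difference is easy to see in a stand-alone sketch (DIV_ROUND_UP, BITS_TO_BYTES and ALIGN are re-implemented here for illustration only): with 33 subscribed-group bits the old expression reports 4 bytes instead of 8, so the second u32 of the membership bitmap is silently dropped.

#include <stdio.h>

/* Local stand-ins for the kernel helpers used in af_netlink.c. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define BITS_TO_BYTES(nr)	DIV_ROUND_UP(nr, 8)
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int ngroups;

	for (ngroups = 32; ngroups <= 34; ngroups++)
		printf("ngroups=%u  old=%zu  new=%zu\n", ngroups,
		       ALIGN(ngroups / 8, sizeof(unsigned int)),
		       ALIGN(BITS_TO_BYTES(ngroups), sizeof(unsigned int)));
	return 0;
}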
Fixes: b42be38b2778 ("netlink: add API to retrieve all group memberships") Signed-off-by: Pedro Tammela Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230529153335.389815-1-pctammela@mojatatu.com Signed-off-by: Jakub Kicinski --- net/netlink/af_netlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index c87804112d0c6..3a1e0fd5bf149 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1779,7 +1779,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname, break; } } - if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen)) + if (put_user(ALIGN(BITS_TO_BYTES(nlk->ngroups), sizeof(u32)), optlen)) err = -EFAULT; netlink_unlock_table(); return err; From 1c913a1c35aa61cf280173b2bcc133c3953c38fc Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 25 May 2023 21:27:21 +0000 Subject: [PATCH 226/303] KVM: arm64: Iterate arm_pmus list to probe for default PMU To date KVM has relied on using a perf event to probe the core PMU at the time of vPMU initialization. Behind the scenes perf_event_init() would iteratively walk the PMUs of the system and return the PMU that could handle the event. However, an upcoming change in perf core will drop the iterative walk, thereby breaking the fragile dance we do on the KVM side. Avoid the problem altogether by iterating over the list of supported PMUs maintained in KVM, returning the core PMU that matches the CPU we were called on. Tested-by: Nathan Chancellor Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230525212723.3361524-2-oliver.upton@linux.dev --- arch/arm64/kvm/pmu-emul.c | 46 ++++++++++----------------------------- 1 file changed, 12 insertions(+), 34 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 45727d50d18dd..5deddc49e7454 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -694,45 +694,23 @@ void kvm_host_pmu_init(struct arm_pmu *pmu) static struct arm_pmu *kvm_pmu_probe_armpmu(void) { - struct perf_event_attr attr = { }; - struct perf_event *event; - struct arm_pmu *pmu = NULL; - - /* - * Create a dummy event that only counts user cycles. As we'll never - * leave this function with the event being live, it will never - * count anything. But it allows us to probe some of the PMU - * details. Yes, this is terrible. 
- */ - attr.type = PERF_TYPE_RAW; - attr.size = sizeof(attr); - attr.pinned = 1; - attr.disabled = 0; - attr.exclude_user = 0; - attr.exclude_kernel = 1; - attr.exclude_hv = 1; - attr.exclude_host = 1; - attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES; - attr.sample_period = GENMASK(63, 0); + struct arm_pmu *tmp, *pmu = NULL; + struct arm_pmu_entry *entry; + int cpu; - event = perf_event_create_kernel_counter(&attr, -1, current, - kvm_pmu_perf_overflow, &attr); + mutex_lock(&arm_pmus_lock); - if (IS_ERR(event)) { - pr_err_once("kvm: pmu event creation failed %ld\n", - PTR_ERR(event)); - return NULL; - } + cpu = smp_processor_id(); + list_for_each_entry(entry, &arm_pmus, entry) { + tmp = entry->arm_pmu; - if (event->pmu) { - pmu = to_arm_pmu(event->pmu); - if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI || - pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) - pmu = NULL; + if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) { + pmu = tmp; + break; + } } - perf_event_disable(event); - perf_event_release_kernel(event); + mutex_unlock(&arm_pmus_lock); return pmu; } From 40e54cad454076172cc3e2bca60aa924650a3e4b Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 25 May 2023 21:27:22 +0000 Subject: [PATCH 227/303] KVM: arm64: Document default vPMU behavior on heterogeneous systems KVM maintains a mask of supported CPUs when a vPMU type is explicitly selected by userspace and is used to reject any attempt to run the vCPU on an unsupported CPU. This is great, but we're still beholden to the default behavior where vCPUs can be scheduled anywhere and guest counters may silently stop working. Avoid confusing the next poor sod to look at this code and document the intended behavior. Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230525212723.3361524-3-oliver.upton@linux.dev --- arch/arm64/kvm/pmu-emul.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 5deddc49e7454..491ca7eb2a4c6 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -890,7 +890,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) return -EBUSY; if (!kvm->arch.arm_pmu) { - /* No PMU set, get the default one */ + /* + * No PMU set, get the default one. + * + * The observant among you will notice that the supported_cpus + * mask does not get updated for the default PMU even though it + * is quite possible the selected instance supports only a + * subset of cores in the system. This is intentional, and + * upholds the preexisting behavior on heterogeneous systems + * where vCPUs can be scheduled on any core but the guest + * counters could stop working. + */ kvm->arch.arm_pmu = kvm_pmu_probe_armpmu(); if (!kvm->arch.arm_pmu) return -ENODEV; From 448a5ce1120c5bdbce1f1ccdabcd31c7d029f328 Mon Sep 17 00:00:00 2001 From: Vladislav Efanov Date: Tue, 30 May 2023 14:39:41 +0300 Subject: [PATCH 228/303] udp6: Fix race condition in udp6_sendmsg & connect Syzkaller got the following report: BUG: KASAN: use-after-free in sk_setup_caps+0x621/0x690 net/core/sock.c:2018 Read of size 8 at addr ffff888027f82780 by task syz-executor276/3255 The function sk_setup_caps (called by ip6_sk_dst_store_flow-> ip6_dst_store) referenced already freed memory as this memory was freed by parallel task in udpv6_sendmsg->ip6_sk_dst_lookup_flow-> sk_dst_check. 
task1 (connect) task2 (udp6_sendmsg) sk_setup_caps->sk_dst_set | | sk_dst_check-> | sk_dst_set | dst_release sk_setup_caps references | to already freed dst_entry| The reason for this race condition is: sk_setup_caps() keeps using the dst after transferring the ownership to the dst cache. Found by Linux Verification Center (linuxtesting.org) with syzkaller. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Vladislav Efanov Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- net/core/sock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/sock.c b/net/core/sock.c index 5440e67bcfe3b..24f2761bdb1d8 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2381,7 +2381,6 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) { u32 max_segs = 1; - sk_dst_set(sk, dst); sk->sk_route_caps = dst->dev->features; if (sk_is_tcp(sk)) sk->sk_route_caps |= NETIF_F_GSO; @@ -2400,6 +2399,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) } } sk->sk_gso_max_segs = max_segs; + sk_dst_set(sk, dst); } EXPORT_SYMBOL_GPL(sk_setup_caps); From 81d0fa4cb4fc0e1a49c2b22f92c43d9fe972ebcf Mon Sep 17 00:00:00 2001 From: Pietro Borrello Date: Sat, 28 Jan 2023 16:23:41 +0000 Subject: [PATCH 229/303] tracing/probe: trace_probe_primary_from_call(): checked list_first_entry All callers of trace_probe_primary_from_call() check the return value to be non NULL. However, the function returns list_first_entry(&tpe->probes, ...) which can never be NULL. Additionally, it does not check for the list being possibly empty, possibly causing a type confusion on empty lists. Use list_first_entry_or_null() which solves both problems. Link: https://lore.kernel.org/linux-trace-kernel/20230128-list-entry-null-check-v1-1-8bde6a3da2ef@diag.uniroma1.it/ Fixes: 60d53e2c3b75 ("tracing/probe: Split trace_event related data from trace_probe") Signed-off-by: Pietro Borrello Reviewed-by: Steven Rostedt (Google) Acked-by: Masami Hiramatsu (Google) Acked-by: Mukesh Ojha Cc: stable@vger.kernel.org Signed-off-by: Masami Hiramatsu (Google) --- kernel/trace/trace_probe.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index ef8ed3b65d055..6a4ecfb1da438 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -308,7 +308,7 @@ trace_probe_primary_from_call(struct trace_event_call *call) { struct trace_probe_event *tpe = trace_probe_event_from_call(call); - return list_first_entry(&tpe->probes, struct trace_probe, list); + return list_first_entry_or_null(&tpe->probes, struct trace_probe, list); } static inline struct list_head *trace_probe_probe_list(struct trace_probe *tp) From c034203b6a9dae6751ef4371c18cb77983e30c28 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 29 May 2023 14:35:55 +0300 Subject: [PATCH 230/303] nfsd: fix double fget() bug in __write_ports_addfd() The bug here is that you cannot rely on getting the same socket from multiple calls to fget() because userspace can influence that. This is a kind of double fetch bug. The fix is to delete the svc_alien_sock() function and instead do the checking inside the svc_addsock() function. 
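A sketch of the underlying pattern (editor's illustration, not the actual nfsd/sunrpc code): validate and use the same reference obtained from a single lookup, because a second lookup of the same fd number may return a different socket once userspace closes and reuses the descriptor.

#include <linux/net.h>
#include <net/sock.h>

static int use_sock_in_net(struct net *net, int fd)
{
	int err;
	struct socket *sock = sockfd_lookup(fd, &err);

	if (!sock)
		return err;

	err = -EINVAL;
	if (sock_net(sock->sk) != net)	/* check ... */
		goto out_put;

	err = 0;			/* ... and use the very same sock here */
out_put:
	sockfd_put(sock);
	return err;
}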
Fixes: 3064639423c4 ("nfsd: check passed socket's net matches NFSd superblock's one") Signed-off-by: Dan Carpenter Reviewed-by: NeilBrown Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever --- fs/nfsd/nfsctl.c | 7 +------ include/linux/sunrpc/svcsock.h | 7 +++---- net/sunrpc/svcsock.c | 24 ++++++------------------ 3 files changed, 10 insertions(+), 28 deletions(-) diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index c159817d12821..b4fd7a7062d5e 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -690,16 +690,11 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred if (err != 0 || fd < 0) return -EINVAL; - if (svc_alien_sock(net, fd)) { - printk(KERN_ERR "%s: socket net is different to NFSd's one\n", __func__); - return -EINVAL; - } - err = nfsd_create_serv(net); if (err != 0) return err; - err = svc_addsock(nn->nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred); + err = svc_addsock(nn->nfsd_serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred); if (err >= 0 && !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1)) diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index d16ae621782c0..a7116048a4d4b 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -61,10 +61,9 @@ int svc_recv(struct svc_rqst *, long); void svc_send(struct svc_rqst *rqstp); void svc_drop(struct svc_rqst *); void svc_sock_update_bufs(struct svc_serv *serv); -bool svc_alien_sock(struct net *net, int fd); -int svc_addsock(struct svc_serv *serv, const int fd, - char *name_return, const size_t len, - const struct cred *cred); +int svc_addsock(struct svc_serv *serv, struct net *net, + const int fd, char *name_return, const size_t len, + const struct cred *cred); void svc_init_xprt_sock(void); void svc_cleanup_xprt_sock(void); struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 63fe7a3389922..f77cebe2c0713 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1480,25 +1480,10 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, return svsk; } -bool svc_alien_sock(struct net *net, int fd) -{ - int err; - struct socket *sock = sockfd_lookup(fd, &err); - bool ret = false; - - if (!sock) - goto out; - if (sock_net(sock->sk) != net) - ret = true; - sockfd_put(sock); -out: - return ret; -} -EXPORT_SYMBOL_GPL(svc_alien_sock); - /** * svc_addsock - add a listener socket to an RPC service * @serv: pointer to RPC service to which to add a new listener + * @net: caller's network namespace * @fd: file descriptor of the new listener * @name_return: pointer to buffer to fill in with name of listener * @len: size of the buffer @@ -1508,8 +1493,8 @@ EXPORT_SYMBOL_GPL(svc_alien_sock); * Name is terminated with '\n'. On error, returns a negative errno * value. 
*/ -int svc_addsock(struct svc_serv *serv, const int fd, char *name_return, - const size_t len, const struct cred *cred) +int svc_addsock(struct svc_serv *serv, struct net *net, const int fd, + char *name_return, const size_t len, const struct cred *cred) { int err = 0; struct socket *so = sockfd_lookup(fd, &err); @@ -1520,6 +1505,9 @@ int svc_addsock(struct svc_serv *serv, const int fd, char *name_return, if (!so) return err; + err = -EINVAL; + if (sock_net(so->sk) != net) + goto out; err = -EAFNOSUPPORT; if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6)) goto out; From 8dc2a7e8027fbeca0c7df81d4c82e735a59b5741 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Fri, 26 May 2023 17:46:30 +0200 Subject: [PATCH 231/303] riscv: Fix relocatable kernels with early alternatives using -fno-pie Early alternatives are called with the mmu disabled, and then should not access any global symbols through the GOT since it requires relocations, relocations that we do before but *virtually*. So only use medany code model for this early code. Signed-off-by: Alexandre Ghiti Tested-by: Conor Dooley # booted on nezha & unmatched Fixes: 39b33072941f ("riscv: Introduce CONFIG_RELOCATABLE") Link: https://lore.kernel.org/r/20230526154630.289374-1-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/errata/Makefile | 4 ++++ arch/riscv/kernel/Makefile | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile index a1055965fbee6..7b2637c8c3326 100644 --- a/arch/riscv/errata/Makefile +++ b/arch/riscv/errata/Makefile @@ -1,2 +1,6 @@ +ifdef CONFIG_RELOCATABLE +KBUILD_CFLAGS += -fno-pie +endif + obj-$(CONFIG_ERRATA_SIFIVE) += sifive/ obj-$(CONFIG_ERRATA_THEAD) += thead/ diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index fbdccc21418a5..153864e4f3996 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -23,6 +23,10 @@ ifdef CONFIG_FTRACE CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE) endif +ifdef CONFIG_RELOCATABLE +CFLAGS_alternative.o += -fno-pie +CFLAGS_cpufeature.o += -fno-pie +endif ifdef CONFIG_KASAN KASAN_SANITIZE_alternative.o := n KASAN_SANITIZE_cpufeature.o := n From 6199d23c91ce53bfed455f09a8c5ed170d516824 Mon Sep 17 00:00:00 2001 From: Bastien Nocera Date: Wed, 31 May 2023 10:24:28 +0200 Subject: [PATCH 232/303] HID: logitech-hidpp: Handle timeout differently from busy If an attempt at contacting a receiver or a device fails because the receiver or device never responds, don't restart the communication, only restart it if the receiver or device answers that it's busy, as originally intended. This was the behaviour on communication timeout before commit 586e8fede795 ("HID: logitech-hidpp: Retry commands when device is busy"). This fixes some overly long waits in a critical path on boot, when checking whether the device is connected by getting its HID++ version. 
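The policy being restored can be sketched generically (user-space illustration, not the driver code): retry a bounded number of times while the device answers "busy", but give up immediately when it does not answer at all, so a dead or absent device costs only one timeout.

#include <errno.h>
#include <stdio.h>

#define MAX_RETRIES 3

/* Stand-in for one send/receive exchange with the device. */
static int send_once(int attempt)
{
	return attempt < 2 ? -EBUSY : 0;	/* pretend it is busy twice */
}

static int send_with_retries(void)
{
	int attempt, ret;

	for (attempt = 0; attempt < MAX_RETRIES; attempt++) {
		ret = send_once(attempt);
		if (ret == -EBUSY)
			continue;	/* device answered: worth retrying */
		return ret;		/* success, -ETIMEDOUT, or other error */
	}
	return -EBUSY;
}

int main(void)
{
	printf("send_with_retries() = %d\n", send_with_retries());
	return 0;
}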
Signed-off-by: Bastien Nocera Suggested-by: Mark Lord Fixes: 586e8fede795 ("HID: logitech-hidpp: Retry commands when device is busy") Link: https://bugzilla.kernel.org/show_bug.cgi?id=217412 Signed-off-by: Jiri Kosina --- drivers/hid/hid-logitech-hidpp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 0fcfd85fea0fe..2246044b16393 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -314,6 +314,7 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp, dbg_hid("%s:timeout waiting for response\n", __func__); memset(response, 0, sizeof(struct hidpp_report)); ret = -ETIMEDOUT; + goto exit; } if (response->report_id == REPORT_ID_HIDPP_SHORT && From 6d074ce231772c66e648a61f6bd2245e7129d1f5 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 29 May 2023 12:50:34 -0700 Subject: [PATCH 233/303] scsi: stex: Fix gcc 13 warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gcc 13 may assign another type to enumeration constants than gcc 12. Split the large enum at the top of source file stex.c such that the type of the constants used in time expressions is changed back to the same type chosen by gcc 12. This patch suppresses compiler warnings like this one: In file included from ./include/linux/bitops.h:7, from ./include/linux/kernel.h:22, from drivers/scsi/stex.c:13: drivers/scsi/stex.c: In function ‘stex_common_handshake’: ./include/linux/typecheck.h:12:25: error: comparison of distinct pointer types lacks a cast [-Werror] 12 | (void)(&__dummy == &__dummy2); \ | ^~ ./include/linux/jiffies.h:106:10: note: in expansion of macro ‘typecheck’ 106 | typecheck(unsigned long, b) && \ | ^~~~~~~~~ drivers/scsi/stex.c:1035:29: note: in expansion of macro ‘time_after’ 1035 | if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { | ^~~~~~~~~~ See also https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107405. Cc: stable@vger.kernel.org Acked-by: Randy Dunlap Tested-by: Randy Dunlap # build-tested Signed-off-by: Bart Van Assche Link: https://lore.kernel.org/r/20230529195034.3077-1-bvanassche@acm.org Signed-off-by: Martin K. Petersen --- drivers/scsi/stex.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 5b230e149c3df..8ffb75be99bc8 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -109,7 +109,9 @@ enum { TASK_ATTRIBUTE_HEADOFQUEUE = 0x1, TASK_ATTRIBUTE_ORDERED = 0x2, TASK_ATTRIBUTE_ACA = 0x4, +}; +enum { SS_STS_NORMAL = 0x80000000, SS_STS_DONE = 0x40000000, SS_STS_HANDSHAKE = 0x20000000, @@ -121,7 +123,9 @@ enum { SS_I2H_REQUEST_RESET = 0x2000, SS_MU_OPERATIONAL = 0x80000000, +}; +enum { STEX_CDB_LENGTH = 16, STATUS_VAR_LEN = 128, From 856303797724d28f1d65b702f0eadcee1ea7abf5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 14:54:54 +0200 Subject: [PATCH 234/303] nvme: fix the name of Zone Append for verbose logging No Management involved in Zone Appened. 
Fixes: bd83fe6f2cd2 ("nvme: add verbose error logging") Signed-off-by: Christoph Hellwig Reviewed-by: Alan Adamson Signed-off-by: Keith Busch --- drivers/nvme/host/constants.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c index bc523ca022548..5e4f8848dce08 100644 --- a/drivers/nvme/host/constants.c +++ b/drivers/nvme/host/constants.c @@ -21,7 +21,7 @@ static const char * const nvme_ops[] = { [nvme_cmd_resv_release] = "Reservation Release", [nvme_cmd_zone_mgmt_send] = "Zone Management Send", [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive", - [nvme_cmd_zone_append] = "Zone Management Append", + [nvme_cmd_zone_append] = "Zone Append", }; static const char * const nvme_admin_ops[] = { From 8fe72b76db79d694858e872370df49676bc3be8c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 5 May 2023 12:22:09 +0300 Subject: [PATCH 235/303] mailbox: mailbox-test: fix a locking issue in mbox_test_message_write() There was a bug where this code forgot to unlock the tdev->mutex if the kzalloc() failed. Fix this issue, by moving the allocation outside the lock. Fixes: 2d1e952a2b8e ("mailbox: mailbox-test: Fix potential double-free in mbox_test_message_write()") Signed-off-by: Dan Carpenter Reviewed-by: Lee Jones Signed-off-by: Jassi Brar --- drivers/mailbox/mailbox-test.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index c4a705c303314..fc6a12a51b403 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c @@ -98,6 +98,7 @@ static ssize_t mbox_test_message_write(struct file *filp, size_t count, loff_t *ppos) { struct mbox_test_device *tdev = filp->private_data; + char *message; void *data; int ret; @@ -113,12 +114,13 @@ static ssize_t mbox_test_message_write(struct file *filp, return -EINVAL; } - mutex_lock(&tdev->mutex); - - tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL); - if (!tdev->message) + message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL); + if (!message) return -ENOMEM; + mutex_lock(&tdev->mutex); + + tdev->message = message; ret = copy_from_user(tdev->message, userbuf, count); if (ret) { ret = -EFAULT; From ffa28312e2837ae788855840e126fa6f8a1ac35d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 16 May 2023 11:12:51 +0300 Subject: [PATCH 236/303] firmware_loader: Fix a NULL vs IS_ERR() check The crypto_alloc_shash() function doesn't return NULL, it returns error pointers. Update the check accordingly. 
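The convention at issue can be demonstrated with a tiny user-space re-implementation of the err.h helpers (MAX_ERRNO and the functions below mirror include/linux/err.h; illustrative only): an ERR_PTR value is a valid-looking non-NULL pointer, so a plain NULL check never catches it.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	/* What an allocator using the ERR_PTR convention returns on failure. */
	void *alg = ERR_PTR(-ENOMEM);

	printf("alg == NULL? %d, IS_ERR(alg)? %d, PTR_ERR(alg) = %ld\n",
	       alg == NULL, IS_ERR(alg), PTR_ERR(alg));
	return 0;
}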
Fixes: 02fe26f25325 ("firmware_loader: Add debug message with checksum for FW file") Signed-off-by: Dan Carpenter Reviewed-by: Cezary Rojewski Acked-by: Luis Chamberlain Link: https://lore.kernel.org/r/36ef6042-ce74-4e8e-9e2c-5b5c28940610@kili.mountain Signed-off-by: Greg Kroah-Hartman --- drivers/base/firmware_loader/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 9d79d5ad91024..b58c42f1b1ce6 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -812,7 +812,7 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, st char *outbuf; alg = crypto_alloc_shash("sha256", 0, 0); - if (!alg) + if (IS_ERR(alg)) return; sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL); From 4acfe3dfde685a5a9eaec5555351918e2d7266a1 Mon Sep 17 00:00:00 2001 From: Mirsad Goran Todorovac Date: Tue, 9 May 2023 10:47:45 +0200 Subject: [PATCH 237/303] test_firmware: prevent race conditions by a correct implementation of locking Dan Carpenter spotted a race condition in a couple of situations like these in the test_firmware driver: static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) { u8 val; int ret; ret = kstrtou8(buf, 10, &val); if (ret) return ret; mutex_lock(&test_fw_mutex); *(u8 *)cfg = val; mutex_unlock(&test_fw_mutex); /* Always return full write size even if we didn't consume all */ return size; } static ssize_t config_num_requests_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int rc; mutex_lock(&test_fw_mutex); if (test_fw_config->reqs) { pr_err("Must call release_all_firmware prior to changing config\n"); rc = -EINVAL; mutex_unlock(&test_fw_mutex); goto out; } mutex_unlock(&test_fw_mutex); rc = test_dev_config_update_u8(buf, count, &test_fw_config->num_requests); out: return rc; } static ssize_t config_read_fw_idx_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return test_dev_config_update_u8(buf, count, &test_fw_config->read_fw_idx); } The function test_dev_config_update_u8() is called from both the locked and the unlocked context, function config_num_requests_store() and config_read_fw_idx_store() which can both be called asynchronously as they are driver's methods, while test_dev_config_update_u8() and siblings change their argument pointed to by u8 *cfg or similar pointer. To avoid deadlock on test_fw_mutex, the lock is dropped before calling test_dev_config_update_u8() and re-acquired within test_dev_config_update_u8() itself, but alas this creates a race condition. Having two locks wouldn't assure a race-proof mutual exclusion. This situation is best avoided by the introduction of a new, unlocked function __test_dev_config_update_u8() which can be called from the locked context and reducing test_dev_config_update_u8() to: static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) { int ret; mutex_lock(&test_fw_mutex); ret = __test_dev_config_update_u8(buf, size, cfg); mutex_unlock(&test_fw_mutex); return ret; } doing the locking and calling the unlocked primitive, which enables both locked and unlocked versions without duplication of code. The similar approach was applied to all functions called from the locked and the unlocked context, which safely mitigates both deadlocks and race conditions in the driver. 
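The same locked/unlocked split can be shown as a self-contained pthread sketch (illustrative only, not the driver code): the double-underscore helper assumes the lock is already held, the plain wrapper takes it, and a compound check-then-update holds the lock across both steps.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char num_requests;
static int busy;

/* Caller must hold cfg_lock. */
static int __update_u8(unsigned char *cfg, unsigned char val)
{
	*cfg = val;
	return 0;
}

/* Convenience wrapper for callers that do not hold the lock. */
static int update_u8(unsigned char *cfg, unsigned char val)
{
	int ret;

	pthread_mutex_lock(&cfg_lock);
	ret = __update_u8(cfg, val);
	pthread_mutex_unlock(&cfg_lock);
	return ret;
}

/* Check-then-update done atomically under a single lock section. */
static int update_num_requests(unsigned char val)
{
	int ret;

	pthread_mutex_lock(&cfg_lock);
	if (busy)
		ret = -1;
	else
		ret = __update_u8(&num_requests, val);
	pthread_mutex_unlock(&cfg_lock);
	return ret;
}

int main(void)
{
	update_u8(&num_requests, 4);
	printf("num_requests=%d, guarded update: %d\n",
	       num_requests, update_num_requests(8));
	return 0;
}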
__test_dev_config_update_bool(), __test_dev_config_update_u8() and __test_dev_config_update_size_t() unlocked versions of the functions were introduced to be called from the locked contexts as a workaround without releasing the main driver's lock and thereof causing a race condition. The test_dev_config_update_bool(), test_dev_config_update_u8() and test_dev_config_update_size_t() locked versions of the functions are being called from driver methods without the unnecessary multiplying of the locking and unlocking code for each method, and complicating the code with saving of the return value across lock. Fixes: 7feebfa487b92 ("test_firmware: add support for request_firmware_into_buf") Cc: Luis Chamberlain Cc: Greg Kroah-Hartman Cc: Russ Weight Cc: Takashi Iwai Cc: Tianfei Zhang Cc: Shuah Khan Cc: Colin Ian King Cc: Randy Dunlap Cc: linux-kselftest@vger.kernel.org Cc: stable@vger.kernel.org # v5.4 Suggested-by: Dan Carpenter Signed-off-by: Mirsad Goran Todorovac Link: https://lore.kernel.org/r/20230509084746.48259-1-mirsad.todorovac@alu.unizg.hr Signed-off-by: Greg Kroah-Hartman --- lib/test_firmware.c | 52 ++++++++++++++++++++++++++++++--------------- 1 file changed, 35 insertions(+), 17 deletions(-) diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 05ed84c2fc4ce..35417e0af3f48 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -353,16 +353,26 @@ static ssize_t config_test_show_str(char *dst, return len; } -static int test_dev_config_update_bool(const char *buf, size_t size, +static inline int __test_dev_config_update_bool(const char *buf, size_t size, bool *cfg) { int ret; - mutex_lock(&test_fw_mutex); if (kstrtobool(buf, cfg) < 0) ret = -EINVAL; else ret = size; + + return ret; +} + +static int test_dev_config_update_bool(const char *buf, size_t size, + bool *cfg) +{ + int ret; + + mutex_lock(&test_fw_mutex); + ret = __test_dev_config_update_bool(buf, size, cfg); mutex_unlock(&test_fw_mutex); return ret; @@ -373,7 +383,8 @@ static ssize_t test_dev_config_show_bool(char *buf, bool val) return snprintf(buf, PAGE_SIZE, "%d\n", val); } -static int test_dev_config_update_size_t(const char *buf, +static int __test_dev_config_update_size_t( + const char *buf, size_t size, size_t *cfg) { @@ -384,9 +395,7 @@ static int test_dev_config_update_size_t(const char *buf, if (ret) return ret; - mutex_lock(&test_fw_mutex); *(size_t *)cfg = new; - mutex_unlock(&test_fw_mutex); /* Always return full write size even if we didn't consume all */ return size; @@ -402,7 +411,7 @@ static ssize_t test_dev_config_show_int(char *buf, int val) return snprintf(buf, PAGE_SIZE, "%d\n", val); } -static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) +static int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) { u8 val; int ret; @@ -411,14 +420,23 @@ static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) if (ret) return ret; - mutex_lock(&test_fw_mutex); *(u8 *)cfg = val; - mutex_unlock(&test_fw_mutex); /* Always return full write size even if we didn't consume all */ return size; } +static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) +{ + int ret; + + mutex_lock(&test_fw_mutex); + ret = __test_dev_config_update_u8(buf, size, cfg); + mutex_unlock(&test_fw_mutex); + + return ret; +} + static ssize_t test_dev_config_show_u8(char *buf, u8 val) { return snprintf(buf, PAGE_SIZE, "%u\n", val); @@ -471,10 +489,10 @@ static ssize_t config_num_requests_store(struct device *dev, mutex_unlock(&test_fw_mutex); goto out; } - 
mutex_unlock(&test_fw_mutex); - rc = test_dev_config_update_u8(buf, count, - &test_fw_config->num_requests); + rc = __test_dev_config_update_u8(buf, count, + &test_fw_config->num_requests); + mutex_unlock(&test_fw_mutex); out: return rc; @@ -518,10 +536,10 @@ static ssize_t config_buf_size_store(struct device *dev, mutex_unlock(&test_fw_mutex); goto out; } - mutex_unlock(&test_fw_mutex); - rc = test_dev_config_update_size_t(buf, count, - &test_fw_config->buf_size); + rc = __test_dev_config_update_size_t(buf, count, + &test_fw_config->buf_size); + mutex_unlock(&test_fw_mutex); out: return rc; @@ -548,10 +566,10 @@ static ssize_t config_file_offset_store(struct device *dev, mutex_unlock(&test_fw_mutex); goto out; } - mutex_unlock(&test_fw_mutex); - rc = test_dev_config_update_size_t(buf, count, - &test_fw_config->file_offset); + rc = __test_dev_config_update_size_t(buf, count, + &test_fw_config->file_offset); + mutex_unlock(&test_fw_mutex); out: return rc; From be37bed754ed90b2655382f93f9724b3c1aae847 Mon Sep 17 00:00:00 2001 From: Mirsad Goran Todorovac Date: Tue, 9 May 2023 10:47:47 +0200 Subject: [PATCH 238/303] test_firmware: fix a memory leak with reqs buffer Dan Carpenter spotted that test_fw_config->reqs will be leaked if trigger_batched_requests_store() is called two or more times. The same appears with trigger_batched_requests_async_store(). This bug wasn't trigger by the tests, but observed by Dan's visual inspection of the code. The recommended workaround was to return -EBUSY if test_fw_config->reqs is already allocated. Fixes: 7feebfa487b92 ("test_firmware: add support for request_firmware_into_buf") Cc: Luis Chamberlain Cc: Greg Kroah-Hartman Cc: Russ Weight Cc: Tianfei Zhang Cc: Shuah Khan Cc: Colin Ian King Cc: Randy Dunlap Cc: linux-kselftest@vger.kernel.org Cc: stable@vger.kernel.org # v5.4 Suggested-by: Dan Carpenter Suggested-by: Takashi Iwai Signed-off-by: Mirsad Goran Todorovac Reviewed-by: Dan Carpenter Acked-by: Luis Chamberlain Link: https://lore.kernel.org/r/20230509084746.48259-2-mirsad.todorovac@alu.unizg.hr Signed-off-by: Greg Kroah-Hartman --- lib/test_firmware.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 35417e0af3f48..91b232ed31619 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -913,6 +913,11 @@ static ssize_t trigger_batched_requests_store(struct device *dev, mutex_lock(&test_fw_mutex); + if (test_fw_config->reqs) { + rc = -EBUSY; + goto out_bail; + } + test_fw_config->reqs = vzalloc(array3_size(sizeof(struct test_batched_req), test_fw_config->num_requests, 2)); @@ -1011,6 +1016,11 @@ ssize_t trigger_batched_requests_async_store(struct device *dev, mutex_lock(&test_fw_mutex); + if (test_fw_config->reqs) { + rc = -EBUSY; + goto out_bail; + } + test_fw_config->reqs = vzalloc(array3_size(sizeof(struct test_batched_req), test_fw_config->num_requests, 2)); From 48e156023059e57a8fc68b498439832f7600ffff Mon Sep 17 00:00:00 2001 From: Mirsad Goran Todorovac Date: Tue, 9 May 2023 10:47:49 +0200 Subject: [PATCH 239/303] test_firmware: fix the memory leak of the allocated firmware buffer The following kernel memory leak was noticed after running tools/testing/selftests/firmware/fw_run_tests.sh: [root@pc-mtodorov firmware]# cat /sys/kernel/debug/kmemleak . . . unreferenced object 0xffff955389bc3400 (size 1024): comm "test_firmware-0", pid 5451, jiffies 4294944822 (age 65.652s) hex dump (first 32 bytes): 47 48 34 35 36 37 0a 00 00 00 00 00 00 00 00 00 GH4567.......... 
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ backtrace: [] slab_post_alloc_hook+0x8c/0x3c0 [] __kmem_cache_alloc_node+0x184/0x240 [] kmalloc_trace+0x2e/0xc0 [] test_fw_run_batch_request+0x9d/0x180 [] kthread+0x10b/0x140 [] ret_from_fork+0x29/0x50 unreferenced object 0xffff9553c334b400 (size 1024): comm "test_firmware-1", pid 5452, jiffies 4294944822 (age 65.652s) hex dump (first 32 bytes): 47 48 34 35 36 37 0a 00 00 00 00 00 00 00 00 00 GH4567.......... 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ backtrace: [] slab_post_alloc_hook+0x8c/0x3c0 [] __kmem_cache_alloc_node+0x184/0x240 [] kmalloc_trace+0x2e/0xc0 [] test_fw_run_batch_request+0x9d/0x180 [] kthread+0x10b/0x140 [] ret_from_fork+0x29/0x50 unreferenced object 0xffff9553c334f000 (size 1024): comm "test_firmware-2", pid 5453, jiffies 4294944822 (age 65.652s) hex dump (first 32 bytes): 47 48 34 35 36 37 0a 00 00 00 00 00 00 00 00 00 GH4567.......... 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ backtrace: [] slab_post_alloc_hook+0x8c/0x3c0 [] __kmem_cache_alloc_node+0x184/0x240 [] kmalloc_trace+0x2e/0xc0 [] test_fw_run_batch_request+0x9d/0x180 [] kthread+0x10b/0x140 [] ret_from_fork+0x29/0x50 unreferenced object 0xffff9553c3348400 (size 1024): comm "test_firmware-3", pid 5454, jiffies 4294944822 (age 65.652s) hex dump (first 32 bytes): 47 48 34 35 36 37 0a 00 00 00 00 00 00 00 00 00 GH4567.......... 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ backtrace: [] slab_post_alloc_hook+0x8c/0x3c0 [] __kmem_cache_alloc_node+0x184/0x240 [] kmalloc_trace+0x2e/0xc0 [] test_fw_run_batch_request+0x9d/0x180 [] kthread+0x10b/0x140 [] ret_from_fork+0x29/0x50 [root@pc-mtodorov firmware]# Note that the size 1024 corresponds to the size of the test firmware buffer. The actual number of the buffers leaked is around 70-110, depending on the test run. The cause of the leak is the following: request_partial_firmware_into_buf() and request_firmware_into_buf() provided firmware buffer isn't released on release_firmware(), we have allocated it and we are responsible for deallocating it manually. This is introduced in a number of context where previously only release_firmware() was called, which was insufficient. Reported-by: Mirsad Goran Todorovac Fixes: 7feebfa487b92 ("test_firmware: add support for request_firmware_into_buf") Cc: Greg Kroah-Hartman Cc: Dan Carpenter Cc: Takashi Iwai Cc: Luis Chamberlain Cc: Russ Weight Cc: Tianfei zhang Cc: Christophe JAILLET Cc: Zhengchao Shao Cc: Colin Ian King Cc: linux-kernel@vger.kernel.org Cc: Kees Cook Cc: Scott Branden Cc: Luis R. 
Rodriguez Cc: linux-kselftest@vger.kernel.org Cc: stable@vger.kernel.org # v5.4 Signed-off-by: Mirsad Goran Todorovac Link: https://lore.kernel.org/r/20230509084746.48259-3-mirsad.todorovac@alu.unizg.hr Signed-off-by: Greg Kroah-Hartman --- lib/test_firmware.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 91b232ed31619..1d7d480b8eeb3 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -45,6 +45,7 @@ struct test_batched_req { bool sent; const struct firmware *fw; const char *name; + const char *fw_buf; struct completion completion; struct task_struct *task; struct device *dev; @@ -175,8 +176,14 @@ static void __test_release_all_firmware(void) for (i = 0; i < test_fw_config->num_requests; i++) { req = &test_fw_config->reqs[i]; - if (req->fw) + if (req->fw) { + if (req->fw_buf) { + kfree_const(req->fw_buf); + req->fw_buf = NULL; + } release_firmware(req->fw); + req->fw = NULL; + } } vfree(test_fw_config->reqs); @@ -670,6 +677,8 @@ static ssize_t trigger_request_store(struct device *dev, mutex_lock(&test_fw_mutex); release_firmware(test_firmware); + if (test_fw_config->reqs) + __test_release_all_firmware(); test_firmware = NULL; rc = request_firmware(&test_firmware, name, dev); if (rc) { @@ -770,6 +779,8 @@ static ssize_t trigger_async_request_store(struct device *dev, mutex_lock(&test_fw_mutex); release_firmware(test_firmware); test_firmware = NULL; + if (test_fw_config->reqs) + __test_release_all_firmware(); rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL, NULL, trigger_async_request_cb); if (rc) { @@ -812,6 +823,8 @@ static ssize_t trigger_custom_fallback_store(struct device *dev, mutex_lock(&test_fw_mutex); release_firmware(test_firmware); + if (test_fw_config->reqs) + __test_release_all_firmware(); test_firmware = NULL; rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOUEVENT, name, dev, GFP_KERNEL, NULL, @@ -874,6 +887,8 @@ static int test_fw_run_batch_request(void *data) test_fw_config->buf_size); if (!req->fw) kfree(test_buf); + else + req->fw_buf = test_buf; } else { req->rc = test_fw_config->req_firmware(&req->fw, req->name, @@ -934,6 +949,7 @@ static ssize_t trigger_batched_requests_store(struct device *dev, req->fw = NULL; req->idx = i; req->name = test_fw_config->name; + req->fw_buf = NULL; req->dev = dev; init_completion(&req->completion); req->task = kthread_run(test_fw_run_batch_request, req, @@ -1038,6 +1054,7 @@ ssize_t trigger_batched_requests_async_store(struct device *dev, for (i = 0; i < test_fw_config->num_requests; i++) { req = &test_fw_config->reqs[i]; req->name = test_fw_config->name; + req->fw_buf = NULL; req->fw = NULL; req->idx = i; init_completion(&req->completion); From 126310c9f669c9a8c875a3e5c2292299ca90225d Mon Sep 17 00:00:00 2001 From: K Prateek Nayak Date: Mon, 8 May 2023 14:11:14 +0530 Subject: [PATCH 240/303] drivers: base: cacheinfo: Fix shared_cpu_map changes in event of CPU hotplug While building the shared_cpu_map, check whether the cache level and cache type match. On certain systems that build the cache topology based on the instance ID, there are cases where the same ID may repeat across multiple cache levels, leading to an inaccurate topology. In the event of CPU offlining, cache_shared_cpu_map_remove() does not consider whether the IDs being compared belong to the same level. As a result, when the same IDs repeat across different cache levels, the CPU going offline is not removed from all the shared_cpu_maps.
Below is the output of the cache topology of CPU8 and its SMT sibling after CPU8 is offlined on a dual socket 3rd Generation AMD EPYC processor (2 x 64C/128T) running kernel release v6.3: # for i in /sys/devices/system/cpu/cpu8/cache/index*/shared_cpu_list; do echo -n "$i: "; cat $i; done /sys/devices/system/cpu/cpu8/cache/index0/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index1/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index2/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index3/shared_cpu_list: 8-15,136-143 # echo 0 > /sys/devices/system/cpu/cpu8/online # for i in /sys/devices/system/cpu/cpu136/cache/index*/shared_cpu_list; do echo -n "$i: "; cat $i; done /sys/devices/system/cpu/cpu136/cache/index0/shared_cpu_list: 136 /sys/devices/system/cpu/cpu136/cache/index1/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu136/cache/index2/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu136/cache/index3/shared_cpu_list: 9-15,136-143 CPU8 is removed from index0 (L1i) but remains in the shared_cpu_list of index1 (L1d) and index2 (L2). Since L1i, L1d, and L2 are shared by the SMT siblings, and they have the same cache instance ID, CPU8 is only removed from the first index with a matching ID, which is index0 (L1i) in this case. With this fix, the results are as expected when performing the same experiment on the same system: # for i in /sys/devices/system/cpu/cpu8/cache/index*/shared_cpu_list; do echo -n "$i: "; cat $i; done /sys/devices/system/cpu/cpu8/cache/index0/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index1/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index2/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index3/shared_cpu_list: 8-15,136-143 # echo 0 > /sys/devices/system/cpu/cpu8/online # for i in /sys/devices/system/cpu/cpu136/cache/index*/shared_cpu_list; do echo -n "$i: "; cat $i; done /sys/devices/system/cpu/cpu136/cache/index0/shared_cpu_list: 136 /sys/devices/system/cpu/cpu136/cache/index1/shared_cpu_list: 136 /sys/devices/system/cpu/cpu136/cache/index2/shared_cpu_list: 136 /sys/devices/system/cpu/cpu136/cache/index3/shared_cpu_list: 9-15,136-143 When rebuilding the topology, the same problem appears because cache_shared_cpu_map_setup() implements similar logic. Consider the same 3rd Generation EPYC processor: CPUs in Core 1, which share the L1 and L2 caches, have an L1 and L2 instance ID of 1. For all the CPUs on the second chiplet, the L3 ID is also 1, leading to CPUs from Core 1 (1, 17) and the entire second chiplet (8-15, 24-31) being grouped as CPUs sharing one cache domain. This went undetected until kernel release v6.3-rc5, since x86 processors depended on the arch-specific populate_cache_leaves() method to repopulate the shared_cpu_map when a CPU came back online.
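The core of the fix is to make the ID comparison conditional on the level and type of the leaves being compared. A standalone sketch of that predicate (the structure here is illustrative and much smaller than the kernel's struct cacheinfo; the guard mirrors the one added in the hunks below):

#include <stdbool.h>

/* Illustrative leaf descriptor; the kernel's struct cacheinfo carries the
 * same three properties used by the fix, among many others.
 */
struct cache_leaf_example {
	unsigned int level;	/* 1 = L1, 2 = L2, 3 = L3 */
	unsigned int type;	/* data, instruction or unified */
	unsigned int id;	/* instance ID reported by firmware */
};

static bool leaves_may_share(const struct cache_leaf_example *a,
			     const struct cache_leaf_example *b)
{
	/* An ID match is only meaningful within one level and type; without
	 * this guard, an L1 leaf with ID 1 "matches" an L3 leaf with ID 1.
	 */
	if (a->level != b->level || a->type != b->type)
		return false;

	return a->id == b->id;
}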
Fixes: 198102c9103f ("cacheinfo: Fix shared_cpu_map to handle shared caches at different levels") Signed-off-by: K Prateek Nayak Reviewed-by: Sudeep Holla Link: https://lore.kernel.org/r/20230508084115.1157-2-kprateek.nayak@amd.com Signed-off-by: Greg Kroah-Hartman --- drivers/base/cacheinfo.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index bba3482ddeb82..d1ae443fd7a07 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -388,6 +388,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu) continue;/* skip if itself or no cacheinfo */ for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) { sib_leaf = per_cpu_cacheinfo_idx(i, sib_index); + + /* + * Comparing cache IDs only makes sense if the leaves + * belong to the same cache level of same type. Skip + * the check if level and type do not match. + */ + if (sib_leaf->level != this_leaf->level || + sib_leaf->type != this_leaf->type) + continue; + if (cache_leaves_are_shared(this_leaf, sib_leaf)) { cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); cpumask_set_cpu(i, &this_leaf->shared_cpu_map); @@ -419,6 +429,16 @@ static void cache_shared_cpu_map_remove(unsigned int cpu) for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) { sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index); + + /* + * Comparing cache IDs only makes sense if the leaves + * belong to the same cache level of same type. Skip + * the check if level and type do not match. + */ + if (sib_leaf->level != this_leaf->level || + sib_leaf->type != this_leaf->type) + continue; + if (cache_leaves_are_shared(this_leaf, sib_leaf)) { cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); From c26fabe73330d983c7ce822c6b6ec0879b4da61f Mon Sep 17 00:00:00 2001 From: K Prateek Nayak Date: Mon, 8 May 2023 14:11:15 +0530 Subject: [PATCH 241/303] drivers: base: cacheinfo: Update cpu_map_populated during CPU Hotplug Until commit 5c2712387d48 ("cacheinfo: Fix LLC is not exported through sysfs"), cacheinfo called populate_cache_leaves() for CPU coming online which let the arch specific functions handle (at least on x86) populating the shared_cpu_map. However, with the changes in the aforementioned commit, populate_cache_leaves() is not called when a CPU comes online as a result of hotplug since last_level_cache_is_valid() returns true as the cacheinfo data is not discarded. The CPU coming online is not present in shared_cpu_map, however, it will not be added since the cpu_cacheinfo->cpu_map_populated flag is set (it is set in populate_cache_leaves() when cacheinfo is first populated for x86) This can lead to inconsistencies in the shared_cpu_map when an offlined CPU comes online again. 
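A toy model of the flag's lifecycle after this change (the early return follows the behaviour described above rather than copying the kernel source; only the set and clear points correspond to the hunks below):

#include <stdbool.h>

/* Stand-in for cpu_cacheinfo->cpu_map_populated; map operations are elided. */
static bool cpu_map_populated;

static void shared_cpu_map_setup_model(void)
{
	if (cpu_map_populated)
		return;	/* as described above: population is skipped while the flag is set */

	/* ... add the CPU to its siblings' shared_cpu_map ... */

	cpu_map_populated = true;	/* set once the map has been (re)built */
}

static void shared_cpu_map_remove_model(void)
{
	/* ... remove the CPU from its siblings' shared_cpu_map ... */

	cpu_map_populated = false;	/* cleared so a later online pass rebuilds the map */
}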
Example below depicts the inconsistency in the shared_cpu_list in cacheinfo when CPU8 is offlined and onlined again on a 3rd Generation EPYC processor: # for i in /sys/devices/system/cpu/cpu8/cache/index*/shared_cpu_list; do echo -n "$i: "; cat $i; done /sys/devices/system/cpu/cpu8/cache/index0/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index1/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index2/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index3/shared_cpu_list: 8-15,136-143 # echo 0 > /sys/devices/system/cpu/cpu8/online # echo 1 > /sys/devices/system/cpu/cpu8/online # for i in /sys/devices/system/cpu/cpu8/cache/index*/shared_cpu_list; do echo -n "$i: "; cat $i; done /sys/devices/system/cpu/cpu8/cache/index0/shared_cpu_list: 8 /sys/devices/system/cpu/cpu8/cache/index1/shared_cpu_list: 8 /sys/devices/system/cpu/cpu8/cache/index2/shared_cpu_list: 8 /sys/devices/system/cpu/cpu8/cache/index3/shared_cpu_list: 8 # cat /sys/devices/system/cpu/cpu136/cache/index0/shared_cpu_list 136 # cat /sys/devices/system/cpu/cpu136/cache/index3/shared_cpu_list 9-15,136-143 Clear the flag when the CPU is removed from shared_cpu_map when cache_shared_cpu_map_remove() is called during CPU hotplug. This will allow cache_shared_cpu_map_setup() to add the CPU coming back online in the shared_cpu_map. Set the flag again when the shared_cpu_map is setup. Following are results of performing the same test as described above with the changes: # for i in /sys/devices/system/cpu/cpu8/cache/index*/shared_cpu_list; do echo -n "$i: "; cat $i; done /sys/devices/system/cpu/cpu8/cache/index0/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index1/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index2/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index3/shared_cpu_list: 8-15,136-143 # echo 0 > /sys/devices/system/cpu/cpu8/online # echo 1 > /sys/devices/system/cpu/cpu8/online # for i in /sys/devices/system/cpu/cpu8/cache/index*/shared_cpu_list; do echo -n "$i: "; cat $i; done /sys/devices/system/cpu/cpu8/cache/index0/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index1/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index2/shared_cpu_list: 8,136 /sys/devices/system/cpu/cpu8/cache/index3/shared_cpu_list: 8-15,136-143 # cat /sys/devices/system/cpu/cpu136/cache/index0/shared_cpu_list 8,136 # cat /sys/devices/system/cpu/cpu136/cache/index3/shared_cpu_list 8-15,136-143 Fixes: 5c2712387d48 ("cacheinfo: Fix LLC is not exported through sysfs") Signed-off-by: K Prateek Nayak Reviewed-by: Yicong Yang Reviewed-by: Sudeep Holla Link: https://lore.kernel.org/r/20230508084115.1157-3-kprateek.nayak@amd.com Signed-off-by: Greg Kroah-Hartman --- drivers/base/cacheinfo.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index d1ae443fd7a07..cbae8be1fe520 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -410,11 +410,14 @@ static int cache_shared_cpu_map_setup(unsigned int cpu) coherency_max_size = this_leaf->coherency_line_size; } + /* shared_cpu_map is now populated for the cpu */ + this_cpu_ci->cpu_map_populated = true; return 0; } static void cache_shared_cpu_map_remove(unsigned int cpu) { + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct cacheinfo *this_leaf, *sib_leaf; unsigned int sibling, index, sib_index; @@ -447,6 +450,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu) } } } + + /* cpu is no longer populated in the shared map */ + 
this_cpu_ci->cpu_map_populated = false; } static void free_cache_attributes(unsigned int cpu) From 6a07826f2057b5fa1c479ba56460195882464270 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Sun, 21 May 2023 09:24:00 +0800 Subject: [PATCH 242/303] drm/amd/pm: reverse mclk and fclk clocks levels for SMU v13.0.4 This patch reverses the DPM clocks levels output of pp_dpm_mclk and pp_dpm_fclk. On dGPUs and older APUs we expose the levels from lowest clocks to highest clocks. But for some APUs, the clocks levels that from the DFPstateTable are given the reversed orders by PMFW. Like the memory DPM clocks that are exposed by pp_dpm_mclk. It's not intuitive that they are reversed on these APUs. All tools and software that talks to the driver then has to know different ways to interpret the data depending on the asic. So we need to reverse them to expose the clocks levels from the driver consistently. Signed-off-by: Tim Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 8fa9a36c38b64..6d9760eac16d8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -478,7 +478,7 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu, static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; @@ -512,7 +512,8 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, break; for (i = 0; i < count; i++) { - ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) break; From c1d35412b3e826ae8119e3fb5f51dd0fa5b6b567 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Sun, 21 May 2023 10:28:05 +0800 Subject: [PATCH 243/303] drm/amd/pm: reverse mclk clocks levels for SMU v13.0.5 This patch reverses the DPM clocks levels output of pp_dpm_mclk. On dGPUs and older APUs we expose the levels from lowest clocks to highest clocks. But for some APUs, the clocks levels that from the DFPstateTable are given the reversed orders by PMFW. Like the memory DPM clocks that are exposed by pp_dpm_mclk. It's not intuitive that they are reversed on these APUs. All tools and software that talks to the driver then has to know different ways to interpret the data depending on the asic. So we need to reverse them to expose the clocks levels from the driver consistently. 
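The change is mechanically the same on each affected ASIC: walk the firmware-provided table backwards for the affected clock types so level 0 is always reported as the lowest clock. A self-contained illustration of the index flip with made-up frequencies (these are not values from any real PMFW table):

#include <stdio.h>

int main(void)
{
	/* Table as handed back by firmware on the affected APUs: highest first. */
	const unsigned int fw_levels_mhz[] = { 1600, 1200, 800, 400 };
	const unsigned int count = sizeof(fw_levels_mhz) / sizeof(fw_levels_mhz[0]);
	unsigned int i, idx;

	for (i = 0; i < count; i++) {
		/* Same flip as the driver uses for MCLK/FCLK: count - i - 1. */
		idx = count - i - 1;
		printf("%u: %uMhz\n", i, fw_levels_mhz[idx]);
	}

	return 0;
}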
Signed-off-by: Tim Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 66445964efbd1..0081fa607e02e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -866,7 +866,7 @@ static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu, static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min = 0, max = 0; @@ -898,7 +898,8 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, goto print_clk_out; for (i = 0; i < count; i++) { - ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) goto print_clk_out; From f1373a97a41f429e0095d4be388092ffa3c1a157 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Sun, 21 May 2023 10:35:59 +0800 Subject: [PATCH 244/303] drm/amd/pm: reverse mclk and fclk clocks levels for yellow carp This patch reverses the DPM clocks levels output of pp_dpm_mclk and pp_dpm_fclk. On dGPUs and older APUs we expose the levels from lowest clocks to highest clocks. But for some APUs, the clocks levels that from the DFPstateTable are given the reversed orders by PMFW. Like the memory DPM clocks that are exposed by pp_dpm_mclk. It's not intuitive that they are reversed on these APUs. All tools and software that talks to the driver then has to know different ways to interpret the data depending on the asic. So we need to reverse them to expose the clocks levels from the driver consistently. Signed-off-by: Tim Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 04e56b0b3033e..798f36cfcebd3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -1000,7 +1000,7 @@ static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu, static int yellow_carp_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; @@ -1033,7 +1033,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, goto print_clk_out; for (i = 0; i < count; i++) { - ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) goto print_clk_out; From bfc03568d9d81332382c73a1985a90c4506bd36c Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Sun, 21 May 2023 11:10:19 +0800 Subject: [PATCH 245/303] drm/amd/pm: reverse mclk and fclk clocks levels for vangogh This patch reverses the DPM clocks levels output of pp_dpm_mclk and pp_dpm_fclk. 
On dGPUs and older APUs we expose the levels from lowest clocks to highest clocks. But for some APUs, the clocks levels that from the DFPstateTable are given the reversed orders by PMFW. Like the memory DPM clocks that are exposed by pp_dpm_mclk. It's not intuitive that they are reversed on these APUs. All tools and software that talks to the driver then has to know different ways to interpret the data depending on the asic. So we need to reverse them to expose the clocks levels from the driver consistently. Signed-off-by: Tim Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 7433dcaa16e04..067b4e0b026c0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -582,7 +582,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_legacy_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -656,7 +656,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, case SMU_MCLK: case SMU_FCLK: for (i = 0; i < count; i++) { - ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) @@ -683,7 +684,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; uint32_t min, max; @@ -765,7 +766,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu, case SMU_MCLK: case SMU_FCLK: for (i = 0; i < count; i++) { - ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) From 55e02c14f9b5fd973ba32a16a715baa42617f9c6 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Mon, 22 May 2023 23:17:28 +0800 Subject: [PATCH 246/303] drm/amd/pm: reverse mclk and fclk clocks levels for renoir This patch reverses the DPM clocks levels output of pp_dpm_mclk and pp_dpm_fclk for renoir. On dGPUs and older APUs we expose the levels from lowest clocks to highest clocks. But for some APUs, the clocks levels are given the reversed orders by PMFW. Like the memory DPM clocks that are exposed by pp_dpm_mclk. It's not intuitive that they are reversed on these APUs. All tools and software that talks to the driver then has to know different ways to interpret the data depending on the asic. So we need to reverse them to expose the clocks levels from the driver consistently. 
Signed-off-by: Tim Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 5cdc07165480b..8a8ba25c9ad7c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) static int renoir_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; SmuMetrics_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); @@ -594,7 +594,8 @@ static int renoir_print_clk_levels(struct smu_context *smu, case SMU_VCLK: case SMU_DCLK: for (i = 0; i < count; i++) { - ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) From c14fb01c4629b96b64ab54caea7e543a0239f14e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Mon, 22 May 2023 15:08:22 +0200 Subject: [PATCH 247/303] Revert "drm/amd/display: Block optimize on consecutive FAMS enables" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit ce560ac40272a5c8b5b68a9d63a75edd9e66aed2. It depends on its parent commit, which we want to revert. Acked-by: Alex Deucher Signed-off-by: Michel Dänzer [Hamza: fix a whitespace issue in dcn30_prepare_bandwidth()] Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn20/dcn20_hwseq.c | 3 --- .../drm/amd/display/dc/dcn30/dcn30_hwseq.c | 22 +++---------------- 2 files changed, 3 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 422fbf79da64f..6ce10fd4bb1a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2117,9 +2117,6 @@ void dcn20_optimize_bandwidth( dc_dmub_srv_p_state_delegate(dc, true, context); context->bw_ctx.bw.dcn.clk.p_state_change_support = true; - dc->clk_mgr->clks.fw_based_mclk_switching = true; - } else { - dc->clk_mgr->clks.fw_based_mclk_switching = false; } dc->clk_mgr->funcs->update_clocks( diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 8263a07f265f2..f923224e85fc6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -983,13 +983,9 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, } void dcn30_prepare_bandwidth(struct dc *dc, - struct dc_state *context) + struct dc_state *context) { - bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support; - /* Any transition into an FPO config should disable MCLK switching first to avoid - * driver and FW P-State synchronization issues. 
- */ - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { dc->optimized_required = true; context->bw_ctx.bw.dcn.clk.p_state_change_support = false; } @@ -1000,19 +996,7 @@ void dcn30_prepare_bandwidth(struct dc *dc, dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); - /* - * enabled -> enabled: do not disable - * enabled -> disabled: disable - * disabled -> enabled: don't care - * disabled -> disabled: don't care - */ - if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) - dc_dmub_srv_p_state_delegate(dc, false, context); - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) { - /* After disabling P-State, restore the original value to ensure we get the correct P-State - * on the next optimize. */ - context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support; - } + dc_dmub_srv_p_state_delegate(dc, false, context); } From 8e1b45c578b799510f9a01a9745a737e74f43cb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Mon, 22 May 2023 15:08:23 +0200 Subject: [PATCH 248/303] Revert "drm/amd/display: Do not set drr on pipe commit" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 474f01015ffdb74e01c2eb3584a2822c64e7b2be. Caused a regression: Samsung Odyssey Neo G9, running at 5120x1440@240/VRR, connected to Navi 21 via DisplayPort, blanks and the GPU hangs while starting the Steam game Assetto Corsa Competizione (via Proton 7.0). Example dmesg excerpt: amdgpu 0000:0c:00.0: [drm] ERROR [CRTC:82:crtc-0] flip_done timed out NMI watchdog: Watchdog detected hard LOCKUP on cpu 6 [...] RIP: 0010:amdgpu_device_rreg.part.0+0x2f/0xf0 [amdgpu] Code: 41 54 44 8d 24 b5 00 00 00 00 55 89 f5 53 48 89 fb 4c 3b a7 60 0b 00 00 73 6a 83 e2 02 74 29 4c 03 a3 68 0b 00 00 45 8b 24 24 <48> 8b 43 08 0f b7 70 3e 66 90 44 89 e0 5b 5d 41 5c 31 d2 31 c9 31 RSP: 0000:ffffb39a119dfb88 EFLAGS: 00000086 RAX: ffffffffc0eb96a0 RBX: ffff9e7963dc0000 RCX: 0000000000007fff RDX: 0000000000000000 RSI: 0000000000004ff6 RDI: ffff9e7963dc0000 RBP: 0000000000004ff6 R08: ffffb39a119dfc40 R09: 0000000000000010 R10: ffffb39a119dfc40 R11: ffffb39a119dfc44 R12: 00000000000e05ae R13: 0000000000000000 R14: ffff9e7963dc0010 R15: 0000000000000000 FS: 000000001012f6c0(0000) GS:ffff9e805eb80000(0000) knlGS:000000007fd40000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000461ca000 CR3: 00000002a8a20000 CR4: 0000000000350ee0 Call Trace: dm_read_reg_func+0x37/0xc0 [amdgpu] generic_reg_get2+0x22/0x60 [amdgpu] optc1_get_crtc_scanoutpos+0x6a/0xc0 [amdgpu] dc_stream_get_scanoutpos+0x74/0x90 [amdgpu] dm_crtc_get_scanoutpos+0x82/0xf0 [amdgpu] amdgpu_display_get_crtc_scanoutpos+0x91/0x190 [amdgpu] ? 
dm_read_reg_func+0x37/0xc0 [amdgpu] amdgpu_get_vblank_counter_kms+0xb4/0x1a0 [amdgpu] dm_pflip_high_irq+0x213/0x2f0 [amdgpu] amdgpu_dm_irq_handler+0x8a/0x200 [amdgpu] amdgpu_irq_dispatch+0xd4/0x220 [amdgpu] amdgpu_ih_process+0x7f/0x110 [amdgpu] amdgpu_irq_handler+0x1f/0x70 [amdgpu] __handle_irq_event_percpu+0x46/0x1b0 handle_irq_event+0x34/0x80 handle_edge_irq+0x9f/0x240 __common_interrupt+0x66/0x110 common_interrupt+0x5c/0xd0 asm_common_interrupt+0x22/0x40 Reviewed-by: Aurabindo Pillai Acked-by: Alex Deucher Signed-off-by: Michel Dänzer Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 6 ------ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 7 ------- 2 files changed, 13 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 6ce10fd4bb1a4..5403e9399a465 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2113,12 +2113,6 @@ void dcn20_optimize_bandwidth( if (hubbub->funcs->program_compbuf_size) hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true); - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - dc_dmub_srv_p_state_delegate(dc, - true, context); - context->bw_ctx.bw.dcn.clk.p_state_change_support = true; - } - dc->clk_mgr->funcs->update_clocks( dc->clk_mgr, context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index f923224e85fc6..32121db2851e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -985,18 +985,11 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc, void dcn30_prepare_bandwidth(struct dc *dc, struct dc_state *context) { - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - dc->optimized_required = true; - context->bw_ctx.bw.dcn.clk.p_state_change_support = false; - } - if (dc->clk_mgr->dc_mode_softmax_enabled) if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); - - dc_dmub_srv_p_state_delegate(dc, false, context); } From ac1d8e2f074d9bffc2d368ad0720cdbb4c938fa5 Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Mon, 15 May 2023 21:45:51 -0400 Subject: [PATCH 249/303] drm/amdgpu: separate ras irq from vcn instance irq for UVD_POISON Separate vcn RAS poison consumption handling from the instance irq, and register dedicated ras_poison_irq src and funcs for UVD_POISON. 
v2: - Separate ras irq from vcn instance irq - Improve the subject and code comments v3: - Split the patch into three parts - Improve the code comments Suggested-by: Hawking Zhang Signed-off-by: Horatio Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 27 ++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 3 +++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index e63fcc58e8e06..2d94f1b63bd6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -1181,6 +1181,31 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev, return 0; } +int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r, i; + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + if (amdgpu_ras_is_supported(adev, ras_block->block)) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + + r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0); + if (r) + goto late_fini; + } + } + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + return r; +} + int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev) { int err; @@ -1202,7 +1227,7 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev) adev->vcn.ras_if = &ras->ras_block.ras_comm; if (!ras->ras_block.ras_late_init) - ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; + ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index c730949ece7d9..f1397ef66fd77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -234,6 +234,7 @@ struct amdgpu_vcn_inst { struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; atomic_t sched_score; struct amdgpu_irq_src irq; + struct amdgpu_irq_src ras_poison_irq; struct amdgpu_vcn_reg external; struct amdgpu_bo *dpg_sram_bo; struct dpg_pause_state pause_state; @@ -400,6 +401,8 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block); int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev); #endif From 6889f28c736c357700f5755fed852a2badc15a7b Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Mon, 15 May 2023 21:52:00 -0400 Subject: [PATCH 250/303] drm/amdgpu: add RAS POISON interrupt funcs for vcn_v2_6 Add ras_poison_irq and functions. 
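Taken together with the previous patch, the shape of the change is the same for every VCN and JPEG version touched later in this series. A condensed sketch of the hw_fini side, using the VCN field and helper names from the diffs below (power-gating handling omitted; the _sketch suffix marks this as an illustration, not the driver function):

static int vcn_hw_fini_sketch(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		/* Drop the poison irq reference only if RAS support means
		 * amdgpu_vcn_ras_late_init() took it in the first place;
		 * unconditional puts are what caused the amdgpu_irq_put
		 * call traces fixed later in the series.
		 */
		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
			amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
	}

	return 0;
}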
Suggested-by: Hawking Zhang Signed-off-by: Horatio Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index ab0b45d0ead18..515681c88dcbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -143,7 +143,7 @@ static int vcn_v2_5_sw_init(void *handle) /* VCN POISON TRAP */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j], - VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq); + VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq); if (r) return r; } @@ -354,6 +354,9 @@ static int vcn_v2_5_hw_fini(void *handle) (adev->vcn.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(VCN, i, mmUVD_STATUS))) vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0); } return 0; @@ -1807,6 +1810,14 @@ static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -1837,9 +1848,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); break; - case VCN_2_6__SRCID_UVD_POISON: - amdgpu_vcn_process_poison_irq(adev, source, entry); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -1854,6 +1862,11 @@ static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = { .process = vcn_v2_5_process_interrupt, }; +static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = { + .set = vcn_v2_6_set_ras_interrupt_state, + .process = amdgpu_vcn_process_poison_irq, +}; + static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) { int i; @@ -1863,6 +1876,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) continue; adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs; + + adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs; } } @@ -1965,6 +1981,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = { static struct amdgpu_vcn_ras vcn_v2_6_ras = { .ras_block = { .hw_ops = &vcn_v2_6_ras_hw_ops, + .ras_late_init = amdgpu_vcn_ras_late_init, }, }; From 020c76d983151f6f6c9493a3bbe83c1ec927617a Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Mon, 15 May 2023 22:09:43 -0400 Subject: [PATCH 251/303] drm/amdgpu: add RAS POISON interrupt funcs for vcn_v4_0 Add ras_poison_irq and functions. And fix the amdgpu_irq_put call trace in vcn_v4_0_hw_fini. 
[ 44.563572] RIP: 0010:amdgpu_irq_put+0xa4/0xc0 [amdgpu] [ 44.563629] RSP: 0018:ffffb36740edfc90 EFLAGS: 00010246 [ 44.563630] RAX: 0000000000000000 RBX: 0000000000000001 RCX: 0000000000000000 [ 44.563630] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 [ 44.563631] RBP: ffffb36740edfcb0 R08: 0000000000000000 R09: 0000000000000000 [ 44.563631] R10: 0000000000000000 R11: 0000000000000000 R12: ffff954c568e2ea8 [ 44.563631] R13: 0000000000000000 R14: ffff954c568c0000 R15: ffff954c568e2ea8 [ 44.563632] FS: 0000000000000000(0000) GS:ffff954f584c0000(0000) knlGS:0000000000000000 [ 44.563632] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 44.563633] CR2: 00007f028741ba70 CR3: 000000026ca10000 CR4: 0000000000750ee0 [ 44.563633] PKRU: 55555554 [ 44.563633] Call Trace: [ 44.563634] [ 44.563634] vcn_v4_0_hw_fini+0x62/0x160 [amdgpu] [ 44.563700] vcn_v4_0_suspend+0x13/0x30 [amdgpu] [ 44.563755] amdgpu_device_ip_suspend_phase2+0x240/0x470 [amdgpu] [ 44.563806] amdgpu_device_ip_suspend+0x41/0x80 [amdgpu] [ 44.563858] amdgpu_device_pre_asic_reset+0xd9/0x4a0 [amdgpu] [ 44.563909] amdgpu_device_gpu_recover.cold+0x548/0xcf1 [amdgpu] [ 44.564006] amdgpu_debugfs_reset_work+0x4c/0x80 [amdgpu] [ 44.564061] process_one_work+0x21f/0x400 [ 44.564062] worker_thread+0x200/0x3f0 [ 44.564063] ? process_one_work+0x400/0x400 [ 44.564064] kthread+0xee/0x120 [ 44.564065] ? kthread_complete_and_exit+0x20/0x20 [ 44.564066] ret_from_fork+0x22/0x30 Suggested-by: Hawking Zhang Signed-off-by: Horatio Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c | 36 ++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index bf0674039598d..e5fd1e00914d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -139,7 +139,7 @@ static int vcn_v4_0_sw_init(void *handle) /* VCN POISON TRAP */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], - VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq); + VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq); if (r) return r; @@ -305,8 +305,8 @@ static int vcn_v4_0_hw_fini(void *handle) vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE); } } - - amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0); } return 0; @@ -1975,6 +1975,24 @@ static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgp return 0; } +/** + * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state + * + * @adev: amdgpu_device pointer + * @source: interrupt sources + * @type: interrupt types + * @state: interrupt states + * + * Set VCN block RAS interrupt state + */ +static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + /** * vcn_v4_0_process_interrupt - process VCN block interrupt * @@ -2007,9 +2025,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_ case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE: amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); break; - case VCN_4_0__SRCID_UVD_POISON: - amdgpu_vcn_process_poison_irq(adev, source, entry); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -2024,6 +2039,11 @@ static const 
struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = { .process = vcn_v4_0_process_interrupt, }; +static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = { + .set = vcn_v4_0_set_ras_interrupt_state, + .process = amdgpu_vcn_process_poison_irq, +}; + /** * vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions * @@ -2041,6 +2061,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev) adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs; + + adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1; + adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs; } } @@ -2114,6 +2137,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = { static struct amdgpu_vcn_ras vcn_v4_0_ras = { .ras_block = { .hw_ops = &vcn_v4_0_ras_hw_ops, + .ras_late_init = amdgpu_vcn_ras_late_init, }, }; From ce784421a3e15fd89d5fc1b9da7d846dd8309661 Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Mon, 15 May 2023 22:57:19 -0400 Subject: [PATCH 252/303] drm/amdgpu: separate ras irq from jpeg instance irq for UVD_POISON Separate jpegbRAS poison consumption handling from the instance irq, and register dedicated ras_poison_irq src and funcs for UVD_POISON. v2: - Separate ras irq from jpeg instance irq - Improve the subject and code comments v3: - Split the patch into three parts - Improve the code comments Suggested-by: Hawking Zhang Signed-off-by: Horatio Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 27 +++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h | 3 +++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index b07c000fc8ba3..4fa019c8aefc4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -241,6 +241,31 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev, return 0; } +int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r, i; + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + if (amdgpu_ras_is_supported(adev, ras_block->block)) { + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0); + if (r) + goto late_fini; + } + } + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + return r; +} + int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev) { int err; @@ -262,7 +287,7 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev) adev->jpeg.ras_if = &ras->ras_block.ras_comm; if (!ras->ras_block.ras_late_init) - ras->ras_block.ras_late_init = amdgpu_ras_block_late_init; + ras->ras_block.ras_late_init = amdgpu_jpeg_ras_late_init; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h index 0ca76f0f23e9c..1471a1ebb0344 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h @@ -38,6 +38,7 @@ struct amdgpu_jpeg_reg{ struct amdgpu_jpeg_inst { struct amdgpu_ring ring_dec; struct amdgpu_irq_src irq; + struct amdgpu_irq_src ras_poison_irq; struct amdgpu_jpeg_reg external; }; @@ -72,6 +73,8 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout); int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct 
amdgpu_iv_entry *entry); +int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block); int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev); #endif /*__AMDGPU_JPEG_H__*/ From 30b2d778f629d51e2ff30beb6d060a0bd7f70104 Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Mon, 15 May 2023 22:59:54 -0400 Subject: [PATCH 253/303] drm/amdgpu: add RAS POISON interrupt funcs for jpeg_v2_6 Add ras_poison_irq and functions. Suggested-by: Hawking Zhang Signed-off-by: Horatio Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 28 ++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index b040f51d9aa9d..73e0dc5a10cd4 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -102,13 +102,13 @@ static int jpeg_v2_5_sw_init(void *handle) /* JPEG DJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], - VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq); + VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq); if (r) return r; /* JPEG EJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i], - VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq); + VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq); if (r) return r; } @@ -221,6 +221,9 @@ static int jpeg_v2_5_hw_fini(void *handle) if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS)) jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) + amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0); } return 0; @@ -569,6 +572,14 @@ static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int jpeg_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -593,10 +604,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, case VCN_2_0__SRCID__JPEG_DECODE: amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec); break; - case VCN_2_6__SRCID_DJPEG0_POISON: - case VCN_2_6__SRCID_EJPEG0_POISON: - amdgpu_jpeg_process_poison_irq(adev, source, entry); - break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -725,6 +732,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = { .process = jpeg_v2_5_process_interrupt, }; +static const struct amdgpu_irq_src_funcs jpeg_v2_6_ras_irq_funcs = { + .set = jpeg_v2_6_set_ras_interrupt_state, + .process = amdgpu_jpeg_process_poison_irq, +}; + static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) { int i; @@ -735,6 +747,9 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev) adev->jpeg.inst[i].irq.num_types = 1; adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs; + + adev->jpeg.inst[i].ras_poison_irq.num_types = 1; + adev->jpeg.inst[i].ras_poison_irq.funcs = &jpeg_v2_6_ras_irq_funcs; } } @@ -800,6 +815,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = { static struct amdgpu_jpeg_ras jpeg_v2_6_ras = { .ras_block = { .hw_ops = &jpeg_v2_6_ras_hw_ops, + .ras_late_init = amdgpu_jpeg_ras_late_init, }, }; From 
bc3e1d60f933f823599376f830eb99451afb995a Mon Sep 17 00:00:00 2001 From: Horatio Zhang Date: Mon, 15 May 2023 23:02:10 -0400 Subject: [PATCH 254/303] drm/amdgpu: add RAS POISON interrupt funcs for jpeg_v4_0 Add ras_poison_irq and functions. And fix the amdgpu_irq_put call trace in jpeg_v4_0_hw_fini. [ 50.497562] RIP: 0010:amdgpu_irq_put+0xa4/0xc0 [amdgpu] [ 50.497619] RSP: 0018:ffffaa2400fcfcb0 EFLAGS: 00010246 [ 50.497620] RAX: 0000000000000000 RBX: 0000000000000001 RCX: 0000000000000000 [ 50.497621] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 [ 50.497621] RBP: ffffaa2400fcfcd0 R08: 0000000000000000 R09: 0000000000000000 [ 50.497622] R10: 0000000000000000 R11: 0000000000000000 R12: ffff99b2105242d8 [ 50.497622] R13: 0000000000000000 R14: ffff99b210500000 R15: ffff99b210500000 [ 50.497623] FS: 0000000000000000(0000) GS:ffff99b518480000(0000) knlGS:0000000000000000 [ 50.497623] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 50.497624] CR2: 00007f9d32aa91e8 CR3: 00000001ba210000 CR4: 0000000000750ee0 [ 50.497624] PKRU: 55555554 [ 50.497625] Call Trace: [ 50.497625] [ 50.497627] jpeg_v4_0_hw_fini+0x43/0xc0 [amdgpu] [ 50.497693] jpeg_v4_0_suspend+0x13/0x30 [amdgpu] [ 50.497751] amdgpu_device_ip_suspend_phase2+0x240/0x470 [amdgpu] [ 50.497802] amdgpu_device_ip_suspend+0x41/0x80 [amdgpu] [ 50.497854] amdgpu_device_pre_asic_reset+0xd9/0x4a0 [amdgpu] [ 50.497905] amdgpu_device_gpu_recover.cold+0x548/0xcf1 [amdgpu] [ 50.498005] amdgpu_debugfs_reset_work+0x4c/0x80 [amdgpu] [ 50.498060] process_one_work+0x21f/0x400 [ 50.498063] worker_thread+0x200/0x3f0 [ 50.498064] ? process_one_work+0x400/0x400 [ 50.498065] kthread+0xee/0x120 [ 50.498067] ? kthread_complete_and_exit+0x20/0x20 [ 50.498068] ret_from_fork+0x22/0x30 Suggested-by: Hawking Zhang Signed-off-by: Horatio Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c | 28 +++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 77e1e64aa1d1c..a3d83c9f2c9a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -87,13 +87,13 @@ static int jpeg_v4_0_sw_init(void *handle) /* JPEG DJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq); + VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); if (r) return r; /* JPEG EJPEG POISON EVENT */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq); + VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq); if (r) return r; @@ -202,7 +202,8 @@ static int jpeg_v4_0_hw_fini(void *handle) RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS)) jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE); } - amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0); + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) + amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0); return 0; } @@ -670,6 +671,14 @@ static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev, return 0; } +static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -680,10 +689,6 @@ static int jpeg_v4_0_process_interrupt(struct 
amdgpu_device *adev, case VCN_4_0__SRCID__JPEG_DECODE: amdgpu_fence_process(&adev->jpeg.inst->ring_dec); break; - case VCN_4_0__SRCID_DJPEG0_POISON: - case VCN_4_0__SRCID_EJPEG0_POISON: - amdgpu_jpeg_process_poison_irq(adev, source, entry); - break; default: DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); @@ -753,10 +758,18 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = { .process = jpeg_v4_0_process_interrupt, }; +static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = { + .set = jpeg_v4_0_set_ras_interrupt_state, + .process = amdgpu_jpeg_process_poison_irq, +}; + static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev) { adev->jpeg.inst->irq.num_types = 1; adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs; + + adev->jpeg.inst->ras_poison_irq.num_types = 1; + adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs; } const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = { @@ -811,6 +824,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = { static struct amdgpu_jpeg_ras jpeg_v4_0_ras = { .ras_block = { .hw_ops = &jpeg_v4_0_ras_hw_ops, + .ras_late_init = amdgpu_jpeg_ras_late_init, }, }; From e490d60a2f76bff636c68ce4fe34c1b6c34bbd86 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Wed, 24 May 2023 15:03:02 +0800 Subject: [PATCH 255/303] drm/amd/pm: resolve reboot exception for si oland During reboot test on arm64 platform, it may failure on boot. The error message are as follows: [ 1.706570][ 3] [ T273] [drm:si_thermal_enable_alert [amdgpu]] *ERROR* Could not enable thermal interrupts. [ 1.716547][ 3] [ T273] [drm:amdgpu_device_ip_late_init [amdgpu]] *ERROR* late_init of IP block failed -22 [ 1.727064][ 3] [ T273] amdgpu 0000:02:00.0: amdgpu_device_ip_late_init failed [ 1.734367][ 3] [ T273] amdgpu 0000:02:00.0: Fatal error during GPU init v2: squash in built warning fix (Alex) Signed-off-by: Zhenneng Li Reviewed-by: Guchun Chen Signed-off-by: Guchun Chen Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 29 ---------------------- 1 file changed, 29 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index d6d9e3b1b2c0e..02e69ccff3bac 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -6925,23 +6925,6 @@ static int si_dpm_enable(struct amdgpu_device *adev) return 0; } -static int si_set_temperature_range(struct amdgpu_device *adev) -{ - int ret; - - ret = si_thermal_enable_alert(adev, false); - if (ret) - return ret; - ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); - if (ret) - return ret; - ret = si_thermal_enable_alert(adev, true); - if (ret) - return ret; - - return ret; -} - static void si_dpm_disable(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); @@ -7626,18 +7609,6 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev, static int si_dpm_late_init(void *handle) { - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->pm.dpm_enabled) - return 0; - - ret = si_set_temperature_range(adev); - if (ret) - return ret; -#if 0 //TODO ? 
- si_dpm_powergate_uvd(adev, true); -#endif return 0; } From 663b930e24842f3d3bb79418bb5cd8d01b40c559 Mon Sep 17 00:00:00 2001 From: Ikshwaku Chauhan Date: Thu, 25 May 2023 10:57:26 +0530 Subject: [PATCH 256/303] drm/amdgpu: enable tmz by default for GC 11.0.1 Add IP GC 11.0.1 to the list of targets that have tmz enabled by default. Signed-off-by: Ikshwaku Chauhan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org # 6.1.x --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 4e2531758866c..95b0f984acbfd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -593,6 +593,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(9, 3, 0): /* GC 10.3.7 */ case IP_VERSION(10, 3, 7): + /* GC 11.0.1 */ + case IP_VERSION(11, 0, 1): if (amdgpu_tmz == 0) { adev->gmc.tmz_enabled = false; dev_info(adev->dev, @@ -616,7 +618,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(10, 3, 1): /* YELLOW_CARP*/ case IP_VERSION(10, 3, 3): - case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): /* Don't enable it by default yet. */ From 1c4c769cdf682c63d3b10cb241f4a96ebad2f215 Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Tue, 30 May 2023 11:59:34 +0300 Subject: [PATCH 257/303] net/mlx5: Remove rmap also in case dynamic MSIX not supported mlx5 adds IRQs to the rmap upon MSIX request, but removes them from the rmap only if msi_map.index is populated. However, msi_map.index is populated only when dynamic MSIX is supported. This results in freeing IRQs without removing them from the rmap, which triggers the below WARN_ON[1]. rmap is a feature which has no relation to dynamic MSIX. Hence, remove the check of msi_map.index when removing the IRQ from the rmap.
[1] [ 200.307160 ] WARNING: CPU: 20 PID: 1702 at kernel/irq/manage.c:2034 free_irq+0x2ac/0x358 [ 200.316990 ] CPU: 20 PID: 1702 Comm: modprobe Not tainted 6.4.0-rc3_for_upstream_min_debug_2023_05_24_14_02 #1 [ 200.318939 ] Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 [ 200.321659 ] pc : free_irq+0x2ac/0x358 [ 200.322400 ] lr : free_irq+0x20/0x358 [ 200.337865 ] Call trace: [ 200.338360 ] free_irq+0x2ac/0x358 [ 200.339029 ] irq_release+0x58/0xd0 [mlx5_core] [ 200.340093 ] mlx5_irqs_release_vectors+0x80/0xb0 [mlx5_core] [ 200.341344 ] destroy_comp_eqs+0x120/0x170 [mlx5_core] [ 200.342469 ] mlx5_eq_table_destroy+0x1c/0x38 [mlx5_core] [ 200.343645 ] mlx5_unload+0x8c/0xc8 [mlx5_core] [ 200.344652 ] mlx5_uninit_one+0x78/0x118 [mlx5_core] [ 200.345745 ] remove_one+0x80/0x108 [mlx5_core] [ 200.346752 ] pci_device_remove+0x40/0xd8 [ 200.347554 ] device_remove+0x50/0x88 [ 200.348272 ] device_release_driver_internal+0x1c4/0x228 [ 200.349312 ] driver_detach+0x54/0xa0 [ 200.350030 ] bus_remove_driver+0x74/0x100 [ 200.350833 ] driver_unregister+0x34/0x68 [ 200.351619 ] pci_unregister_driver+0x28/0xa0 [ 200.352476 ] mlx5_cleanup+0x14/0x2210 [mlx5_core] [ 200.353536 ] __arm64_sys_delete_module+0x190/0x2e8 [ 200.354495 ] el0_svc_common.constprop.0+0x6c/0x1d0 [ 200.355455 ] do_el0_svc+0x38/0x98 [ 200.356122 ] el0_svc+0x1c/0x80 [ 200.356739 ] el0t_64_sync_handler+0xb4/0x130 [ 200.357604 ] el0t_64_sync+0x174/0x178 [ 200.358345 ] ---[ end trace 0000000000000000 ]--- Fixes: 3354822cde5a ("net/mlx5: Use dynamic msix vectors allocation") Signed-off-by: Shay Drory Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index db5687d9fec97..86ac4a85fd878 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -141,7 +141,7 @@ static void irq_release(struct mlx5_irq *irq) irq_update_affinity_hint(irq->map.virq, NULL); #ifdef CONFIG_RFS_ACCEL rmap = mlx5_eq_table_get_rmap(pool->dev); - if (rmap && irq->map.index) + if (rmap) irq_cpu_rmap_remove(rmap, irq->map.virq); #endif From 8764bd0fa5d402c51b136f6aeaba20fc16961ba1 Mon Sep 17 00:00:00 2001 From: Niklas Schnelle Date: Wed, 31 May 2023 10:48:56 +0200 Subject: [PATCH 258/303] net/mlx5: Fix setting of irq->map.index for static IRQ case MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When dynamic IRQ allocation is not supported all IRQs are allocated up front in mlx5_irq_table_create() instead of dynamically as part of mlx5_irq_alloc(). In the latter dynamic case irq->map.index is set via the mapping returned by pci_msix_alloc_irq_at(). In the static case and prior to commit 1da438c0ae02 ("net/mlx5: Fix indexing of mlx5_irq") irq->map.index was set in mlx5_irq_alloc() twice once initially to 0 and then to the requested index before storing in the xarray. After this commit it is only set to 0 which breaks all other IRQ mappings. Fix this by setting irq->map.index to the requested index together with irq->map.virq and improve the related comment to make it clearer which cases it deals with. 
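For illustration, this is the intended shape of the static path (a condensed sketch, not a verbatim copy of mlx5_irq_alloc(); the xarray name pool->irqs is an assumption, used only to show why forcing index 0 aliases every vector):

  if (!i || !pci_msix_can_alloc_dyn(dev->pdev)) {
          /* vector i was allocated up front, only look up its irq number */
          irq->map.virq = pci_irq_vector(dev->pdev, i);
          irq->map.index = i;     /* key by the requested index, not 0 */
  } else {
          irq->map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, af_desc);
  }
  /* assumed storage step: xa_store(&pool->irqs, irq->map.index, irq, GFP_KERNEL);
   * with the index forced to 0, every statically allocated vector would be
   * stored and looked up as entry 0, breaking all other IRQ mappings.
   */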
Cc: Chuck Lever III Tested-by: Mark Brown Reviewed-by: Mark Brown Reviewed-by: Simon Horman Reviewed-by: Eli Cohen Fixes: 1da438c0ae02 ("net/mlx5: Fix indexing of mlx5_irq") Signed-off-by: Niklas Schnelle Tested-by: Cédric Le Goater Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 86ac4a85fd878..38edd485ba6f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -232,12 +232,13 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, if (!irq) return ERR_PTR(-ENOMEM); if (!i || !pci_msix_can_alloc_dyn(dev->pdev)) { - /* The vector at index 0 was already allocated. - * Just get the irq number. If dynamic irq is not supported - * vectors have also been allocated. + /* The vector at index 0 is always statically allocated. If + * dynamic irq is not supported all vectors are statically + * allocated. In both cases just get the irq number and set + * the index. */ irq->map.virq = pci_irq_vector(dev->pdev, i); - irq->map.index = 0; + irq->map.index = i; } else { irq->map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, af_desc); if (!irq->map.virq) { From 368591995d010e639ad8f28b27f1b721f0872342 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 31 May 2023 15:48:25 -0400 Subject: [PATCH 259/303] net/mlx5: Ensure af_desc.mask is properly initialized [ 9.837087] mlx5_core 0000:02:00.0: firmware version: 16.35.2000 [ 9.843126] mlx5_core 0000:02:00.0: 126.016 Gb/s available PCIe bandwidth (8.0 GT/s PCIe x16 link) [ 10.311515] mlx5_core 0000:02:00.0: Rate limit: 127 rates are supported, range: 0Mbps to 97656Mbps [ 10.321948] mlx5_core 0000:02:00.0: E-Switch: Total vports 2, per vport: max uc(128) max mc(2048) [ 10.344324] mlx5_core 0000:02:00.0: mlx5_pcie_event:301:(pid 88): PCIe slot advertised sufficient power (27W). 
[ 10.354339] BUG: unable to handle page fault for address: ffffffff8ff0ade0 [ 10.361206] #PF: supervisor read access in kernel mode [ 10.366335] #PF: error_code(0x0000) - not-present page [ 10.371467] PGD 81ec39067 P4D 81ec39067 PUD 81ec3a063 PMD 114b07063 PTE 800ffff7e10f5062 [ 10.379544] Oops: 0000 [#1] PREEMPT SMP PTI [ 10.383721] CPU: 0 PID: 117 Comm: kworker/0:6 Not tainted 6.3.0-13028-g7222f123c983 #1 [ 10.391625] Hardware name: Supermicro X10SRA-F/X10SRA-F, BIOS 2.0b 06/12/2017 [ 10.398750] Workqueue: events work_for_cpu_fn [ 10.403108] RIP: 0010:__bitmap_or+0x10/0x26 [ 10.407286] Code: 85 c0 0f 95 c0 c3 cc cc cc cc 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 89 c9 31 c0 48 83 c1 3f 48 c1 e9 06 39 c> [ 10.426024] RSP: 0000:ffffb45a0078f7b0 EFLAGS: 00010097 [ 10.431240] RAX: 0000000000000000 RBX: ffffffff8ff0adc0 RCX: 0000000000000004 [ 10.438365] RDX: ffff9156801967d0 RSI: ffffffff8ff0ade0 RDI: ffff9156801967b0 [ 10.445489] RBP: ffffb45a0078f7e8 R08: 0000000000000030 R09: 0000000000000000 [ 10.452613] R10: 0000000000000000 R11: 0000000000000000 R12: 00000000000000ec [ 10.459737] R13: ffffffff8ff0ade0 R14: 0000000000000001 R15: 0000000000000020 [ 10.466862] FS: 0000000000000000(0000) GS:ffff9165bfc00000(0000) knlGS:0000000000000000 [ 10.474936] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 10.480674] CR2: ffffffff8ff0ade0 CR3: 00000001011ae003 CR4: 00000000003706f0 [ 10.487800] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 10.494922] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 10.502046] Call Trace: [ 10.504493] [ 10.506589] ? matrix_alloc_area.constprop.0+0x43/0x9a [ 10.511729] ? prepare_namespace+0x84/0x174 [ 10.515914] irq_matrix_reserve_managed+0x56/0x10c [ 10.520699] x86_vector_alloc_irqs+0x1d2/0x31e [ 10.525146] irq_domain_alloc_irqs_hierarchy+0x39/0x3f [ 10.530284] irq_domain_alloc_irqs_parent+0x1a/0x2a [ 10.535155] intel_irq_remapping_alloc+0x59/0x5e9 [ 10.539859] ? kmem_cache_debug_flags+0x11/0x26 [ 10.544383] ? __radix_tree_lookup+0x39/0xb9 [ 10.548649] irq_domain_alloc_irqs_hierarchy+0x39/0x3f [ 10.553779] irq_domain_alloc_irqs_parent+0x1a/0x2a [ 10.558650] msi_domain_alloc+0x8c/0x120 [ 10.567697] irq_domain_alloc_irqs_locked+0x11d/0x286 [ 10.572741] __irq_domain_alloc_irqs+0x72/0x93 [ 10.577179] __msi_domain_alloc_irqs+0x193/0x3f1 [ 10.581789] ? __xa_alloc+0xcf/0xe2 [ 10.585273] msi_domain_alloc_irq_at+0xa8/0xfe [ 10.589711] pci_msix_alloc_irq_at+0x47/0x5c The crash is due to matrix_alloc_area() attempting to access per-CPU memory for CPUs that are not present on the system. The CPU mask passed into reserve_managed_vector() via it's @irqd parameter is corrupted because it contains uninitialized stack data. 
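Condensed, the safe pattern looks like this (a sketch of the loop changed in the diff below; surrounding declarations are abbreviated):

  struct irq_affinity_desc af_desc;   /* stack variable: garbage until cleared */

  af_desc.is_managed = false;
  for (i = 0; i < nirqs; i++) {
          /* reset the whole mask on every iteration; without this the
           * first request hands uninitialized stack bits to the irq
           * core, which then touches per-CPU data of absent CPUs
           */
          cpumask_clear(&af_desc.mask);
          cpumask_set_cpu(cpus[i], &af_desc.mask);
          irq = mlx5_irq_request(dev, i + 1, &af_desc, rmap);
          if (IS_ERR(irq))
                  break;
          irqs[i] = irq;
  }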
Fixes: bbac70c74183 ("net/mlx5: Use newer affinity descriptor") Reviewed-by: Thomas Gleixner Signed-off-by: Chuck Lever Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 38edd485ba6f3..843da89a90350 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -571,11 +571,11 @@ int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs, af_desc.is_managed = false; for (i = 0; i < nirqs; i++) { + cpumask_clear(&af_desc.mask); cpumask_set_cpu(cpus[i], &af_desc.mask); irq = mlx5_irq_request(dev, i + 1, &af_desc, rmap); if (IS_ERR(irq)) break; - cpumask_clear(&af_desc.mask); irqs[i] = irq; } From b6193d7030e3c59f1d4c75648c9c8fa40cad2bcd Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Sat, 27 May 2023 23:07:08 -0700 Subject: [PATCH 260/303] net/mlx5e: Fix error handling in mlx5e_refresh_tirs Allocation failure is outside the critical lock section and should return immediately rather than jumping to the unlock section. Also unlock as soon as required and remove the now redundant jump label. Fixes: 80a2a9026b24 ("net/mlx5e: Add a lock on tir list") Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 1f90594499c60..41c396e764579 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -150,10 +150,8 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = kvzalloc(inlen, GFP_KERNEL); - if (!in) { - err = -ENOMEM; - goto out; - } + if (!in) + return -ENOMEM; if (enable_uc_lb) lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; @@ -171,14 +169,13 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, tirn = tir->tirn; err = mlx5_core_modify_tir(mdev, tirn, in); if (err) - goto out; + break; } + mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock); -out: kvfree(in); if (err) netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); - mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock); return err; } From bbfa4b58997e3d38ba629c9f6fc0bd1c163aaf43 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Fri, 28 Apr 2023 13:48:13 +0300 Subject: [PATCH 261/303] net/mlx5: Read embedded cpu after init bit cleared During driver load it reads embedded_cpu bit from initialization segment, but the initialization segment is readable only after initialization bit is cleared. Move the call to mlx5_read_embedded_cpu() right after initialization bit cleared. 
Signed-off-by: Moshe Shemesh Fixes: 591905ba9679 ("net/mlx5: Introduce Mellanox SmartNIC and modify page management logic") Reviewed-by: Shay Drory Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2132a65106391..d6ee016deae17 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -923,7 +923,6 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev, } mlx5_pci_vsc_init(dev); - dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev); return 0; err_clr_master: @@ -1155,6 +1154,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout goto err_cmd_cleanup; } + dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP); mlx5_start_health_poll(dev); From 622ab656344a288acf4fb03d628c3bb5dd241f34 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 May 2023 21:25:27 +0100 Subject: [PATCH 262/303] sfc: fix error unwinds in TC offload Failure ladders weren't exactly unwinding what the function had done up to that point; most seriously, when we encountered an already offloaded rule, the failure path tried to remove the new rule from the hashtable, which would in fact remove the already-present 'old' rule (since it has the same key) from the table, and leak its resources. Reported-by: kernel test robot Reported-by: Dan Carpenter Closes: https://lore.kernel.org/r/202305200745.xmIlkqjH-lkp@intel.com/ Fixes: d902e1a737d4 ("sfc: bare bones TC offload on EF100") Fixes: 17654d84b47c ("sfc: add offloading of 'foreign' TC (decap) rules") Signed-off-by: Edward Cree Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230530202527.53115-1-edward.cree@amd.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/sfc/tc.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c index 0327639a628a9..c004443c1d58c 100644 --- a/drivers/net/ethernet/sfc/tc.c +++ b/drivers/net/ethernet/sfc/tc.c @@ -624,13 +624,12 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, if (!found) { /* We don't care. 
*/ netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign filter that doesn't egdev us\n"); - rc = -EOPNOTSUPP; - goto release; + return -EOPNOTSUPP; } rc = efx_mae_match_check_caps(efx, &match.mask, NULL); if (rc) - goto release; + return rc; if (efx_tc_match_is_encap(&match.mask)) { enum efx_encap_type type; @@ -639,8 +638,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, if (type == EFX_ENCAP_TYPE_NONE) { NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device"); - rc = -EOPNOTSUPP; - goto release; + return -EOPNOTSUPP; } rc = efx_mae_check_encap_type_supported(efx, type); @@ -648,25 +646,24 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, NL_SET_ERR_MSG_FMT_MOD(extack, "Firmware reports no support for %s encap match", efx_tc_encap_type_name(type)); - goto release; + return rc; } rc = efx_tc_flower_record_encap_match(efx, &match, type, extack); if (rc) - goto release; + return rc; } else { /* This is not a tunnel decap rule, ignore it */ netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign filter without encap match\n"); - rc = -EOPNOTSUPP; - goto release; + return -EOPNOTSUPP; } rule = kzalloc(sizeof(*rule), GFP_USER); if (!rule) { rc = -ENOMEM; - goto release; + goto out_free; } INIT_LIST_HEAD(&rule->acts.list); rule->cookie = tc->cookie; @@ -678,7 +675,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, "Ignoring already-offloaded rule (cookie %lx)\n", tc->cookie); rc = -EEXIST; - goto release; + goto out_free; } act = kzalloc(sizeof(*act), GFP_USER); @@ -843,6 +840,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx, efx_tc_match_action_ht_params); efx_tc_free_action_set_list(efx, &rule->acts, false); } +out_free: kfree(rule); if (match.encap) efx_tc_flower_release_encap_match(efx, match.encap); @@ -899,8 +897,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx, return rc; if (efx_tc_match_is_encap(&match.mask)) { NL_SET_ERR_MSG_MOD(extack, "Ingress enc_key matches not supported"); - rc = -EOPNOTSUPP; - goto release; + return -EOPNOTSUPP; } if (tc->common.chain_index) { @@ -924,9 +921,9 @@ static int efx_tc_flower_replace(struct efx_nic *efx, if (old) { netif_dbg(efx, drv, efx->net_dev, "Already offloaded rule (cookie %lx)\n", tc->cookie); - rc = -EEXIST; NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); - goto release; + kfree(rule); + return -EEXIST; } /* Parse actions */ From 62fe398761cd06a428e6f367aba84732a2f1c268 Mon Sep 17 00:00:00 2001 From: Ashutosh Dixit Date: Tue, 23 May 2023 13:40:42 -0700 Subject: [PATCH 263/303] drm/i915/perf: Clear out entire reports after reading if not power of 2 size Clearing out report id and timestamp as means to detect unlanded reports only works if report size is power of 2. That is, only when report size is a sub-multiple of the OA buffer size can we be certain that reports will land at the same place each time in the OA buffer (after rewind). If report size is not a power of 2, we need to zero out the entire report to be able to detect unlanded reports reliably. 
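The drift is easy to see with toy numbers; a stand-alone sketch (buffer and report sizes here are illustrative, not the hardware's):

  #include <stdio.h>

  int main(void)
  {
          const unsigned int buf = 4096;              /* toy OA buffer size */
          const unsigned int sizes[2] = { 256, 192 }; /* power of 2 vs. not */

          for (int s = 0; s < 2; s++) {
                  unsigned int size = sizes[s];
                  unsigned int i = (buf + size - 1) / size; /* first report after the wrap */

                  printf("size %u, offsets after one wrap:", size);
                  for (int n = 0; n < 4; n++, i++)
                          printf(" %u", (i * size) % buf);
                  printf("\n");
          }
          /* size 256 lands on 0 256 512 768 again, exactly over the slots
           * whose id/timestamp were cleared; size 192 lands on 128 320 512
           * 704, so the previously cleared header words are not rewritten
           * in place on the next lap and cannot serve as a landing marker.
           */
          return 0;
  }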
v2: Add Fixes tag (Umesh) Fixes: 1cc064dce4ed ("drm/i915/perf: Add support for OA media units") Reviewed-by: Umesh Nerlige Ramappa Reviewed-by: Lionel Landwerlin Signed-off-by: Ashutosh Dixit Link: https://patchwork.freedesktop.org/patch/msgid/20230523204042.4180641-1-ashutosh.dixit@intel.com (cherry picked from commit 09a36015d9a0940214c080f95afc605c47648bbd) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_perf.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 050b8ae7b8e70..3035cba2c6a29 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -877,12 +877,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, stream->oa_buffer.last_ctx_id = ctx_id; } - /* - * Clear out the report id and timestamp as a means to detect unlanded - * reports. - */ - oa_report_id_clear(stream, report32); - oa_timestamp_clear(stream, report32); + if (is_power_of_2(report_size)) { + /* + * Clear out the report id and timestamp as a means + * to detect unlanded reports. + */ + oa_report_id_clear(stream, report32); + oa_timestamp_clear(stream, report32); + } else { + /* Zero out the entire report */ + memset(report32, 0, report_size); + } } if (start_offset != *offset) { From b3fc95709c54ffbe80f16801e0a792a4d2b3d55e Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Fri, 26 May 2023 16:53:59 +0800 Subject: [PATCH 264/303] iommu/mediatek: Flush IOTLB completely only if domain has been attached If an IOMMU domain was never attached, it lacks any linkage to the actual IOMMU hardware. Attempting to do flush_iotlb_all() on it will result in a NULL pointer dereference. This seems to happen after the recent IOMMU core rework in v6.4-rc1. Unable to handle kernel read from unreadable memory at virtual address 0000000000000018 Call trace: mtk_iommu_flush_iotlb_all+0x20/0x80 iommu_create_device_direct_mappings.part.0+0x13c/0x230 iommu_setup_default_domain+0x29c/0x4d0 iommu_probe_device+0x12c/0x190 of_iommu_configure+0x140/0x208 of_dma_configure_id+0x19c/0x3c0 platform_dma_configure+0x38/0x88 really_probe+0x78/0x2c0 Check if the "bank" field has been filled in before actually attempting the IOTLB flush to avoid it. The IOTLB is also flushed when the device comes out of runtime suspend, so it should have a clean initial state. 
Fixes: 08500c43d4f7 ("iommu/mediatek: Adjust the structure") Signed-off-by: Chen-Yu Tsai Reviewed-by: Yong Wu Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20230526085402.394239-1-wenst@chromium.org Signed-off-by: Joerg Roedel --- drivers/iommu/mtk_iommu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index aecc7d154f28e..e93906d6e112e 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -781,7 +781,8 @@ static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); - mtk_iommu_tlb_flush_all(dom->bank->parent_data); + if (dom->bank) + mtk_iommu_tlb_flush_all(dom->bank->parent_data); } static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, From 4d56304e5827c8cc8cc18c75343d283af7c4825c Mon Sep 17 00:00:00 2001 From: Hangyu Hua Date: Wed, 31 May 2023 18:28:04 +0800 Subject: [PATCH 265/303] net/sched: flower: fix possible OOB write in fl_set_geneve_opt() If we send two TCA_FLOWER_KEY_ENC_OPTS_GENEVE packets and their total size is 252 bytes (key->enc_opts.len = 252), then key->enc_opts.len = opt->length = data_len / 4 = 0 when the third TCA_FLOWER_KEY_ENC_OPTS_GENEVE packet enters fl_set_geneve_opt(). This bypasses the next bounds check and results in an out-of-bounds write. Fixes: 0a6e77784f49 ("net/sched: allow flower to match tunnel options") Signed-off-by: Hangyu Hua Reviewed-by: Simon Horman Reviewed-by: Pieter Jansen van Vuuren Link: https://lore.kernel.org/r/20230531102805.27090-1-hbh25y@gmail.com Signed-off-by: Paolo Abeni --- net/sched/cls_flower.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 9dbc43388e579..815c3e416bc54 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1153,6 +1153,9 @@ static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key, if (option_len > sizeof(struct geneve_opt)) data_len = option_len - sizeof(struct geneve_opt); + if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4) + return -ERANGE; + opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len]; memset(opt, 0xff, option_len); opt->length = data_len / 4; From 30c6f0bf9579debce27e45fac34fdc97e46acacc Mon Sep 17 00:00:00 2001 From: fuyuanli Date: Wed, 31 May 2023 16:01:50 +0800 Subject: [PATCH 266/303] tcp: fix mishandling when the sack compression is deferred. In this patch, we mainly try to handle sending a compressed ack correctly if it's deferred. Here are more details in the old logic: When sack compression is triggered in tcp_compressed_ack_kick(), if the sock is owned by the user, it will set TCP_DELACK_TIMER_DEFERRED and then defer to the release cb phase. Later, once the user releases the sock, tcp_delack_timer_handler() should send an ack as expected, which, however, cannot happen due to the lack of the ICSK_ACK_TIMER flag. Therefore, the receiver would not send an ack until the sender's retransmission timeout. This adds unnecessary latency.
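The deferred path, condensed (a sketch of the call flow; tcp_release_cb() is the release-callback step referred to above):

  /*
   *   tcp_compressed_ack_kick()            hrtimer fires, sock owned by user
   *     -> sets TCP_DELACK_TIMER_DEFERRED  (ICSK_ACK_TIMER is NOT set)
   *
   *   release_sock()
   *     -> tcp_release_cb()
   *       -> tcp_delack_timer_handler()
   *            before: returned early because ICSK_ACK_TIMER was unset,
   *                    so tp->compressed_ack was never flushed
   *            after:  checks tp->compressed_ack first and sends the
   *                    pending ack via tcp_sack_compress_send_ack()
   */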
Fixes: 5d9f4262b7ea ("tcp: add SACK compression") Suggested-by: Eric Dumazet Signed-off-by: fuyuanli Signed-off-by: Jason Xing Link: https://lore.kernel.org/netdev/20230529113804.GA20300@didi-ThinkCentre-M920t-N000/ Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20230531080150.GA20424@didi-ThinkCentre-M920t-N000 Signed-off-by: Paolo Abeni --- include/net/tcp.h | 1 + net/ipv4/tcp_input.c | 2 +- net/ipv4/tcp_timer.c | 16 +++++++++++++--- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 18a038d164344..5066e4586cf09 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -632,6 +632,7 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb); void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb); void tcp_fin(struct sock *sk); void tcp_check_space(struct sock *sk); +void tcp_sack_compress_send_ack(struct sock *sk); /* tcp_timer.c */ void tcp_init_xmit_timers(struct sock *); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 61b6710f337a3..bf8b22218dd46 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4530,7 +4530,7 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) } } -static void tcp_sack_compress_send_ack(struct sock *sk) +void tcp_sack_compress_send_ack(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index b839c2f91292f..39eb947fe3920 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -290,9 +290,19 @@ static int tcp_write_timeout(struct sock *sk) void tcp_delack_timer_handler(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); - if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || - !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) + if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) + return; + + /* Handling the sack compression case */ + if (tp->compressed_ack) { + tcp_mstamp_refresh(tp); + tcp_sack_compress_send_ack(sk); + return; + } + + if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) return; if (time_after(icsk->icsk_ack.timeout, jiffies)) { @@ -312,7 +322,7 @@ void tcp_delack_timer_handler(struct sock *sk) inet_csk_exit_pingpong_mode(sk); icsk->icsk_ack.ato = TCP_ATO_MIN; } - tcp_mstamp_refresh(tcp_sk(sk)); + tcp_mstamp_refresh(tp); tcp_send_ack(sk); __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS); } From be7f8012a513f5099916ee2da28420156cbb8cf3 Mon Sep 17 00:00:00 2001 From: Bert Karwatzki Date: Wed, 31 May 2023 12:36:19 +0200 Subject: [PATCH 267/303] net: ipa: Use correct value for IPA_STATUS_SIZE IPA_STATUS_SIZE was introduced in commit b8dc7d0eea5a as a replacement for the size of the removed struct ipa_status which had size sizeof(__le32[8]). Use this value as IPA_STATUS_SIZE. 
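A quick user-space check makes the size difference concrete (the typedef is a stand-in for the kernel's __le32; only the arithmetic matters):

  #include <stdint.h>
  #include <stdio.h>

  typedef uint32_t __le32;        /* stand-in for the kernel type */

  int main(void)
  {
          /* the removed struct ipa_status packed eight 32-bit words */
          printf("sizeof(__le32[4]) = %zu\n", sizeof(__le32[4])); /* 16 bytes: too small */
          printf("sizeof(__le32[8]) = %zu\n", sizeof(__le32[8])); /* 32 bytes: matches the removed struct */
          return 0;
  }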
Fixes: b8dc7d0eea5a ("net: ipa: stop using sizeof(status)") Signed-off-by: Bert Karwatzki Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230531103618.102608-1-spasswolf@web.de Signed-off-by: Paolo Abeni --- drivers/net/ipa/ipa_endpoint.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 2ee80ed140b72..afa1d56d9095c 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -119,7 +119,7 @@ enum ipa_status_field_id { }; /* Size in bytes of an IPA packet status structure */ -#define IPA_STATUS_SIZE sizeof(__le32[4]) +#define IPA_STATUS_SIZE sizeof(__le32[8]) /* IPA status structure decoder; looks up field values for a structure */ static u32 ipa_status_extract(struct ipa *ipa, const void *data, From b675df0257bb717082f592626da3ddfc5bdc2b6b Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Thu, 1 Jun 2023 18:51:34 +0800 Subject: [PATCH 268/303] btrfs: zoned: fix dev-replace after the scrub rework [BUG] After commit e02ee89baa66 ("btrfs: scrub: switch scrub_simple_mirror() to scrub_stripe infrastructure"), scrub no longer works for zoned device at all. Even an empty zoned btrfs cannot be replaced: # mkfs.btrfs -f /dev/nvme0n1 # mount /dev/nvme0n1 /mnt/btrfs # btrfs replace start -Bf 1 /dev/nvme0n2 /mnt/btrfs Resetting device zones /dev/nvme1n1 (160 zones) ... ERROR: ioctl(DEV_REPLACE_START) failed on "/mnt/btrfs/": Input/output error And we can hit kernel crash related to that: BTRFS info (device nvme1n1): host-managed zoned block device /dev/nvme3n1, 160 zones of 134217728 bytes BTRFS info (device nvme1n1): dev_replace from /dev/nvme2n1 (devid 2) to /dev/nvme3n1 started nvme3n1: Zone Management Append(0x7d) @ LBA 65536, 4 blocks, Zone Is Full (sct 0x1 / sc 0xb9) DNR I/O error, dev nvme3n1, sector 786432 op 0xd:(ZONE_APPEND) flags 0x4000 phys_seg 3 prio class 2 BTRFS error (device nvme1n1): bdev /dev/nvme3n1 errs: wr 1, rd 0, flush 0, corrupt 0, gen 0 BUG: kernel NULL pointer dereference, address: 00000000000000a8 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014 RIP: 0010:_raw_spin_lock_irqsave+0x1e/0x40 Call Trace: btrfs_lookup_ordered_extent+0x31/0x190 btrfs_record_physical_zoned+0x18/0x40 btrfs_simple_end_io+0xaf/0xc0 blk_update_request+0x153/0x4c0 blk_mq_end_request+0x15/0xd0 nvme_poll_cq+0x1d3/0x360 nvme_irq+0x39/0x80 __handle_irq_event_percpu+0x3b/0x190 handle_irq_event+0x2f/0x70 handle_edge_irq+0x7c/0x210 __common_interrupt+0x34/0xa0 common_interrupt+0x7d/0xa0 asm_common_interrupt+0x22/0x40 [CAUSE] Dev-replace reuses scrub code to iterate all extents and write the existing content back to the new device. And for zoned devices, we call fill_writer_pointer_gap() to make sure all the writes into the zoned device is sequential, even if there may be some gaps between the writes. However we have several different bugs all related to zoned dev-replace: - We are using ZONE_APPEND operation for metadata style write back For zoned devices, btrfs has two ways to write data: * ZONE_APPEND for data This allows higher queue depth, but will not be able to know where the write would land. Thus needs to grab the real on-disk physical location in it's endio. * WRITE for metadata This requires single queue depth (new writes can only be submitted after previous one finished), and all writes must be sequential. 
For scrub, we go single queue depth, but still goes with ZONE_APPEND, which requires btrfs_bio::inode being populated. This is the cause of that crash. - No correct tracing of write_pointer After a write finished, we should forward sctx->write_pointer, or fill_writer_pointer_gap() would not work properly and cause more than necessary zero out, and fill the whole zone prematurely. - Incorrect physical bytenr passed to fill_writer_pointer_gap() In scrub_write_sectors(), one call site passes logical address, which is completely wrong. The other call site passes physical address of current sector, but we should pass the physical address of the btrfs_bio we're submitting. This is the cause of the -EIO errors. [FIX] - Do not use ZONE_APPEND for btrfs_submit_repair_write(). - Manually forward sctx->write_pointer after successful writeback - Use the physical address of the to-be-submitted btrfs_bio for fill_writer_pointer_gap() Now zoned device replace would work as expected. Reported-by: Christoph Hellwig Fixes: e02ee89baa66 ("btrfs: scrub: switch scrub_simple_mirror() to scrub_stripe infrastructure") Reviewed-by: Christoph Hellwig Signed-off-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/bio.c | 4 ---- fs/btrfs/scrub.c | 48 ++++++++++++++++++++++++++++++++---------------- 2 files changed, 32 insertions(+), 20 deletions(-) diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c index 35d34c731d72c..b3ad0f51e6162 100644 --- a/fs/btrfs/bio.c +++ b/fs/btrfs/bio.c @@ -811,10 +811,6 @@ void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_ goto fail; if (dev_replace) { - if (btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE && btrfs_is_zoned(fs_info)) { - bbio->bio.bi_opf &= ~REQ_OP_WRITE; - bbio->bio.bi_opf |= REQ_OP_ZONE_APPEND; - } ASSERT(smap.dev == fs_info->dev_replace.srcdev); smap.dev = fs_info->dev_replace.tgtdev; } diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index dd37cba580222..7c666517d3d33 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1137,6 +1137,35 @@ static void scrub_write_endio(struct btrfs_bio *bbio) wake_up(&stripe->io_wait); } +static void scrub_submit_write_bio(struct scrub_ctx *sctx, + struct scrub_stripe *stripe, + struct btrfs_bio *bbio, bool dev_replace) +{ + struct btrfs_fs_info *fs_info = sctx->fs_info; + u32 bio_len = bbio->bio.bi_iter.bi_size; + u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) - + stripe->logical; + + fill_writer_pointer_gap(sctx, stripe->physical + bio_off); + atomic_inc(&stripe->pending_io); + btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace); + if (!btrfs_is_zoned(fs_info)) + return; + /* + * For zoned writeback, queue depth must be 1, thus we must wait for + * the write to finish before the next write. + */ + wait_scrub_stripe_io(stripe); + + /* + * And also need to update the write pointer if write finished + * successfully. + */ + if (!test_bit(bio_off >> fs_info->sectorsize_bits, + &stripe->write_error_bitmap)) + sctx->write_pointer += bio_len; +} + /* * Submit the write bio(s) for the sectors specified by @write_bitmap. * @@ -1155,7 +1184,6 @@ static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *str { struct btrfs_fs_info *fs_info = stripe->bg->fs_info; struct btrfs_bio *bbio = NULL; - const bool zoned = btrfs_is_zoned(fs_info); int sector_nr; for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) { @@ -1168,13 +1196,7 @@ static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *str /* Cannot merge with previous sector, submit the current one. 
*/ if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) { - fill_writer_pointer_gap(sctx, stripe->physical + - (sector_nr << fs_info->sectorsize_bits)); - atomic_inc(&stripe->pending_io); - btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace); - /* For zoned writeback, queue depth must be 1. */ - if (zoned) - wait_scrub_stripe_io(stripe); + scrub_submit_write_bio(sctx, stripe, bbio, dev_replace); bbio = NULL; } if (!bbio) { @@ -1187,14 +1209,8 @@ static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *str ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff); ASSERT(ret == fs_info->sectorsize); } - if (bbio) { - fill_writer_pointer_gap(sctx, bbio->bio.bi_iter.bi_sector << - SECTOR_SHIFT); - atomic_inc(&stripe->pending_io); - btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace); - if (zoned) - wait_scrub_stripe_io(stripe); - } + if (bbio) + scrub_submit_write_bio(sctx, stripe, bbio, dev_replace); } /* From 4420528254153189c70b6267593e445dc8654e37 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 29 May 2023 12:52:07 -0600 Subject: [PATCH 269/303] firewire: Replace zero-length array with flexible-array member MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Zero-length and one-element arrays are deprecated, and we are moving towards adopting C99 flexible-array members, instead. Address the following warnings found with GCC-13 and -fstrict-flex-arrays=3 enabled: sound/firewire/amdtp-stream.c: In function ‘build_it_pkt_header’: sound/firewire/amdtp-stream.c:694:17: warning: ‘generate_cip_header’ accessing 8 bytes in a region of size 0 [-Wstringop-overflow=] 694 | generate_cip_header(s, cip_header, data_block_counter, syt); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ sound/firewire/amdtp-stream.c:694:17: note: referencing argument 2 of type ‘__be32[2]’ {aka ‘unsigned int[2]’} sound/firewire/amdtp-stream.c:667:13: note: in a call to function ‘generate_cip_header’ 667 | static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2], | ^~~~~~~~~~~~~~~~~~~ This helps with the ongoing efforts to tighten the FORTIFY_SOURCE routines on memcpy() and help us make progress towards globally enabling -fstrict-flex-arrays=3 [1]. Link: https://github.com/KSPP/linux/issues/21 Link: https://github.com/KSPP/linux/issues/303 Link: https://gcc.gnu.org/pipermail/gcc-patches/2022-October/602902.html [1] Signed-off-by: Gustavo A. R. Silva Reviewed-by: Kees Cook Link: https://lore.kernel.org/r/ZHT0V3SpvHyxCv5W@work Signed-off-by: Takashi Sakamoto --- include/linux/firewire.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 1716c01c4e541..efb6e2cf20343 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -391,7 +391,7 @@ struct fw_iso_packet { u32 tag:2; /* tx: Tag in packet header */ u32 sy:4; /* tx: Sy in packet header */ u32 header_length:8; /* Length of immediate header */ - u32 header[0]; /* tx: Top of 1394 isoch. data_block */ + u32 header[]; /* tx: Top of 1394 isoch. data_block */ }; #define FW_ISO_CONTEXT_TRANSMIT 0 From 3c27f3d53d588618d81d30d6712459a3cc9489b8 Mon Sep 17 00:00:00 2001 From: Andreas Svensson Date: Tue, 30 May 2023 16:52:23 +0200 Subject: [PATCH 270/303] net: dsa: mv88e6xxx: Increase wait after reset deactivation A switch held in reset by default needs to wait longer until we can reliably detect it. 
An issue was observed when testing on the Marvell 88E6393X (Link Street). The driver failed to detect the switch on some upstarts. Increasing the wait time after reset deactivation solves this issue. The updated wait time is now also the same as the wait time in the mv88e6xxx_hardware_reset function. Fixes: 7b75e49de424 ("net: dsa: mv88e6xxx: wait after reset deactivation") Signed-off-by: Andreas Svensson Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20230530145223.1223993-1-andreas.svensson@axis.com Signed-off-by: Paolo Abeni --- drivers/net/dsa/mv88e6xxx/chip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 64a2f2f83735d..08a46ffd53af9 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -7170,7 +7170,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) goto out; } if (chip->reset) - usleep_range(1000, 2000); + usleep_range(10000, 20000); /* Detect if the device is configured in single chip addressing mode, * otherwise continue with address specific smi init/detection. From 444c17cfbc855bf6c1524f3813f6f667d9b708ff Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Fri, 26 May 2023 19:41:54 +0200 Subject: [PATCH 271/303] MAINTAINERS: Add myself as reviewer instead of Naga Naga no longer works for AMD/Xilinx and there is no activity from him to continue to maintain Xilinx related drivers. Add myself instead to be kept in loop if there is any need for testing. Signed-off-by: Michal Simek Signed-off-by: Miquel Raynal [: Manually apply on top of the latest -rc which where the MAINTAINERS file got sorted] Link: https://lore.kernel.org/linux-mtd/06df49c300c53a27423260e99acc217b06d4e588.1684827820.git.michal.simek@amd.com --- MAINTAINERS | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 27ef116247481..2a6606a772af5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1600,7 +1600,7 @@ F: drivers/media/i2c/ar0521.c ARASAN NAND CONTROLLER DRIVER M: Miquel Raynal -M: Naga Sureshkumar Relli +R: Michal Simek L: linux-mtd@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml @@ -1763,7 +1763,7 @@ F: include/linux/amba/mmci.h ARM PRIMECELL PL35X NAND CONTROLLER DRIVER M: Miquel Raynal -M: Naga Sureshkumar Relli +R: Michal Simek L: linux-mtd@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml @@ -1771,7 +1771,7 @@ F: drivers/mtd/nand/raw/pl35x-nand-controller.c ARM PRIMECELL PL35X SMC DRIVER M: Miquel Raynal -M: Naga Sureshkumar Relli +R: Michal Simek L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/memory-controllers/arm,pl35x-smc.yaml From 0ea923f443350c8c5cca6eef5b748d52b903f46c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 17 Apr 2023 22:56:50 +0200 Subject: [PATCH 272/303] mtdchar: mark bits of ioctl handler noinline The addition of the mtdchar_read_ioctl() function caused the stack usage of mtdchar_ioctl() to grow beyond the warning limit on 32-bit architectures with gcc-13: drivers/mtd/mtdchar.c: In function 'mtdchar_ioctl': drivers/mtd/mtdchar.c:1229:1: error: the frame size of 1488 bytes is larger than 1024 bytes [-Werror=frame-larger-than=] Mark both the read and write portions as noinline_for_stack to ensure they don't get inlined and use separate stack slots to reduce the maximum usage, both in the mtdchar_ioctl() and 
combined with any of its callees. Fixes: 095bb6e44eb1 ("mtdchar: add MEMREAD ioctl") Cc: stable@vger.kernel.org Signed-off-by: Arnd Bergmann Reviewed-by: Richard Weinberger Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20230417205654.1982368-1-arnd@kernel.org --- drivers/mtd/mtdchar.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 01f1c6792df9c..8dc4f5c493fcb 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -590,8 +590,8 @@ static void adjust_oob_length(struct mtd_info *mtd, uint64_t start, (end_page - start_page + 1) * oob_per_page); } -static int mtdchar_write_ioctl(struct mtd_info *mtd, - struct mtd_write_req __user *argp) +static noinline_for_stack int +mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp) { struct mtd_info *master = mtd_get_master(mtd); struct mtd_write_req req; @@ -688,8 +688,8 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd, return ret; } -static int mtdchar_read_ioctl(struct mtd_info *mtd, - struct mtd_read_req __user *argp) +static noinline_for_stack int +mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp) { struct mtd_info *master = mtd_get_master(mtd); struct mtd_read_req req; From 8a6f4d346f3bad9c68b4a87701eb3f7978542d57 Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Thu, 25 May 2023 12:31:52 +1200 Subject: [PATCH 273/303] mtd: rawnand: marvell: ensure timing values are written When new timing values are calculated in marvell_nfc_setup_interface() ensure that they will be applied in marvell_nfc_select_target() by clearing the selected_chip pointer. Fixes: b25251414f6e ("mtd: rawnand: marvell: Stop implementing ->select_chip()") Suggested-by: Miquel Raynal Signed-off-by: Chris Packham Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20230525003154.2303012-1-chris.packham@alliedtelesis.co.nz --- drivers/mtd/nand/raw/marvell_nand.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index afb424579f0b8..f1fcf136ad03c 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2457,6 +2457,12 @@ static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr, NDTR1_WAIT_MODE; } + /* + * Reset nfc->selected_chip so the next command will cause the timing + * registers to be updated in marvell_nfc_select_target(). + */ + nfc->selected_chip = NULL; + return 0; } From c4d28e30a8d0b979e4029465ab8f312ab6ce2644 Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Thu, 25 May 2023 12:31:53 +1200 Subject: [PATCH 274/303] mtd: rawnand: marvell: don't set the NAND frequency select marvell_nfc_setup_interface() uses the frequency retrieved from the clock associated with the nand interface to determine the timings that will be used. By changing the NAND frequency select without reflecting this in the clock configuration this means that the timings calculated don't correctly meet the requirements of the NAND chip. This hasn't been an issue up to now because of a different bug that was stopping the timings being updated after they were initially set. 
Fixes: b25251414f6e ("mtd: rawnand: marvell: Stop implementing ->select_chip()") Signed-off-by: Chris Packham Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20230525003154.2303012-2-chris.packham@alliedtelesis.co.nz --- drivers/mtd/nand/raw/marvell_nand.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index f1fcf136ad03c..30c15e4e1cc0d 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2900,10 +2900,6 @@ static int marvell_nfc_init(struct marvell_nfc *nfc) regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL, GENCONF_CLK_GATING_CTRL_ND_GATE, GENCONF_CLK_GATING_CTRL_ND_GATE); - - regmap_update_bits(sysctrl_base, GENCONF_ND_CLK_CTRL, - GENCONF_ND_CLK_CTRL_EN, - GENCONF_ND_CLK_CTRL_EN); } /* Configure the DMA if appropriate */ From a60caf039e96d806b1ced893242bae82ba3ccf0d Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Mon, 29 May 2023 16:38:17 +0900 Subject: [PATCH 275/303] net: renesas: rswitch: Fix return value in error path of xmit Fix return value in the error path of rswitch_start_xmit(). If TX queues are full, this function should return NETDEV_TX_BUSY. Fixes: 3590918b5d07 ("net: ethernet: renesas: Add support for "Ethernet Switch"") Signed-off-by: Yoshihiro Shimoda Link: https://lore.kernel.org/r/20230529073817.1145208-1-yoshihiro.shimoda.uh@renesas.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/renesas/rswitch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 29afaddb598db..aace87139cea1 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -1485,7 +1485,7 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) { netif_stop_subqueue(ndev, 0); - return ret; + return NETDEV_TX_BUSY; } if (skb_put_padto(skb, ETH_ZLEN)) From 519d6487640835d19461817c75907e6308074a73 Mon Sep 17 00:00:00 2001 From: Xu Liang Date: Wed, 31 May 2023 15:48:22 +0800 Subject: [PATCH 276/303] net: phy: mxl-gpy: extend interrupt fix to all impacted variants The interrupt fix in commit 97a89ed101bb should be applied on all variants of GPY2xx PHY and GPY115C. Fixes: 97a89ed101bb ("net: phy: mxl-gpy: disable interrupts on GPY215 by default") Signed-off-by: Xu Liang Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230531074822.39136-1-lxu@maxlinear.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/mxl-gpy.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c index 6301a9abfb955..ea1073adc5a16 100644 --- a/drivers/net/phy/mxl-gpy.c +++ b/drivers/net/phy/mxl-gpy.c @@ -274,13 +274,6 @@ static int gpy_config_init(struct phy_device *phydev) return ret < 0 ? 
ret : 0; } -static bool gpy_has_broken_mdint(struct phy_device *phydev) -{ - /* At least these PHYs are known to have broken interrupt handling */ - return phydev->drv->phy_id == PHY_ID_GPY215B || - phydev->drv->phy_id == PHY_ID_GPY215C; -} - static int gpy_probe(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; @@ -300,8 +293,7 @@ static int gpy_probe(struct phy_device *phydev) phydev->priv = priv; mutex_init(&priv->mbox_lock); - if (gpy_has_broken_mdint(phydev) && - !device_property_present(dev, "maxlinear,use-broken-interrupts")) + if (!device_property_present(dev, "maxlinear,use-broken-interrupts")) phydev->dev_flags |= PHY_F_NO_IRQ; fw_version = phy_read(phydev, PHY_FWV); @@ -659,11 +651,9 @@ static irqreturn_t gpy_handle_interrupt(struct phy_device *phydev) * frame. Therefore, polling is the best we can do and won't do any more * harm. * It was observed that this bug happens on link state and link speed - * changes on a GPY215B and GYP215C independent of the firmware version - * (which doesn't mean that this list is exhaustive). + * changes independent of the firmware version. */ - if (gpy_has_broken_mdint(phydev) && - (reg & (PHY_IMASK_LSTC | PHY_IMASK_LSPC))) { + if (reg & (PHY_IMASK_LSTC | PHY_IMASK_LSPC)) { reg = gpy_mbox_read(phydev, REG_GPIO0_OUT); if (reg < 0) { phy_error(phydev); From abaf8d51b0cedb16af51fb6b2189370d7515977c Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Wed, 31 May 2023 08:44:57 -0700 Subject: [PATCH 277/303] ice: recycle/free all of the fragments from multi-buffer frame The ice driver caches next_to_clean value at the beginning of ice_clean_rx_irq() in order to remember the first buffer that has to be freed/recycled after main Rx processing loop. The end boundary is indicated by first descriptor of frame that Rx processing loop has ended its duties. Note that if mentioned loop ended in the middle of gathering multi-buffer frame, next_to_clean would be pointing to the descriptor in the middle of the frame BUT freeing/recycling stage will stop at the first descriptor. This means that next iteration of ice_clean_rx_irq() will miss the (first_desc, next_to_clean - 1) entries. 
When running various 9K MTU workloads, such splats were observed: [ 540.780716] BUG: kernel NULL pointer dereference, address: 0000000000000000 [ 540.787787] #PF: supervisor read access in kernel mode [ 540.793002] #PF: error_code(0x0000) - not-present page [ 540.798218] PGD 0 P4D 0 [ 540.800801] Oops: 0000 [#1] PREEMPT SMP NOPTI [ 540.805231] CPU: 18 PID: 3984 Comm: xskxceiver Tainted: G W 6.3.0-rc7+ #96 [ 540.813619] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0008.031920191559 03/19/2019 [ 540.824209] RIP: 0010:ice_clean_rx_irq+0x2b6/0xf00 [ice] [ 540.829678] Code: 74 24 10 e9 aa 00 00 00 8b 55 78 41 31 57 10 41 09 c4 4d 85 ff 0f 84 83 00 00 00 49 8b 57 08 41 8b 4f 1c 65 8b 35 1a fa 4b 3f <48> 8b 02 48 c1 e8 3a 39 c6 0f 85 a2 00 00 00 f6 42 08 02 0f 85 98 [ 540.848717] RSP: 0018:ffffc9000f42fc50 EFLAGS: 00010282 [ 540.854029] RAX: 0000000000000004 RBX: 0000000000000002 RCX: 000000000000fffe [ 540.861272] RDX: 0000000000000000 RSI: 0000000000000001 RDI: 00000000ffffffff [ 540.868519] RBP: ffff88984a05ac00 R08: 0000000000000000 R09: dead000000000100 [ 540.875760] R10: ffff88983fffcd00 R11: 000000000010f2b8 R12: 0000000000000004 [ 540.883008] R13: 0000000000000003 R14: 0000000000000800 R15: ffff889847a10040 [ 540.890253] FS: 00007f6ddf7fe640(0000) GS:ffff88afdf800000(0000) knlGS:0000000000000000 [ 540.898465] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 540.904299] CR2: 0000000000000000 CR3: 000000010d3da001 CR4: 00000000007706e0 [ 540.911542] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 540.918789] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 540.926032] PKRU: 55555554 [ 540.928790] Call Trace: [ 540.931276] [ 540.933418] ice_napi_poll+0x4ca/0x6d0 [ice] [ 540.937804] ? __pfx_ice_napi_poll+0x10/0x10 [ice] [ 540.942716] napi_busy_loop+0xd7/0x320 [ 540.946537] xsk_recvmsg+0x143/0x170 [ 540.950178] sock_recvmsg+0x99/0xa0 [ 540.953729] __sys_recvfrom+0xa8/0x120 [ 540.957543] ? do_futex+0xbd/0x1d0 [ 540.961008] ? __x64_sys_futex+0x73/0x1d0 [ 540.965083] __x64_sys_recvfrom+0x20/0x30 [ 540.969155] do_syscall_64+0x38/0x90 [ 540.972796] entry_SYSCALL_64_after_hwframe+0x72/0xdc [ 540.977934] RIP: 0033:0x7f6de5f27934 To fix this, set cached_ntc to first_desc so that at the end, when freeing/recycling buffers, descriptors from first to ntc are not missed. 
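With example indices (illustrative values; the actual change is the one-liner in the diff below):

  /* Suppose the previous ice_clean_rx_irq() call stopped mid-frame:
   *
   *   rx_ring->first_desc    = 10   first fragment of the unfinished frame
   *   rx_ring->next_to_clean = 13   descriptor the loop stopped at
   *
   * Caching next_to_clean (13) meant the recycle stage of the next call
   * never went back to descriptors 10..12. Caching first_desc instead
   * starts the recycle range at the head of the unfinished frame.
   */
  u32 cached_ntc = rx_ring->first_desc;   /* was: u32 cached_ntc = ntc; */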
Fixes: 2fba7dc5157b ("ice: Add support for XDP multi-buffer on Rx side") Signed-off-by: Maciej Fijalkowski Reviewed-by: Simon Horman Tested-by: Chandan Kumar Rout (A Contingent Worker at Intel) Signed-off-by: Tony Nguyen Link: https://lore.kernel.org/r/20230531154457.3216621-1-anthony.l.nguyen@intel.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/ice/ice_txrx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 059bd911c51d8..52d0a126eb616 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1152,11 +1152,11 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_pkts = 0; unsigned int offset = rx_ring->rx_offset; struct xdp_buff *xdp = &rx_ring->xdp; + u32 cached_ntc = rx_ring->first_desc; struct ice_tx_ring *xdp_ring = NULL; struct bpf_prog *xdp_prog = NULL; u32 ntc = rx_ring->next_to_clean; u32 cnt = rx_ring->count; - u32 cached_ntc = ntc; u32 xdp_xmit = 0; u32 cached_ntu; bool failure; From b0ad3c179059089d809b477a1d445c1183a7b8fe Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 31 May 2023 12:01:42 -0400 Subject: [PATCH 278/303] rtnetlink: call validate_linkmsg in rtnl_create_link validate_linkmsg() was introduced by commit 1840bb13c22f5b ("[RTNL]: Validate hardware and broadcast address attribute for RTM_NEWLINK") to validate tb[IFLA_ADDRESS/BROADCAST] for existing links. The same check should also be done for newly created links. This patch adds validate_linkmsg() call in rtnl_create_link(), to avoid the invalid address set when creating some devices like: # ip link add dummy0 type dummy # ip link add link dummy0 name mac0 address 01:02 type macsec Fixes: 0e06877c6fdb ("[RTNETLINK]: rtnl_link: allow specifying initial device address") Signed-off-by: Xin Long Reviewed-by: Simon Horman Signed-off-by: Jakub Kicinski --- net/core/rtnetlink.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 653901a1bf750..824688edb722d 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3285,6 +3285,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname, struct net_device *dev; unsigned int num_tx_queues = 1; unsigned int num_rx_queues = 1; + int err; if (tb[IFLA_NUM_TX_QUEUES]) num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); @@ -3320,13 +3321,18 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname, if (!dev) return ERR_PTR(-ENOMEM); + err = validate_linkmsg(dev, tb, extack); + if (err < 0) { + free_netdev(dev); + return ERR_PTR(err); + } + dev_net_set(dev, net); dev->rtnl_link_ops = ops; dev->rtnl_link_state = RTNL_LINK_INITIALIZING; if (tb[IFLA_MTU]) { u32 mtu = nla_get_u32(tb[IFLA_MTU]); - int err; err = dev_validate_mtu(dev, mtu, extack); if (err) { From fef5b228dd38378148bc850f7e69a7783f3b95a4 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 31 May 2023 12:01:43 -0400 Subject: [PATCH 279/303] rtnetlink: move IFLA_GSO_ tb check to validate_linkmsg These IFLA_GSO_* tb check should also be done for the new created link, otherwise, they can be set to a huge value when creating links: # ip link add dummy1 gso_max_size 4294967295 type dummy # ip -d link show dummy1 dummy addrgenmode eui64 ... 
gso_max_size 4294967295 Fixes: 46e6b992c250 ("rtnetlink: allow GSO maximums to be set on device creation") Fixes: 9eefedd58ae1 ("net: add gso_ipv4_max_size and gro_ipv4_max_size per device") Signed-off-by: Xin Long Reviewed-by: Simon Horman Signed-off-by: Jakub Kicinski --- net/core/rtnetlink.c | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 824688edb722d..bc068a857219e 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2385,6 +2385,25 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], if (tb[IFLA_BROADCAST] && nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) return -EINVAL; + + if (tb[IFLA_GSO_MAX_SIZE] && + nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) { + NL_SET_ERR_MSG(extack, "too big gso_max_size"); + return -EINVAL; + } + + if (tb[IFLA_GSO_MAX_SEGS] && + (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS || + nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) { + NL_SET_ERR_MSG(extack, "too big gso_max_segs"); + return -EINVAL; + } + + if (tb[IFLA_GSO_IPV4_MAX_SIZE] && + nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) { + NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size"); + return -EINVAL; + } } if (tb[IFLA_AF_SPEC]) { @@ -2858,11 +2877,6 @@ static int do_setlink(const struct sk_buff *skb, if (tb[IFLA_GSO_MAX_SIZE]) { u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]); - if (max_size > dev->tso_max_size) { - err = -EINVAL; - goto errout; - } - if (dev->gso_max_size ^ max_size) { netif_set_gso_max_size(dev, max_size); status |= DO_SETLINK_MODIFIED; @@ -2872,11 +2886,6 @@ static int do_setlink(const struct sk_buff *skb, if (tb[IFLA_GSO_MAX_SEGS]) { u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]); - if (max_segs > GSO_MAX_SEGS || max_segs > dev->tso_max_segs) { - err = -EINVAL; - goto errout; - } - if (dev->gso_max_segs ^ max_segs) { netif_set_gso_max_segs(dev, max_segs); status |= DO_SETLINK_MODIFIED; @@ -2895,11 +2904,6 @@ static int do_setlink(const struct sk_buff *skb, if (tb[IFLA_GSO_IPV4_MAX_SIZE]) { u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]); - if (max_size > dev->tso_max_size) { - err = -EINVAL; - goto errout; - } - if (dev->gso_ipv4_max_size ^ max_size) { netif_set_gso_ipv4_max_size(dev, max_size); status |= DO_SETLINK_MODIFIED; From 65d6914e253f3d83b724a9bbfc889ae95711e512 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 31 May 2023 12:01:44 -0400 Subject: [PATCH 280/303] rtnetlink: add the missing IFLA_GRO_ tb check in validate_linkmsg This fixes the issue that dev gro_max_size and gso_ipv4_max_size can be set to a huge value: # ip link add dummy1 type dummy # ip link set dummy1 gro_max_size 4294967295 # ip -d link show dummy1 dummy addrgenmode eui64 ... 
gro_max_size 4294967295 Fixes: 0fe79f28bfaf ("net: allow gro_max_size to exceed 65536") Fixes: 9eefedd58ae1 ("net: add gso_ipv4_max_size and gro_ipv4_max_size per device") Reported-by: Xiumei Mu Signed-off-by: Xin Long Reviewed-by: Simon Horman Signed-off-by: Jakub Kicinski --- net/core/rtnetlink.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index bc068a857219e..41de3a2f29e15 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2399,11 +2399,23 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], return -EINVAL; } + if (tb[IFLA_GRO_MAX_SIZE] && + nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) { + NL_SET_ERR_MSG(extack, "too big gro_max_size"); + return -EINVAL; + } + if (tb[IFLA_GSO_IPV4_MAX_SIZE] && nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) { NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size"); return -EINVAL; } + + if (tb[IFLA_GRO_IPV4_MAX_SIZE] && + nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) { + NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size"); + return -EINVAL; + } } if (tb[IFLA_AF_SPEC]) { From 786fc12457268cc9b555dde6c22ae7300d4b40e1 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 31 May 2023 12:37:03 -0700 Subject: [PATCH 281/303] mptcp: fix connect timeout handling Ondrej reported a functional issue WRT timeout handling on connect with a nice reproducer. The problem is that the current mptcp connect waits for both the MPTCP socket level timeout, and the first subflow socket timeout. The latter is not influenced/touched by the exposed setsockopt(). Overall the above makes the SO_SNDTIMEO a no-op on connect. Since mptcp_connect is invoked via inet_stream_connect and the latter properly handle the MPTCP level timeout, we can address the issue making the nested subflow level connect always unblocking. This also allow simplifying a bit the code, dropping an ugly hack to handle the fastopen and custom proto_ops connect. The issues predates the blamed commit below, but the current resolution requires the infrastructure introduced there. Fixes: 54f1944ed6d2 ("mptcp: factor out mptcp_connect()") Reported-by: Ondrej Mosnacek Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/399 Cc: stable@vger.kernel.org Reviewed-by: Mat Martineau Signed-off-by: Paolo Abeni Signed-off-by: Mat Martineau Signed-off-by: Jakub Kicinski --- net/mptcp/protocol.c | 29 +++++++---------------------- net/mptcp/protocol.h | 1 - 2 files changed, 7 insertions(+), 23 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 08dc53f56bc23..9cafd3b899088 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1702,7 +1702,6 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, lock_sock(ssk); msg->msg_flags |= MSG_DONTWAIT; - msk->connect_flags = O_NONBLOCK; msk->fastopening = 1; ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL); msk->fastopening = 0; @@ -3617,9 +3616,9 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) * acquired the subflow socket lock, too. 
*/ if (msk->fastopening) - err = __inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags, 1); + err = __inet_stream_connect(ssock, uaddr, addr_len, O_NONBLOCK, 1); else - err = inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags); + err = inet_stream_connect(ssock, uaddr, addr_len, O_NONBLOCK); inet_sk(sk)->defer_connect = inet_sk(ssock->sk)->defer_connect; /* on successful connect, the msk state will be moved to established by @@ -3632,12 +3631,10 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) mptcp_copy_inaddrs(sk, ssock->sk); - /* unblocking connect, mptcp-level inet_stream_connect will error out - * without changing the socket state, update it here. + /* silence EINPROGRESS and let the caller inet_stream_connect + * handle the connection in progress */ - if (err == -EINPROGRESS) - sk->sk_socket->state = ssock->state; - return err; + return 0; } static struct proto mptcp_prot = { @@ -3696,18 +3693,6 @@ static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) return err; } -static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, - int addr_len, int flags) -{ - int ret; - - lock_sock(sock->sk); - mptcp_sk(sock->sk)->connect_flags = flags; - ret = __inet_stream_connect(sock, uaddr, addr_len, flags, 0); - release_sock(sock->sk); - return ret; -} - static int mptcp_listen(struct socket *sock, int backlog) { struct mptcp_sock *msk = mptcp_sk(sock->sk); @@ -3859,7 +3844,7 @@ static const struct proto_ops mptcp_stream_ops = { .owner = THIS_MODULE, .release = inet_release, .bind = mptcp_bind, - .connect = mptcp_stream_connect, + .connect = inet_stream_connect, .socketpair = sock_no_socketpair, .accept = mptcp_stream_accept, .getname = inet_getname, @@ -3954,7 +3939,7 @@ static const struct proto_ops mptcp_v6_stream_ops = { .owner = THIS_MODULE, .release = inet6_release, .bind = mptcp_bind, - .connect = mptcp_stream_connect, + .connect = inet_stream_connect, .socketpair = sock_no_socketpair, .accept = mptcp_stream_accept, .getname = inet6_getname, diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 2d7b2c80a1641..de4667dafe598 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -297,7 +297,6 @@ struct mptcp_sock { nodelay:1, fastopening:1, in_accept_queue:1; - int connect_flags; struct work_struct work; struct sk_buff *ooo_last_skb; struct rb_root out_of_order_queue; From 5b825727d0871b23e8867f6371183e61628b4a26 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 31 May 2023 12:37:04 -0700 Subject: [PATCH 282/303] mptcp: add annotations around msk->subflow accesses The MPTCP can access the first subflow socket in a few spots outside the socket lock scope. That is actually safe, as MPTCP will delete the socket itself only after the msk sock close(). Still the such accesses causes a few KCSAN splats, as reported by Christoph. Silence the harmless warning adding a few annotation around the relevant accesses. 
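The annotation pattern involved is the usual one for a pointer that is written under the msk socket lock but read locklessly; a minimal generic sketch (not the exact hunks below):

	/* writer side, runs with the msk socket lock held */
	WRITE_ONCE(msk->subflow, ssock);

	/* lockless reader, e.g. the poll() or accept() paths */
	struct socket *ssock = READ_ONCE(msk->subflow);

	if (!ssock)
		return -EINVAL;

The ONCE accessors prevent load/store tearing and mark the concurrent access as intentional for KCSAN; they add documentation rather than locking.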
Fixes: 71ba088ce0aa ("mptcp: cleanup accept and poll") Reported-by: Christoph Paasch Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/402 Reviewed-by: Mat Martineau Signed-off-by: Paolo Abeni Signed-off-by: Mat Martineau Signed-off-by: Jakub Kicinski --- net/mptcp/protocol.c | 18 ++++++++++-------- net/mptcp/protocol.h | 6 +++++- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 9cafd3b899088..ce9de2c946b07 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -91,7 +91,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk) return err; msk->first = ssock->sk; - msk->subflow = ssock; + WRITE_ONCE(msk->subflow, ssock); subflow = mptcp_subflow_ctx(ssock->sk); list_add(&subflow->node, &msk->conn_list); sock_hold(ssock->sk); @@ -2282,7 +2282,7 @@ static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk) { if (msk->subflow) { iput(SOCK_INODE(msk->subflow)); - msk->subflow = NULL; + WRITE_ONCE(msk->subflow, NULL); } } @@ -3136,7 +3136,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk, msk = mptcp_sk(nsk); msk->local_key = subflow_req->local_key; msk->token = subflow_req->token; - msk->subflow = NULL; + WRITE_ONCE(msk->subflow, NULL); msk->in_accept_queue = 1; WRITE_ONCE(msk->fully_established, false); if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD) @@ -3184,7 +3184,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, struct socket *listener; struct sock *newsk; - listener = msk->subflow; + listener = READ_ONCE(msk->subflow); if (WARN_ON_ONCE(!listener)) { *err = -EINVAL; return NULL; @@ -3736,10 +3736,10 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, pr_debug("msk=%p", msk); - /* buggy applications can call accept on socket states other then LISTEN + /* Buggy applications can call accept on socket states other then LISTEN * but no need to allocate the first subflow just to error out. */ - ssock = msk->subflow; + ssock = READ_ONCE(msk->subflow); if (!ssock) return -EINVAL; @@ -3813,10 +3813,12 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock, state = inet_sk_state_load(sk); pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags); if (state == TCP_LISTEN) { - if (WARN_ON_ONCE(!msk->subflow || !msk->subflow->sk)) + struct socket *ssock = READ_ONCE(msk->subflow); + + if (WARN_ON_ONCE(!ssock || !ssock->sk)) return 0; - return inet_csk_listen_poll(msk->subflow->sk); + return inet_csk_listen_poll(ssock->sk); } if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index de4667dafe598..7a1a3c35470f3 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -305,7 +305,11 @@ struct mptcp_sock { struct list_head rtx_queue; struct mptcp_data_frag *first_pending; struct list_head join_list; - struct socket *subflow; /* outgoing connect/listener/!mp_capable */ + struct socket *subflow; /* outgoing connect/listener/!mp_capable + * The mptcp ops can safely dereference, using suitable + * ONCE annotation, the subflow outside the socket + * lock as such sock is freed after close(). 
+ */ struct sock *first; struct mptcp_pm_data pm; struct { From 7e8b88ec35eef363040e08d99536d2bebef83774 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 31 May 2023 12:37:05 -0700 Subject: [PATCH 283/303] mptcp: consolidate passive msk socket initialization When the msk socket is cloned at MPC handshake time, a few fields are initialized in a racy way outside mptcp_sk_clone() and the msk socket lock. The above is due historical reasons: before commit a88d0092b24b ("mptcp: simplify subflow_syn_recv_sock()") as the first subflow socket carrying all the needed date was not available yet at msk creation time We can now refactor the code moving the missing initialization bit under the socket lock, removing the init race and avoiding some code duplication. This will also simplify the next patch, as all msk->first write access are now under the msk socket lock. Fixes: 0397c6d85f9c ("mptcp: keep unaccepted MPC subflow into join list") Reviewed-by: Mat Martineau Signed-off-by: Paolo Abeni Signed-off-by: Mat Martineau Signed-off-by: Jakub Kicinski --- net/mptcp/protocol.c | 35 ++++++++++++++++++++++++++++------- net/mptcp/protocol.h | 8 ++++---- net/mptcp/subflow.c | 28 +--------------------------- 3 files changed, 33 insertions(+), 38 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index ce9de2c946b07..2ecd0117ab1b8 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -3038,7 +3038,7 @@ static void mptcp_close(struct sock *sk, long timeout) sock_put(sk); } -void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) +static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) { #if IS_ENABLED(CONFIG_MPTCP_IPV6) const struct ipv6_pinfo *ssk6 = inet6_sk(ssk); @@ -3115,9 +3115,10 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk) } #endif -struct sock *mptcp_sk_clone(const struct sock *sk, - const struct mptcp_options_received *mp_opt, - struct request_sock *req) +struct sock *mptcp_sk_clone_init(const struct sock *sk, + const struct mptcp_options_received *mp_opt, + struct sock *ssk, + struct request_sock *req) { struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); @@ -3149,10 +3150,30 @@ struct sock *mptcp_sk_clone(const struct sock *sk, msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq; sock_reset_flag(nsk, SOCK_RCU_FREE); - /* will be fully established after successful MPC subflow creation */ - inet_sk_state_store(nsk, TCP_SYN_RECV); - security_inet_csk_clone(nsk, req); + + /* this can't race with mptcp_close(), as the msk is + * not yet exposted to user-space + */ + inet_sk_state_store(nsk, TCP_ESTABLISHED); + + /* The msk maintain a ref to each subflow in the connections list */ + WRITE_ONCE(msk->first, ssk); + list_add(&mptcp_subflow_ctx(ssk)->node, &msk->conn_list); + sock_hold(ssk); + + /* new mpc subflow takes ownership of the newly + * created mptcp socket + */ + mptcp_token_accept(subflow_req, msk); + + /* set msk addresses early to ensure mptcp_pm_get_local_id() + * uses the correct data + */ + mptcp_copy_inaddrs(nsk, ssk); + mptcp_propagate_sndbuf(nsk, ssk); + + mptcp_rcv_space_init(msk, ssk); bh_unlock_sock(nsk); /* note: the newly allocated socket refcount is 2 now */ diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 7a1a3c35470f3..c5255258bfb39 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -616,7 +616,6 @@ int mptcp_is_checksum_enabled(const struct net *net); int mptcp_allow_join_id0(const struct net *net); 
unsigned int mptcp_stale_loss_cnt(const struct net *net); int mptcp_get_pm_type(const struct net *net); -void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk); void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow, const struct mptcp_options_received *mp_opt); bool __mptcp_retransmit_pending_data(struct sock *sk); @@ -686,9 +685,10 @@ void __init mptcp_proto_init(void); int __init mptcp_proto_v6_init(void); #endif -struct sock *mptcp_sk_clone(const struct sock *sk, - const struct mptcp_options_received *mp_opt, - struct request_sock *req); +struct sock *mptcp_sk_clone_init(const struct sock *sk, + const struct mptcp_options_received *mp_opt, + struct sock *ssk, + struct request_sock *req); void mptcp_get_options(const struct sk_buff *skb, struct mptcp_options_received *mp_opt); diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index ba065b66551a4..4688daa6b38b7 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -815,38 +815,12 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, ctx->setsockopt_seq = listener->setsockopt_seq; if (ctx->mp_capable) { - ctx->conn = mptcp_sk_clone(listener->conn, &mp_opt, req); + ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req); if (!ctx->conn) goto fallback; owner = mptcp_sk(ctx->conn); - - /* this can't race with mptcp_close(), as the msk is - * not yet exposted to user-space - */ - inet_sk_state_store(ctx->conn, TCP_ESTABLISHED); - - /* record the newly created socket as the first msk - * subflow, but don't link it yet into conn_list - */ - WRITE_ONCE(owner->first, child); - - /* new mpc subflow takes ownership of the newly - * created mptcp socket - */ - owner->setsockopt_seq = ctx->setsockopt_seq; mptcp_pm_new_connection(owner, child, 1); - mptcp_token_accept(subflow_req, owner); - - /* set msk addresses early to ensure mptcp_pm_get_local_id() - * uses the correct data - */ - mptcp_copy_inaddrs(ctx->conn, child); - mptcp_propagate_sndbuf(ctx->conn, child); - - mptcp_rcv_space_init(owner, child); - list_add(&ctx->node, &owner->conn_list); - sock_hold(child); /* with OoO packets we can reach here without ingress * mpc option From 1b1b43ee7a208096ecd79e626f2fc90d4a321111 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 31 May 2023 12:37:06 -0700 Subject: [PATCH 284/303] mptcp: fix data race around msk->first access The first subflow socket is accessed outside the msk socket lock by mptcp_subflow_fail(), we need to annotate each write access with WRITE_ONCE, but a few spots still lacks it. 
Fixes: 76a13b315709 ("mptcp: invoke MP_FAIL response when needed") Reviewed-by: Mat Martineau Signed-off-by: Paolo Abeni Signed-off-by: Mat Martineau Signed-off-by: Jakub Kicinski --- net/mptcp/protocol.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 2ecd0117ab1b8..a7dd7d8c9af2d 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -90,7 +90,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk) if (err) return err; - msk->first = ssock->sk; + WRITE_ONCE(msk->first, ssock->sk); WRITE_ONCE(msk->subflow, ssock); subflow = mptcp_subflow_ctx(ssock->sk); list_add(&subflow->node, &msk->conn_list); @@ -2419,7 +2419,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, sock_put(ssk); if (ssk == msk->first) - msk->first = NULL; + WRITE_ONCE(msk->first, NULL); out: if (ssk == msk->last_snd) @@ -2720,7 +2720,7 @@ static int __mptcp_init_sock(struct sock *sk) WRITE_ONCE(msk->rmem_released, 0); msk->timer_ival = TCP_RTO_MIN; - msk->first = NULL; + WRITE_ONCE(msk->first, NULL); inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss; WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); WRITE_ONCE(msk->allow_infinite_fallback, true); From 6b9831bfd9322b297eb6d44257808cc055fdc586 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 31 May 2023 12:37:07 -0700 Subject: [PATCH 285/303] mptcp: add annotations around sk->sk_shutdown accesses Christoph reported the mptcp variant of a recently addressed plain TCP issue. Similar to commit e14cadfd80d7 ("tcp: add annotations around sk->sk_shutdown accesses") add READ/WRITE ONCE annotations to silence KCSAN reports around lockless sk_shutdown access. Fixes: 71ba088ce0aa ("mptcp: cleanup accept and poll") Reported-by: Christoph Paasch Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/401 Reviewed-by: Mat Martineau Signed-off-by: Paolo Abeni Signed-off-by: Mat Martineau Signed-off-by: Jakub Kicinski --- net/mptcp/protocol.c | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index a7dd7d8c9af2d..af54a878ac277 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -603,7 +603,7 @@ static bool mptcp_check_data_fin(struct sock *sk) WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1); WRITE_ONCE(msk->rcv_data_fin, 0); - sk->sk_shutdown |= RCV_SHUTDOWN; + WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN); smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ switch (sk->sk_state) { @@ -910,7 +910,7 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk) /* hopefully temporary hack: propagate shutdown status * to msk, when all subflows agree on it */ - sk->sk_shutdown |= RCV_SHUTDOWN; + WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN); smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ sk->sk_data_ready(sk); @@ -2526,7 +2526,7 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk) } inet_sk_state_store(sk, TCP_CLOSE); - sk->sk_shutdown = SHUTDOWN_MASK; + WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags); @@ -2958,7 +2958,7 @@ bool __mptcp_close(struct sock *sk, long timeout) bool do_cancel_work = false; int subflows_alive = 0; - sk->sk_shutdown = SHUTDOWN_MASK; + WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) { mptcp_listen_inuse_dec(sk); @@ -3101,7 +3101,7 
@@ static int mptcp_disconnect(struct sock *sk, int flags) mptcp_pm_data_reset(msk); mptcp_ca_reset(sk); - sk->sk_shutdown = 0; + WRITE_ONCE(sk->sk_shutdown, 0); sk_error_report(sk); return 0; } @@ -3806,9 +3806,6 @@ static __poll_t mptcp_check_writeable(struct mptcp_sock *msk) { struct sock *sk = (struct sock *)msk; - if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN)) - return EPOLLOUT | EPOLLWRNORM; - if (sk_stream_is_writeable(sk)) return EPOLLOUT | EPOLLWRNORM; @@ -3826,6 +3823,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock, struct sock *sk = sock->sk; struct mptcp_sock *msk; __poll_t mask = 0; + u8 shutdown; int state; msk = mptcp_sk(sk); @@ -3842,17 +3840,22 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock, return inet_csk_listen_poll(ssock->sk); } + shutdown = READ_ONCE(sk->sk_shutdown); + if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) + mask |= EPOLLHUP; + if (shutdown & RCV_SHUTDOWN) + mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; + if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { mask |= mptcp_check_readable(msk); - mask |= mptcp_check_writeable(msk); + if (shutdown & SEND_SHUTDOWN) + mask |= EPOLLOUT | EPOLLWRNORM; + else + mask |= mptcp_check_writeable(msk); } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { /* cf tcp_poll() note about TFO */ mask |= EPOLLOUT | EPOLLWRNORM; } - if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) - mask |= EPOLLHUP; - if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; /* This barrier is coupled with smp_wmb() in __mptcp_error_report() */ smp_rmb(); From 55b47ca7d80814ceb63d64e032e96cd6777811e5 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 31 May 2023 12:37:08 -0700 Subject: [PATCH 286/303] mptcp: fix active subflow finalization Active subflow are inserted into the connection list at creation time. When the MPJ handshake completes successfully, a new subflow creation netlink event is generated correctly, but the current code wrongly avoid initializing a couple of subflow data. The above will cause misbehavior on a few exceptional events: unneeded mptcp-level retransmission on msk-level sequence wrap-around and infinite mapping fallback even when a MPJ socket is present. Address the issue factoring out the needed initialization in a new helper and invoking the latter from __mptcp_finish_join() time for passive subflow and from mptcp_finish_join() for active ones. 
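Condensed from the diff that follows (simplified, shown only to make the call sites explicit), the factored-out helper is:

	static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
	{
		mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
		WRITE_ONCE(msk->allow_infinite_fallback, false);
		mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
	}

It is invoked from __mptcp_finish_join() for passive subflows and from mptcp_finish_join() for active subflows already present on the conn_list.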
Fixes: 0530020a7c8f ("mptcp: track and update contiguous data status") Cc: stable@vger.kernel.org Reviewed-by: Mat Martineau Signed-off-by: Paolo Abeni Signed-off-by: Mat Martineau Signed-off-by: Jakub Kicinski --- net/mptcp/protocol.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index af54a878ac277..67311e7d5b21a 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -825,6 +825,13 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk) mptcp_data_unlock(sk); } +static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk) +{ + mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq); + WRITE_ONCE(msk->allow_infinite_fallback, false); + mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); +} + static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) { struct sock *sk = (struct sock *)msk; @@ -839,6 +846,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) mptcp_sock_graft(ssk, sk->sk_socket); mptcp_sockopt_sync_locked(msk, ssk); + mptcp_subflow_joined(msk, ssk); return true; } @@ -3485,14 +3493,16 @@ bool mptcp_finish_join(struct sock *ssk) return false; } - if (!list_empty(&subflow->node)) - goto out; + /* active subflow, already present inside the conn_list */ + if (!list_empty(&subflow->node)) { + mptcp_subflow_joined(msk, ssk); + return true; + } if (!mptcp_pm_allow_new_subflow(msk)) goto err_prohibited; - /* active connections are already on conn_list. - * If we can't acquire msk socket lock here, let the release callback + /* If we can't acquire msk socket lock here, let the release callback * handle it */ mptcp_data_lock(parent); @@ -3515,11 +3525,6 @@ bool mptcp_finish_join(struct sock *ssk) return false; } - subflow->map_seq = READ_ONCE(msk->ack_seq); - WRITE_ONCE(msk->allow_infinite_fallback, false); - -out: - mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); return true; } From 9a7e8ec0d4cc64870ea449b4fce5779b77496cbb Mon Sep 17 00:00:00 2001 From: Ism Hong Date: Thu, 1 Jun 2023 17:53:55 +0800 Subject: [PATCH 287/303] riscv: perf: Fix callchain parse error with kernel tracepoint events For RISC-V, when tracing with tracepoint events, the IP and status are set to 0, preventing the perf code parsing the callchain and resolving the symbols correctly. ./ply 'tracepoint:kmem/kmem_cache_alloc { @[stack]=count(); }' @: { }: 1 The fix is to implement perf_arch_fetch_caller_regs for riscv, which fills several necessary registers used for callchain unwinding, including epc, sp, s0 and status. It's similar to commit b3eac0265bf6 ("arm: perf: Fix callchain parse error with kernel tracepoint events") and commit 5b09a094f2fb ("arm64: perf: Fix callchain parse error with kernel tracepoint events"). 
With this patch, callchain can be parsed correctly as: ./ply 'tracepoint:kmem/kmem_cache_alloc { @[stack]=count(); }' @: { __traceiter_kmem_cache_alloc+68 __traceiter_kmem_cache_alloc+68 kmem_cache_alloc+354 __sigqueue_alloc+94 __send_signal_locked+646 send_signal_locked+154 do_send_sig_info+84 __kill_pgrp_info+130 kill_pgrp+60 isig+150 n_tty_receive_signal_char+36 n_tty_receive_buf_standard+2214 n_tty_receive_buf_common+280 n_tty_receive_buf2+26 tty_ldisc_receive_buf+34 tty_port_default_receive_buf+62 flush_to_ldisc+158 process_one_work+458 worker_thread+138 kthread+178 riscv_cpufeature_patch_func+832 }: 1 Signed-off-by: Ism Hong Link: https://lore.kernel.org/r/20230601095355.1168910-1-ism.hong@gmail.com Fixes: 178e9fc47aae ("perf: riscv: preliminary RISC-V support") Cc: stable@vger.kernel.org Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/perf_event.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h index d42c901f9a977..665bbc9b2f840 100644 --- a/arch/riscv/include/asm/perf_event.h +++ b/arch/riscv/include/asm/perf_event.h @@ -10,4 +10,11 @@ #include #define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs + +#define perf_arch_fetch_caller_regs(regs, __ip) { \ + (regs)->epc = (__ip); \ + (regs)->s0 = (unsigned long) __builtin_frame_address(0); \ + (regs)->sp = current_stack_pointer; \ + (regs)->status = SR_PP; \ +} #endif /* _ASM_RISCV_PERF_EVENT_H */ From 42c4e97e06a839b07d834f640a10911ad84ec8b3 Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Thu, 1 Jun 2023 10:21:21 -0400 Subject: [PATCH 288/303] selinux: don't use make's grouped targets feature yet The Linux Kernel currently only requires make v3.82 while the grouped target functionality requires make v4.3. Removed the grouped target introduced in 4ce1f694eb5d ("selinux: ensure av_permissions.h is built when needed") as well as the multiple header file targets in the make rule. This effectively reverts the problem commit. We will revisit this change when make >= 4.3 is required by the rest of the kernel. Cc: stable@vger.kernel.org Fixes: 4ce1f694eb5d ("selinux: ensure av_permissions.h is built when needed") Reported-by: Erwan Velu Reported-by: Luiz Capitulino Tested-by: Luiz Capitulino Signed-off-by: Paul Moore --- security/selinux/Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/security/selinux/Makefile b/security/selinux/Makefile index 0aecf9334ec31..8b21520bd4b9f 100644 --- a/security/selinux/Makefile +++ b/security/selinux/Makefile @@ -26,5 +26,9 @@ quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h cmd_flask = $< $(obj)/flask.h $(obj)/av_permissions.h targets += flask.h av_permissions.h -$(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/genheaders/genheaders FORCE +# once make >= 4.3 is required, we can use grouped targets in the rule below, +# which basically involves adding both headers and a '&' before the colon, see +# the example below: +# $(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/... +$(obj)/flask.h: scripts/selinux/genheaders/genheaders FORCE $(call if_changed,flask) From 403e97d6ab2cb6fd0ac1ff968cd7b691771f1613 Mon Sep 17 00:00:00 2001 From: Francesco Dolcini Date: Wed, 31 May 2023 13:10:38 +0200 Subject: [PATCH 289/303] dt-bindings: serial: 8250_omap: add rs485-rts-active-high Add rs485-rts-active-high property, this was removed by mistake. 
In general we just use the rs485-rts-active-low property, however the OMAP UART for legacy reasons uses the -high one. Fixes: 767d3467eb60 ("dt-bindings: serial: 8250_omap: drop rs485 properties") Closes: https://lore.kernel.org/all/ZGefR4mTHHo1iQ7H@francesco-nb.int.toradex.com/ Signed-off-by: Francesco Dolcini Reviewed-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20230531111038.6302-1-francesco@dolcini.it Signed-off-by: Greg Kroah-Hartman --- Documentation/devicetree/bindings/serial/8250_omap.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/serial/8250_omap.yaml b/Documentation/devicetree/bindings/serial/8250_omap.yaml index eb3488d8f9ee6..6a7be42da523c 100644 --- a/Documentation/devicetree/bindings/serial/8250_omap.yaml +++ b/Documentation/devicetree/bindings/serial/8250_omap.yaml @@ -70,6 +70,7 @@ properties: dsr-gpios: true rng-gpios: true dcd-gpios: true + rs485-rts-active-high: true rts-gpio: true power-domains: true clock-frequency: true From f9010dbdce911ee1f1af1398a24b1f9f992e0080 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 1 Jun 2023 13:32:32 -0500 Subject: [PATCH 290/303] fork, vhost: Use CLONE_THREAD to fix freezer/ps regression When switching from kthreads to vhost_tasks two bugs were added: 1. The vhost worker tasks now show up as processes, so scripts doing ps or ps a would now incorrectly detect the vhost task as another process. 2. kthreads disabled freezing by setting PF_NOFREEZE, but vhost tasks didn't disable it or add support for it. To fix both bugs, this switches the vhost task to be a thread in the process that does the VHOST_SET_OWNER ioctl, and has vhost_worker call get_signal to support SIGKILL/SIGSTOP and freeze signals. Note that SIGKILL/STOP support is required because CLONE_THREAD requires CLONE_SIGHAND, which requires those 2 signals to be supported. This is a modified version of the patch written by Mike Christie, which was a modified version of a patch originally written by Linus. Much of what depended upon PF_IO_WORKER now depends on PF_USER_WORKER, including ignoring signals, setting up the register state, and having get_signal return instead of calling do_group_exit. Tidied up the vhost_task abstraction so that the definition of vhost_task only needs to be visible inside of vhost_task.c, making it easier to review the code and tell what needs to be done where. As part of this the main loop has been moved from vhost_worker into vhost_task_fn. vhost_worker now returns true if work was done. The main loop has been updated to call get_signal, which handles SIGSTOP, freezing, and collects the message that tells the thread to exit as part of process exit. This collection clears __fatal_signal_pending. This collection is not guaranteed to clear signal_pending(), so clear that explicitly so the schedule() sleeps. For now the vhost thread continues to exist and run work until the last file descriptor is closed and the release function is called as part of freeing struct file. To avoid hangs in the coredump rendezvous and when killing threads in a multi-threaded exec, the coredump code and de_thread have been modified to ignore vhost threads. Removing the special case for exec appears to require teaching vhost_dev_flush how to directly complete transactions in case the vhost thread is no longer running. Removing the special case for coredump rendezvous requires either the above fix needed for exec or moving the coredump rendezvous into get_signal.
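Stripped of comments and details, the worker loop resulting from the above (simplified from the vhost_task_fn() added by this patch, not a drop-in replacement) looks roughly like:

	bool dead = false;

	for (;;) {
		if (test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags))
			break;

		if (!dead && signal_pending(current)) {
			struct ksignal ksig;

			/* handles SIGSTOP/freezing; returns true on a fatal signal */
			dead = get_signal(&ksig);
			if (dead)
				clear_thread_flag(TIF_SIGPENDING);
		}

		if (!vtsk->fn(vtsk->data)) {	/* no work was pending */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}
	}

	complete(&vtsk->exited);
	do_exit(0);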
Fixes: 6e890c5d5021 ("vhost: use vhost_tasks for worker threads") Signed-off-by: Eric W. Biederman Co-developed-by: Mike Christie Signed-off-by: Mike Christie Acked-by: Michael S. Tsirkin Signed-off-by: Linus Torvalds --- arch/x86/include/asm/fpu/sched.h | 2 +- arch/x86/kernel/fpu/context.h | 2 +- arch/x86/kernel/fpu/core.c | 2 +- drivers/vhost/vhost.c | 22 ++------ fs/coredump.c | 4 +- include/linux/sched/task.h | 1 - include/linux/sched/vhost_task.h | 15 ++---- kernel/exit.c | 5 +- kernel/fork.c | 13 ++--- kernel/signal.c | 8 +-- kernel/vhost_task.c | 92 +++++++++++++++++++++----------- 11 files changed, 89 insertions(+), 77 deletions(-) diff --git a/arch/x86/include/asm/fpu/sched.h b/arch/x86/include/asm/fpu/sched.h index c2d6cd78ed0c2..78fcde7b1f070 100644 --- a/arch/x86/include/asm/fpu/sched.h +++ b/arch/x86/include/asm/fpu/sched.h @@ -39,7 +39,7 @@ extern void fpu_flush_thread(void); static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu) { if (cpu_feature_enabled(X86_FEATURE_FPU) && - !(current->flags & (PF_KTHREAD | PF_IO_WORKER))) { + !(current->flags & (PF_KTHREAD | PF_USER_WORKER))) { save_fpregs_to_fpstate(old_fpu); /* * The save operation preserved register state, so the diff --git a/arch/x86/kernel/fpu/context.h b/arch/x86/kernel/fpu/context.h index 9fcfa5c4dad79..af5cbdd9bd29a 100644 --- a/arch/x86/kernel/fpu/context.h +++ b/arch/x86/kernel/fpu/context.h @@ -57,7 +57,7 @@ static inline void fpregs_restore_userregs(void) struct fpu *fpu = ¤t->thread.fpu; int cpu = smp_processor_id(); - if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_IO_WORKER))) + if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_USER_WORKER))) return; if (!fpregs_state_valid(fpu, cpu)) { diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index caf33486dc5ee..1015af1ae562b 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -426,7 +426,7 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask) this_cpu_write(in_kernel_fpu, true); - if (!(current->flags & (PF_KTHREAD | PF_IO_WORKER)) && + if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) && !test_thread_flag(TIF_NEED_FPU_LOAD)) { set_thread_flag(TIF_NEED_FPU_LOAD); save_fpregs_to_fpstate(¤t->thread.fpu); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index a92af08e78645..0742730208495 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -256,7 +256,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) * test_and_set_bit() implies a memory barrier. 
*/ llist_add(&work->node, &dev->worker->work_list); - wake_up_process(dev->worker->vtsk->task); + vhost_task_wake(dev->worker->vtsk); } } EXPORT_SYMBOL_GPL(vhost_work_queue); @@ -333,31 +333,19 @@ static void vhost_vq_reset(struct vhost_dev *dev, __vhost_vq_meta_reset(vq); } -static int vhost_worker(void *data) +static bool vhost_worker(void *data) { struct vhost_worker *worker = data; struct vhost_work *work, *work_next; struct llist_node *node; - for (;;) { - /* mb paired w/ kthread_stop */ - set_current_state(TASK_INTERRUPTIBLE); - - if (vhost_task_should_stop(worker->vtsk)) { - __set_current_state(TASK_RUNNING); - break; - } - - node = llist_del_all(&worker->work_list); - if (!node) - schedule(); - + node = llist_del_all(&worker->work_list); + if (node) { node = llist_reverse_order(node); /* make sure flag is seen after deletion */ smp_wmb(); llist_for_each_entry_safe(work, work_next, node, node) { clear_bit(VHOST_WORK_QUEUED, &work->flags); - __set_current_state(TASK_RUNNING); kcov_remote_start_common(worker->kcov_handle); work->fn(work); kcov_remote_stop(); @@ -365,7 +353,7 @@ static int vhost_worker(void *data) } } - return 0; + return !!node; } static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq) diff --git a/fs/coredump.c b/fs/coredump.c index ece7badf701bc..88740c51b9420 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -371,7 +371,9 @@ static int zap_process(struct task_struct *start, int exit_code) if (t != current && !(t->flags & PF_POSTCOREDUMP)) { sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); - nr++; + /* The vhost_worker does not particpate in coredumps */ + if ((t->flags & (PF_USER_WORKER | PF_IO_WORKER)) != PF_USER_WORKER) + nr++; } } diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 537cbf9a2adea..e0f5ac90a228b 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -29,7 +29,6 @@ struct kernel_clone_args { u32 io_thread:1; u32 user_worker:1; u32 no_files:1; - u32 ignore_signals:1; unsigned long stack; unsigned long stack_size; unsigned long tls; diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h index 6123c10b99cf3..837a23624a66a 100644 --- a/include/linux/sched/vhost_task.h +++ b/include/linux/sched/vhost_task.h @@ -2,22 +2,13 @@ #ifndef _LINUX_VHOST_TASK_H #define _LINUX_VHOST_TASK_H -#include -struct task_struct; +struct vhost_task; -struct vhost_task { - int (*fn)(void *data); - void *data; - struct completion exited; - unsigned long flags; - struct task_struct *task; -}; - -struct vhost_task *vhost_task_create(int (*fn)(void *), void *arg, +struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg, const char *name); void vhost_task_start(struct vhost_task *vtsk); void vhost_task_stop(struct vhost_task *vtsk); -bool vhost_task_should_stop(struct vhost_task *vtsk); +void vhost_task_wake(struct vhost_task *vtsk); #endif diff --git a/kernel/exit.c b/kernel/exit.c index 34b90e2e7cf7f..edb50b4c99728 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -411,7 +411,10 @@ static void coredump_task_exit(struct task_struct *tsk) tsk->flags |= PF_POSTCOREDUMP; core_state = tsk->signal->core_state; spin_unlock_irq(&tsk->sighand->siglock); - if (core_state) { + + /* The vhost_worker does not particpate in coredumps */ + if (core_state && + ((tsk->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)) { struct core_thread self; self.task = current; diff --git a/kernel/fork.c b/kernel/fork.c index ed4e01daccaa0..81cba91f30bbe 100644 --- a/kernel/fork.c +++ 
b/kernel/fork.c @@ -2336,16 +2336,16 @@ __latent_entropy struct task_struct *copy_process( p->flags &= ~PF_KTHREAD; if (args->kthread) p->flags |= PF_KTHREAD; - if (args->user_worker) - p->flags |= PF_USER_WORKER; - if (args->io_thread) { + if (args->user_worker) { /* - * Mark us an IO worker, and block any signal that isn't + * Mark us a user worker, and block any signal that isn't * fatal or STOP */ - p->flags |= PF_IO_WORKER; + p->flags |= PF_USER_WORKER; siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP)); } + if (args->io_thread) + p->flags |= PF_IO_WORKER; if (args->name) strscpy_pad(p->comm, args->name, sizeof(p->comm)); @@ -2517,9 +2517,6 @@ __latent_entropy struct task_struct *copy_process( if (retval) goto bad_fork_cleanup_io; - if (args->ignore_signals) - ignore_signals(p); - stackleak_task_init(p); if (pid != &init_struct_pid) { diff --git a/kernel/signal.c b/kernel/signal.c index 8f6330f0e9ca3..2547fa73bde51 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1368,7 +1368,9 @@ int zap_other_threads(struct task_struct *p) while_each_thread(p, t) { task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); - count++; + /* Don't require de_thread to wait for the vhost_worker */ + if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER) + count++; /* Don't bother with already dead threads */ if (t->exit_state) @@ -2861,11 +2863,11 @@ bool get_signal(struct ksignal *ksig) } /* - * PF_IO_WORKER threads will catch and exit on fatal signals + * PF_USER_WORKER threads will catch and exit on fatal signals * themselves. They have cleanup that must be performed, so * we cannot call do_exit() on their behalf. */ - if (current->flags & PF_IO_WORKER) + if (current->flags & PF_USER_WORKER) goto out; /* diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c index b7cbd66f889ea..f80d5c51ae671 100644 --- a/kernel/vhost_task.c +++ b/kernel/vhost_task.c @@ -12,58 +12,88 @@ enum vhost_task_flags { VHOST_TASK_FLAGS_STOP, }; +struct vhost_task { + bool (*fn)(void *data); + void *data; + struct completion exited; + unsigned long flags; + struct task_struct *task; +}; + static int vhost_task_fn(void *data) { struct vhost_task *vtsk = data; - int ret; + bool dead = false; + + for (;;) { + bool did_work; + + /* mb paired w/ vhost_task_stop */ + if (test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags)) + break; + + if (!dead && signal_pending(current)) { + struct ksignal ksig; + /* + * Calling get_signal will block in SIGSTOP, + * or clear fatal_signal_pending, but remember + * what was set. + * + * This thread won't actually exit until all + * of the file descriptors are closed, and + * the release function is called. + */ + dead = get_signal(&ksig); + if (dead) + clear_thread_flag(TIF_SIGPENDING); + } + + did_work = vtsk->fn(vtsk->data); + if (!did_work) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } + } - ret = vtsk->fn(vtsk->data); complete(&vtsk->exited); - do_exit(ret); + do_exit(0); +} + +/** + * vhost_task_wake - wakeup the vhost_task + * @vtsk: vhost_task to wake + * + * wake up the vhost_task worker thread + */ +void vhost_task_wake(struct vhost_task *vtsk) +{ + wake_up_process(vtsk->task); } +EXPORT_SYMBOL_GPL(vhost_task_wake); /** * vhost_task_stop - stop a vhost_task * @vtsk: vhost_task to stop * - * Callers must call vhost_task_should_stop and return from their worker - * function when it returns true; + * vhost_task_fn ensures the worker thread exits after + * VHOST_TASK_FLAGS_SOP becomes true. 
*/ void vhost_task_stop(struct vhost_task *vtsk) { - pid_t pid = vtsk->task->pid; - set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags); - wake_up_process(vtsk->task); + vhost_task_wake(vtsk); /* * Make sure vhost_task_fn is no longer accessing the vhost_task before - * freeing it below. If userspace crashed or exited without closing, - * then the vhost_task->task could already be marked dead so - * kernel_wait will return early. + * freeing it below. */ wait_for_completion(&vtsk->exited); - /* - * If we are just closing/removing a device and the parent process is - * not exiting then reap the task. - */ - kernel_wait4(pid, NULL, __WCLONE, NULL); kfree(vtsk); } EXPORT_SYMBOL_GPL(vhost_task_stop); /** - * vhost_task_should_stop - should the vhost task return from the work function - * @vtsk: vhost_task to stop - */ -bool vhost_task_should_stop(struct vhost_task *vtsk) -{ - return test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags); -} -EXPORT_SYMBOL_GPL(vhost_task_should_stop); - -/** - * vhost_task_create - create a copy of a process to be used by the kernel - * @fn: thread stack + * vhost_task_create - create a copy of a task to be used by the kernel + * @fn: vhost worker function * @arg: data to be passed to fn * @name: the thread's name * @@ -71,17 +101,17 @@ EXPORT_SYMBOL_GPL(vhost_task_should_stop); * failure. The returned task is inactive, and the caller must fire it up * through vhost_task_start(). */ -struct vhost_task *vhost_task_create(int (*fn)(void *), void *arg, +struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg, const char *name) { struct kernel_clone_args args = { - .flags = CLONE_FS | CLONE_UNTRACED | CLONE_VM, + .flags = CLONE_FS | CLONE_UNTRACED | CLONE_VM | + CLONE_THREAD | CLONE_SIGHAND, .exit_signal = 0, .fn = vhost_task_fn, .name = name, .user_worker = 1, .no_files = 1, - .ignore_signals = 1, }; struct vhost_task *vtsk; struct task_struct *tsk; From fadb74f9f2f609238070c7ca1b04933dc9400e4a Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 1 Jun 2023 14:23:31 -0700 Subject: [PATCH 291/303] module/decompress: Fix error checking on zstd decompression While implementing support for in-kernel decompression in kmod, finit_module() was returning a very suspicious value: finit_module(3, "", MODULE_INIT_COMPRESSED_FILE) = 18446744072717407296 It turns out the check for module_get_next_page() failing is wrong, and hence the decompression was not really taking place. Invert the condition to fix it. 
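For reference, the idiom the fix restores is the standard ERR_PTR convention (a generic sketch of the corrected flow, not additional code beyond the one-line diff below):

	struct page *page = module_get_next_page(info);

	if (IS_ERR(page)) {			/* allocation or mapping failed */
		retval = PTR_ERR(page);		/* propagate the negative errno */
		goto out;
	}
	/* only a valid page pointer reaches the decompression step */

With the test negated, every successfully returned page took the error branch, so the loop bailed out immediately and no decompression actually took place.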
Fixes: 169a58ad824d ("module/decompress: Support zstd in-kernel decompression") Cc: stable@kernel.org Cc: Luis Chamberlain Cc: Dmitry Torokhov Cc: Stephen Boyd Signed-off-by: Lucas De Marchi Signed-off-by: Luis Chamberlain --- kernel/module/decompress.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c index e97232b125ebc..8a5d6d63b06cb 100644 --- a/kernel/module/decompress.c +++ b/kernel/module/decompress.c @@ -257,7 +257,7 @@ static ssize_t module_zstd_decompress(struct load_info *info, do { struct page *page = module_get_next_page(info); - if (!IS_ERR(page)) { + if (IS_ERR(page)) { retval = PTR_ERR(page); goto out; } From 835e5ac3f98e78eca5c512bd48bd1880b90c4eb1 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Fri, 28 Apr 2023 14:01:19 +0200 Subject: [PATCH 292/303] riscv: Fix huge_ptep_set_wrprotect when PTE is a NAPOT We need to avoid inconsistencies across the PTEs that form a NAPOT region, so when we write protect such a region, we should clear and flush all the PTEs to make sure that any of those PTEs is not cached which would result in such inconsistencies (arm64 does the same). Fixes: 82a1a1f3bfb6 ("riscv: mm: support Svnapot in hugetlb page") Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20230428120120.21620-1-alexghiti@rivosinc.com Cc: stable@vger.kernel.org Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/hugetlbpage.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c index a163a3e0f0d46..238d00bdac147 100644 --- a/arch/riscv/mm/hugetlbpage.c +++ b/arch/riscv/mm/hugetlbpage.c @@ -218,6 +218,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm, { pte_t pte = ptep_get(ptep); unsigned long order; + pte_t orig_pte; int i, pte_num; if (!pte_napot(pte)) { @@ -228,9 +229,12 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm, order = napot_cont_order(pte); pte_num = napot_pte_num(order); ptep = huge_pte_offset(mm, addr, napot_cont_size(order)); + orig_pte = get_clear_contig_flush(mm, addr, ptep, pte_num); + + orig_pte = pte_wrprotect(orig_pte); for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) - ptep_set_wrprotect(mm, addr, ptep); + set_pte_at(mm, addr, ptep, orig_pte); } pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, From 6966d7988c4fb6af3e395868e9800c07f9e98a30 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Fri, 28 Apr 2023 14:01:20 +0200 Subject: [PATCH 293/303] riscv: Implement missing huge_ptep_get huge_ptep_get must be reimplemented in order to go through all the PTEs of a NAPOT region: this is needed because the HW can update the A/D bits of any of the PTE that constitutes the NAPOT region. 
Fixes: 82a1a1f3bfb6 ("riscv: mm: support Svnapot in hugetlb page") Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20230428120120.21620-2-alexghiti@rivosinc.com Cc: stable@vger.kernel.org Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/hugetlb.h | 3 +++ arch/riscv/mm/hugetlbpage.c | 24 ++++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h index fe6f230066416..ce1ebda1a49a7 100644 --- a/arch/riscv/include/asm/hugetlb.h +++ b/arch/riscv/include/asm/hugetlb.h @@ -36,6 +36,9 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty); +#define __HAVE_ARCH_HUGE_PTEP_GET +pte_t huge_ptep_get(pte_t *ptep); + pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags); #define arch_make_huge_pte arch_make_huge_pte diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c index 238d00bdac147..e0ef56dc57b90 100644 --- a/arch/riscv/mm/hugetlbpage.c +++ b/arch/riscv/mm/hugetlbpage.c @@ -3,6 +3,30 @@ #include #ifdef CONFIG_RISCV_ISA_SVNAPOT +pte_t huge_ptep_get(pte_t *ptep) +{ + unsigned long pte_num; + int i; + pte_t orig_pte = ptep_get(ptep); + + if (!pte_present(orig_pte) || !pte_napot(orig_pte)) + return orig_pte; + + pte_num = napot_pte_num(napot_cont_order(orig_pte)); + + for (i = 0; i < pte_num; i++, ptep++) { + pte_t pte = ptep_get(ptep); + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + + return orig_pte; +} + pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, From 3582e74599d376bc18cae123045cd295360d885b Mon Sep 17 00:00:00 2001 From: Ojaswin Mujoo Date: Tue, 30 May 2023 18:03:39 +0530 Subject: [PATCH 294/303] Revert "ext4: remove ac->ac_found > sbi->s_mb_min_to_scan dead check in ext4_mb_check_limits" This reverts commit 32c0869370194ae5ac9f9f501953ef693040f6a1. The reverted commit was intended to remove a dead check however it was observed that this check was actually being used to exit early instead of looping sbi->s_mb_max_to_scan times when we are able to find a free extent bigger than the goal extent. Due to this, a my performance tests (fsmark, parallel file writes in a highly fragmented FS) were seeing a 2x-3x regression. Example, the default value of the following variables is: sbi->s_mb_max_to_scan = 200 sbi->s_mb_min_to_scan = 10 In ext4_mb_check_limits() if we find an extent smaller than goal, then we return early and try again. This loop will go on until we have processed sbi->s_mb_max_to_scan(=200) number of free extents at which point we exit and just use whatever we have even if it is smaller than goal extent. Now, the regression comes when we find an extent bigger than goal. Earlier, in this case we would loop only sbi->s_mb_min_to_scan(=10) times and then just use the bigger extent. However with commit 32c08693 that check was removed and hence we would loop sbi->s_mb_max_to_scan(=200) times even though we have a big enough free extent to satisfy the request. The only time we would exit early would be when the free extent is *exactly* the size of our goal, which is pretty uncommon occurrence and so we would almost always end up looping 200 times. Hence, revert the commit by adding the check back to fix the regression. Also add a comment to outline this policy. 
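The policy restored by the revert, mirroring the comment added in the diff below, can be sketched in pseudo-C as follows (use_best() stands in for ext4_mb_use_best_found(); this shows the policy, not the literal ext4_mb_check_limits() code):

	/* bex: best free extent found so far, gex: goal extent */
	if (bex->fe_len == gex->fe_len)
		use_best();	/* exact fit: stop scanning right away */
	else if (bex->fe_len < gex->fe_len &&
		 ac->ac_found > sbi->s_mb_max_to_scan)
		use_best();	/* scanned enough (default 200): settle for it */
	else if (bex->fe_len > gex->fe_len &&
		 ac->ac_found > sbi->s_mb_min_to_scan)
		use_best();	/* big enough after a short scan (default 10) */

The reverted change had dropped the third arm, so a larger-than-goal extent was only taken after the full s_mb_max_to_scan iterations, which is where the reported 2x-3x regression came from.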
Fixes: 32c086937019 ("ext4: remove ac->ac_found > sbi->s_mb_min_to_scan dead check in ext4_mb_check_limits") Signed-off-by: Ojaswin Mujoo Reviewed-by: Ritesh Harjani (IBM) Reviewed-by: Kemeng Shi Link: https://lore.kernel.org/r/ddcae9658e46880dfec2fb0aa61d01fb3353d202.1685449706.git.ojaswin@linux.ibm.com Signed-off-by: Theodore Ts'o --- fs/ext4/mballoc.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 7b2e36d103cb1..20f67a260df50 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2062,7 +2062,7 @@ static void ext4_mb_check_limits(struct ext4_allocation_context *ac, if (bex->fe_len < gex->fe_len) return; - if (finish_group) + if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan) ext4_mb_use_best_found(ac, e4b); } @@ -2074,6 +2074,20 @@ static void ext4_mb_check_limits(struct ext4_allocation_context *ac, * in the context. Later, the best found extent will be used, if * mballoc can't find good enough extent. * + * The algorithm used is roughly as follows: + * + * * If free extent found is exactly as big as goal, then + * stop the scan and use it immediately + * + * * If free extent found is smaller than goal, then keep retrying + * upto a max of sbi->s_mb_max_to_scan times (default 200). After + * that stop scanning and use whatever we have. + * + * * If free extent found is bigger than goal, then keep retrying + * upto a max of sbi->s_mb_min_to_scan times (default 10) before + * stopping the scan and using the extent. + * + * * FIXME: real allocation policy is to be designed yet! */ static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, From 4ecd704a4c51fd95973fcc3a60444e0e24eb9439 Mon Sep 17 00:00:00 2001 From: Lino Sanfilippo Date: Tue, 30 May 2023 18:41:16 +0200 Subject: [PATCH 295/303] tpm, tpm_tis: correct tpm_tis_flags enumeration values With commit 858e8b792d06 ("tpm, tpm_tis: Avoid cache incoherency in test for interrupts") bit accessor functions are used to access flags in tpm_tis_data->flags. However these functions expect bit numbers, while the flags are defined as bit masks in enum tpm_tis_flag. Fix this inconsistency by using numbers instead of masks also for the flags in the enum. Reported-by: Pavel Machek Fixes: 858e8b792d06 ("tpm, tpm_tis: Avoid cache incoherency in test for interrupts") Signed-off-by: Lino Sanfilippo Cc: stable@vger.kernel.org Reviewed-by: Pavel Machek Signed-off-by: Linus Torvalds --- drivers/char/tpm/tpm_tis_core.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index e978f457fd4d4..610bfadb6acf1 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -84,10 +84,10 @@ enum tis_defaults { #define ILB_REMAP_SIZE 0x100 enum tpm_tis_flags { - TPM_TIS_ITPM_WORKAROUND = BIT(0), - TPM_TIS_INVALID_STATUS = BIT(1), - TPM_TIS_DEFAULT_CANCELLATION = BIT(2), - TPM_TIS_IRQ_TESTED = BIT(3), + TPM_TIS_ITPM_WORKAROUND = 0, + TPM_TIS_INVALID_STATUS = 1, + TPM_TIS_DEFAULT_CANCELLATION = 2, + TPM_TIS_IRQ_TESTED = 3, }; struct tpm_tis_data { From 817fa998362d6ea9fabd5e97af8e9e2eb5f0e6f2 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 1 Jun 2023 18:01:37 -0700 Subject: [PATCH 296/303] KVM: x86/mmu: Grab memslot for correct address space in NX recovery worker Factor in the address space (non-SMM vs. 
SMM) of the target shadow page when recovering potential NX huge pages, otherwise KVM will retrieve the wrong memslot when zapping shadow pages that were created for SMM. The bug most visibly manifests as a WARN on the memslot being non-NULL, but the worst case scenario is that KVM could unaccount the shadow page without ensuring KVM won't install a huge page, i.e. if the non-SMM slot is being dirty logged, but the SMM slot is not. ------------[ cut here ]------------ WARNING: CPU: 1 PID: 3911 at arch/x86/kvm/mmu/mmu.c:7015 kvm_nx_huge_page_recovery_worker+0x38c/0x3d0 [kvm] CPU: 1 PID: 3911 Comm: kvm-nx-lpage-re RIP: 0010:kvm_nx_huge_page_recovery_worker+0x38c/0x3d0 [kvm] RSP: 0018:ffff99b284f0be68 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffff99b284edd000 RCX: 0000000000000000 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 RBP: ffff9271397024e0 R08: 0000000000000000 R09: ffff927139702450 R10: 0000000000000000 R11: 0000000000000001 R12: ffff99b284f0be98 R13: 0000000000000000 R14: ffff9270991fcd80 R15: 0000000000000003 FS: 0000000000000000(0000) GS:ffff927f9f640000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f0aacad3ae0 CR3: 000000088fc2c005 CR4: 00000000003726e0 Call Trace: __pfx_kvm_nx_huge_page_recovery_worker+0x10/0x10 [kvm] kvm_vm_worker_thread+0x106/0x1c0 [kvm] kthread+0xd9/0x100 ret_from_fork+0x2c/0x50 ---[ end trace 0000000000000000 ]--- This bug was exposed by commit edbdb43fc96b ("KVM: x86: Preserve TDP MMU roots until they are explicitly invalidated"), which allowed KVM to retain SMM TDP MMU roots effectively indefinitely. Before commit edbdb43fc96b, KVM would zap all SMM TDP MMU roots and thus all SMM TDP MMU shadow pages once all vCPUs exited SMM, which made the window where this bug (recovering an SMM NX huge page) could be encountered quite tiny. To hit the bug, the NX recovery thread would have to run while at least one vCPU was in SMM. Most VMs typically only use SMM during boot, and so the problematic shadow pages were gone by the time the NX recovery thread ran. Now that KVM preserves TDP MMU roots until they are explicitly invalidated (e.g. by a memslot deletion), the window to trigger the bug is effectively never closed because most VMMs don't delete memslots after boot (except for a handful of special scenarios). Fixes: eb298605705a ("KVM: x86/mmu: Do not recover dirty-tracked NX Huge Pages") Reported-by: Fabio Coatti Closes: https://lore.kernel.org/all/CADpTngX9LESCdHVu_2mQkNGena_Ng2CphWNwsRGSMxzDsTjU2A@mail.gmail.com Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230602010137.784664-1-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c8961f45e3b1c..6eaa3d6994aeb 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -7091,7 +7091,10 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm) */ slot = NULL; if (atomic_read(&kvm->nr_memslots_dirty_logging)) { - slot = gfn_to_memslot(kvm, sp->gfn); + struct kvm_memslots *slots; + + slots = kvm_memslots_for_spte_role(kvm, sp->role); + slot = __gfn_to_memslot(slots, sp->gfn); WARN_ON_ONCE(!slot); } From b2ce89978889b848a6dd652695dd1887e416f9d2 Mon Sep 17 00:00:00 2001 From: "Maciej S. 
Szmigiero" Date: Fri, 19 May 2023 13:26:18 +0200 Subject: [PATCH 297/303] KVM: SVM: vNMI pending bit is V_NMI_PENDING_MASK not V_NMI_BLOCKING_MASK While testing Hyper-V enabled Windows Server 2019 guests on Zen4 hardware I noticed that with vCPU count large enough (> 16) they sometimes froze at boot. With vCPU count of 64 they never booted successfully - suggesting some kind of a race condition. Since adding "vnmi=0" module parameter made these guests boot successfully it was clear that the problem is most likely (v)NMI-related. Running kvm-unit-tests quickly showed failing NMI-related tests cases, like "multiple nmi" and "pending nmi" from apic-split, x2apic and xapic tests and the NMI parts of eventinj test. The issue was that once one NMI was being serviced no other NMI was allowed to be set pending (NMI limit = 0), which was traced to svm_is_vnmi_pending() wrongly testing for the "NMI blocked" flag rather than for the "NMI pending" flag. Fix this by testing for the right flag in svm_is_vnmi_pending(). Once this is done, the NMI-related kvm-unit-tests pass successfully and the Windows guest no longer freezes at boot. Fixes: fa4c027a7956 ("KVM: x86: Add support for SVM's Virtual NMI") Signed-off-by: Maciej S. Szmigiero Reviewed-by: Sean Christopherson Link: https://lore.kernel.org/r/be4ca192eb0c1e69a210db3009ca984e6a54ae69.1684495380.git.maciej.szmigiero@oracle.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index ca32389f3c36b..54089f990c8f8 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3510,7 +3510,7 @@ static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu) if (!is_vnmi_enabled(svm)) return false; - return !!(svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK); + return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK); } static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu) From 8b703a49c9df5e74870381ad7ba9c85d8a74ed2c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 1 Jun 2023 18:19:19 -0700 Subject: [PATCH 298/303] KVM: x86: Account fastpath-only VM-Exits in vCPU stats Increment vcpu->stat.exits when handling a fastpath VM-Exit without going through any part of the "slow" path. Not bumping the exits stat can result in wildly misleading exit counts, e.g. if the primary reason the guest is exiting is to program the TSC deadline timer. Fixes: 404d5d7bff0d ("KVM: X86: Introduce more exit_fastpath_completion enum values") Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230602011920.787844-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c0778ca39650f..04b57a336b34e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10758,6 +10758,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; break; } + + /* Note, VM-Exits that go down the "slow" path are accounted below. */ + ++vcpu->stat.exits; } /* From 4364b287982bd05bfafa461c80650c732001974b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 2 Jun 2023 16:32:48 -0700 Subject: [PATCH 299/303] KVM: x86: Bail from kvm_recalculate_phys_map() if x2APIC ID is out-of-bounds Bail from kvm_recalculate_phys_map() and disable the optimized map if the target vCPU's x2APIC ID is out-of-bounds, i.e. if the vCPU was added and/or enabled its local APIC after the map was allocated. 
This fixes an out-of-bounds access bug in the !x2apic_format path where KVM would write beyond the end of phys_map. Check the x2APIC ID regardless of whether or not x2APIC is enabled, as KVM hardcodes the x2APIC ID to be the vCPU ID, i.e. it can't change, and the map allocation in kvm_recalculate_apic_map() doesn't check for x2APIC being enabled, i.e. the check won't get false positives. Note, this also affects the x2apic_format path, which previously just ignored the "x2apic_id > new->max_apic_id" case. That too is arguably a bug fix, as ignoring the vCPU meant that KVM would not send interrupts to the vCPU until the next map recalculation. In practice, that "bug" is likely benign as a newly present vCPU/APIC would immediately trigger a recalc. But, there's no functional downside to disabling the map, and a future patch will gracefully handle the -E2BIG case by retrying instead of simply disabling the optimized map. Opportunistically add a sanity check on the xAPIC ID size, along with a comment explaining why the xAPIC ID is guaranteed to be "good". Reported-by: Michal Luczaj Fixes: 5b84b0291702 ("KVM: x86: Honor architectural behavior for aliased 8-bit APIC IDs") Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230602233250.1014316-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/lapic.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index e542cf285b518..3c300a196bdf9 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -228,6 +228,23 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new, u32 xapic_id = kvm_xapic_id(apic); u32 physical_id; + /* + * For simplicity, KVM always allocates enough space for all possible + * xAPIC IDs. Yell, but don't kill the VM, as KVM can continue on + * without the optimized map. + */ + if (WARN_ON_ONCE(xapic_id > new->max_apic_id)) + return -EINVAL; + + /* + * Bail if a vCPU was added and/or enabled its APIC between allocating + * the map and doing the actual calculations for the map. Note, KVM + * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if + * the compiler decides to reload x2apic_id after this check. + */ + if (x2apic_id > new->max_apic_id) + return -E2BIG; + /* * Deliberately truncate the vCPU ID when detecting a mismatched APIC * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a @@ -253,8 +270,7 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new, */ if (vcpu->kvm->arch.x2apic_format) { /* See also kvm_apic_match_physical_addr(). */ - if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) && - x2apic_id <= new->max_apic_id) + if (apic_x2apic_mode(apic) || x2apic_id > 0xff) new->phys_map[x2apic_id] = apic; if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id]) From 47d2804bc99ca873470df17c20737b28225a320d Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Fri, 2 Jun 2023 16:32:50 -0700 Subject: [PATCH 300/303] KVM: selftests: Add test for race in kvm_recalculate_apic_map() Keep switching between LAPIC_MODE_X2APIC and LAPIC_MODE_DISABLED during APIC map construction to hunt for TOCTOU bugs in KVM. KVM's optimized map recalc makes multiple passes over the list of vCPUs, and the calculations ignore vCPUs whose APIC is hardware-disabled, i.e. there's a window where toggling LAPIC_MODE_DISABLED is quite interesting.
Signed-off-by: Michal Luczaj Co-developed-by: Sean Christopherson Link: https://lore.kernel.org/r/20230602233250.1014316-4-seanjc@google.com Signed-off-by: Sean Christopherson --- tools/testing/selftests/kvm/Makefile | 1 + .../kvm/x86_64/recalc_apic_map_test.c | 74 +++++++++++++++++++ 2 files changed, 75 insertions(+) create mode 100644 tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 7a5ff646e7e79..4761b768b773c 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -116,6 +116,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests TEST_GEN_PROGS_x86_64 += x86_64/amx_test TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test +TEST_GEN_PROGS_x86_64 += x86_64/recalc_apic_map_test TEST_GEN_PROGS_x86_64 += access_tracking_perf_test TEST_GEN_PROGS_x86_64 += demand_paging_test TEST_GEN_PROGS_x86_64 += dirty_log_test diff --git a/tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c b/tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c new file mode 100644 index 0000000000000..4c416ebe7d66d --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Test edge cases and race conditions in kvm_recalculate_apic_map(). + */ + +#include +#include +#include + +#include "processor.h" +#include "test_util.h" +#include "kvm_util.h" +#include "apic.h" + +#define TIMEOUT 5 /* seconds */ + +#define LAPIC_DISABLED 0 +#define LAPIC_X2APIC (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) +#define MAX_XAPIC_ID 0xff + +static void *race(void *arg) +{ + struct kvm_lapic_state lapic = {}; + struct kvm_vcpu *vcpu = arg; + + while (1) { + /* Trigger kvm_recalculate_apic_map(). */ + vcpu_ioctl(vcpu, KVM_SET_LAPIC, &lapic); + pthread_testcancel(); + } + + return NULL; +} + +int main(void) +{ + struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; + struct kvm_vcpu *vcpuN; + struct kvm_vm *vm; + pthread_t thread; + time_t t; + int i; + + kvm_static_assert(KVM_MAX_VCPUS > MAX_XAPIC_ID); + + /* + * Create the max number of vCPUs supported by selftests so that KVM + * has decent amount of work to do when recalculating the map, i.e. to + * make the problematic window large enough to hit. + */ + vm = vm_create_with_vcpus(KVM_MAX_VCPUS, NULL, vcpus); + + /* + * Enable x2APIC on all vCPUs so that KVM doesn't bail from the recalc + * due to vCPUs having aliased xAPIC IDs (truncated to 8 bits). + */ + for (i = 0; i < KVM_MAX_VCPUS; i++) + vcpu_set_msr(vcpus[i], MSR_IA32_APICBASE, LAPIC_X2APIC); + + ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0); + + vcpuN = vcpus[KVM_MAX_VCPUS - 1]; + for (t = time(NULL) + TIMEOUT; time(NULL) < t;) { + vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_X2APIC); + vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_DISABLED); + } + + ASSERT_EQ(pthread_cancel(thread), 0); + ASSERT_EQ(pthread_join(thread, NULL), 0); + + kvm_vm_free(vm); + + return 0; +} From eb50d0f250e96ede9192d936d220cd97adc93b89 Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Sun, 19 Mar 2023 11:53:32 +0900 Subject: [PATCH 301/303] selftests/ftrace: Choose target function for filter test from samples Since the event-filter-function.tc expects the 'exit_mmap()' directly calls 'kmem_cache_free()', this is vulnerable to code modifications. 
Choose the target function for the filter test from the sample event data so that it can keep test running correctly even if the caller function name will be changed. Link: https://lore.kernel.org/linux-trace-kernel/167919441260.1922645.18355804179347364057.stgit@mhiramat.roam.corp.google.com/ Link: https://lore.kernel.org/all/CA+G9fYtF-XEKi9YNGgR=Kf==7iRb2FrmEC7qtwAeQbfyah-UhA@mail.gmail.com/ Reported-by: Linux Kernel Functional Testing Fixes: 7f09d639b8c4 ("tracing/selftests: Add test for event filtering on function name") Signed-off-by: Masami Hiramatsu (Google) Acked-by: Steven Rostedt (Google) --- .../test.d/filter/event-filter-function.tc | 45 +++++++++++-------- 1 file changed, 27 insertions(+), 18 deletions(-) diff --git a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc index e2ff3bf4df80f..2de7c61d1ae30 100644 --- a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc +++ b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc @@ -9,18 +9,33 @@ fail() { #msg exit_fail } -echo "Test event filter function name" +sample_events() { + echo > trace + echo 1 > events/kmem/kmem_cache_free/enable + echo 1 > tracing_on + ls > /dev/null + echo 0 > tracing_on + echo 0 > events/kmem/kmem_cache_free/enable +} + echo 0 > tracing_on echo 0 > events/enable + +echo "Get the most frequently calling function" +sample_events + +target_func=`cut -d: -f3 trace | sed 's/call_site=\([^+]*\)+0x.*/\1/' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'` +if [ -z "$target_func" ]; then + exit_fail +fi echo > trace -echo 'call_site.function == exit_mmap' > events/kmem/kmem_cache_free/filter -echo 1 > events/kmem/kmem_cache_free/enable -echo 1 > tracing_on -ls > /dev/null -echo 0 > events/kmem/kmem_cache_free/enable -hitcnt=`grep kmem_cache_free trace| grep exit_mmap | wc -l` -misscnt=`grep kmem_cache_free trace| grep -v exit_mmap | wc -l` +echo "Test event filter function name" +echo "call_site.function == $target_func" > events/kmem/kmem_cache_free/filter +sample_events + +hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l` +misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l` if [ $hitcnt -eq 0 ]; then exit_fail @@ -30,20 +45,14 @@ if [ $misscnt -gt 0 ]; then exit_fail fi -address=`grep ' exit_mmap$' /proc/kallsyms | cut -d' ' -f1` +address=`grep " ${target_func}\$" /proc/kallsyms | cut -d' ' -f1` echo "Test event filter function address" -echo 0 > tracing_on -echo 0 > events/enable -echo > trace echo "call_site.function == 0x$address" > events/kmem/kmem_cache_free/filter -echo 1 > events/kmem/kmem_cache_free/enable -echo 1 > tracing_on -sleep 1 -echo 0 > events/kmem/kmem_cache_free/enable +sample_events -hitcnt=`grep kmem_cache_free trace| grep exit_mmap | wc -l` -misscnt=`grep kmem_cache_free trace| grep -v exit_mmap | wc -l` +hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l` +misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l` if [ $hitcnt -eq 0 ]; then exit_fail From b05d39466ba111fc3775d5d46180b73c34ebe8f7 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Mon, 15 May 2023 09:26:04 -0700 Subject: [PATCH 302/303] leds: qcom-lpg: Fix PWM period limits The introduction of high resolution PWM support changed the order of the operations in the calculation of min and max period. The result in both divisions is in most cases a truncation to 0, which limits the period to the range of [0, 0]. 
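For illustration, with a hypothetical 19.2 MHz source clock and 9-bit resolution (the real values come from clk_rate_arr[] and pwm_resolution_arr[] at run time):

    div64_u64(1 << 9, 19200000);                        /* = 0, so min_period = NSEC_PER_SEC * 0 = 0 */
    div64_u64((u64)NSEC_PER_SEC * (1 << 9), 19200000);  /* = 26666 ns, the intended lower bound */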
Both numerators (and denominators) are within 64 bits, so the whole expression can be put directly into the div64_u64, instead of doing it partially. Fixes: b00d2ed37617 ("leds: rgb: leds-qcom-lpg: Add support for high resolution PWM") Reviewed-by: Caleb Connolly Tested-by: Steev Klimaszewski Signed-off-by: Bjorn Andersson Acked-by: Lee Jones Tested-by: Johan Hovold Tested-by: Neil Armstrong # on SM8550-QRD Link: https://lore.kernel.org/r/20230515162604.649203-1-quic_bjorande@quicinc.com Signed-off-by: Johan Hovold --- drivers/leds/rgb/leds-qcom-lpg.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c index 55a037234df1b..1c849814a4917 100644 --- a/drivers/leds/rgb/leds-qcom-lpg.c +++ b/drivers/leds/rgb/leds-qcom-lpg.c @@ -312,14 +312,14 @@ static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period) max_res = LPG_RESOLUTION_9BIT; } - min_period = (u64)NSEC_PER_SEC * - div64_u64((1 << pwm_resolution_arr[0]), clk_rate_arr[clk_len - 1]); + min_period = div64_u64((u64)NSEC_PER_SEC * (1 << pwm_resolution_arr[0]), + clk_rate_arr[clk_len - 1]); if (period <= min_period) return -EINVAL; /* Limit period to largest possible value, to avoid overflows */ - max_period = (u64)NSEC_PER_SEC * max_res * LPG_MAX_PREDIV * - div64_u64((1 << LPG_MAX_M), 1024); + max_period = div64_u64((u64)NSEC_PER_SEC * max_res * LPG_MAX_PREDIV * (1 << LPG_MAX_M), + 1024); if (period > max_period) period = max_period; From 9561de3a55bed6bdd44a12820ba81ec416e705a7 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 4 Jun 2023 14:04:27 -0400 Subject: [PATCH 303/303] Linux 6.4-rc5 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 836643eaefee0..09866a85bc2a2 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 4 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc5 NAME = Hurr durr I'ma ninja sloth # *DOCUMENTATION*