-rw-r--r--  MAINTAINERS | 4
-rw-r--r--  block/blk-mq.c | 12
-rw-r--r--  block/sed-opal.c | 6
-rw-r--r--  drivers/block/loop.c | 10
-rw-r--r--  drivers/char/random.c | 2
-rw-r--r--  drivers/clocksource/timer-of.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c | 13
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 2
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c | 7
-rw-r--r--  drivers/gpu/drm/arm/malidp_mw.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 13
-rw-r--r--  drivers/gpu/drm/drm_writeback.c | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 184
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_writeback.c | 4
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_txp.c | 5
-rw-r--r--  drivers/nvme/host/core.c | 132
-rw-r--r--  drivers/nvme/host/nvme.h | 2
-rw-r--r--  drivers/nvme/host/pci.c | 20
-rw-r--r--  drivers/nvme/host/rdma.c | 19
-rw-r--r--  drivers/nvme/host/tcp.c | 4
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c | 16
-rw-r--r--  drivers/nvme/target/tcp.c | 12
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 81
-rw-r--r--  fs/erofs/data.c | 10
-rw-r--r--  fs/erofs/super.c | 4
-rw-r--r--  fs/erofs/zdata.c | 12
-rw-r--r--  fs/io_uring.c | 8
-rw-r--r--  fs/statfs.c | 17
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 2
-rw-r--r--  include/uapi/linux/nvme_ioctl.h | 23
-rw-r--r--  include/uapi/linux/pg.h | 5
-rw-r--r--  include/uapi/linux/sched.h | 28
-rw-r--r--  kernel/fork.c | 11
-rw-r--r--  kernel/sched/membarrier.c | 1
-rw-r--r--  kernel/time/tick-broadcast-hrtimer.c | 62
-rw-r--r--  tools/testing/selftests/pidfd/Makefile | 2
65 files changed, 548 insertions, 350 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 296de2b51c832e..55199ef7fa744c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6112,7 +6112,10 @@ M: Gao Xiang <gaoxiang25@huawei.com>
M: Chao Yu <yuchao0@huawei.com>
L: linux-erofs@lists.ozlabs.org
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git
+F: Documentation/filesystems/erofs.txt
F: fs/erofs/
+F: include/trace/events/erofs.h
ERRSEQ ERROR TRACKING INFRASTRUCTURE
M: Jeff Layton <jlayton@kernel.org>
@@ -9075,6 +9078,7 @@ F: security/keys/
KGDB / KDB /debug_core
M: Jason Wessel <jason.wessel@windriver.com>
M: Daniel Thompson <daniel.thompson@linaro.org>
+R: Douglas Anderson <dianders@chromium.org>
W: http://kgdb.wiki.kernel.org/
L: kgdb-bugreport@lists.sourceforge.net
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6e3b15f70cd7ae..ec791156e9ccd1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1992,10 +1992,14 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
/* bypass scheduler for flush rq */
blk_insert_flush(rq);
blk_mq_run_hw_queue(data.hctx, true);
- } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs)) {
+ } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
+ !blk_queue_nonrot(q))) {
/*
* Use plugging if we have a ->commit_rqs() hook as well, as
* we know the driver uses bd->last in a smart fashion.
+ *
+ * Use normal plugging if this disk is a slow HDD, as sequential
+ * IO may benefit a lot from plug merging.
*/
unsigned int request_count = plug->rq_count;
struct request *last = NULL;
@@ -2012,6 +2016,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
}
blk_add_rq_to_plug(plug, rq);
+ } else if (q->elevator) {
+ blk_mq_sched_insert_request(rq, false, true, true);
} else if (plug && !blk_queue_nomerges(q)) {
/*
* We do limited plugging. If the bio can be merged, do that.
@@ -2035,8 +2041,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
&cookie);
}
- } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
- !data.hctx->dispatch_busy)) {
+ } else if ((q->nr_hw_queues > 1 && is_sync) ||
+ !data.hctx->dispatch_busy) {
blk_mq_try_issue_directly(data.hctx, rq, &cookie);
} else {
blk_mq_sched_insert_request(rq, false, true, true);
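
For illustration, the dispatch decision above reduces to the predicate below. This is a simplified sketch in kernel context, not the function itself:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Sketch of when blk_mq_make_request() now prefers plug merging:
 * a single hardware queue, a driver ->commit_rqs() batching hook,
 * or a rotational disk where sequential merging pays off.
 */
static bool want_plug_merging(struct request_queue *q, struct blk_plug *plug)
{
	return plug && (q->nr_hw_queues == 1 ||
			q->mq_ops->commit_rqs ||
			!blk_queue_nonrot(q));
}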
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 4e95a979216258..b4c761973ac102 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -129,7 +129,7 @@ static const u8 opaluid[][OPAL_UID_LENGTH] = {
{ 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x84, 0x01 },
/* tables */
- [OPAL_TABLE_TABLE]
+ [OPAL_TABLE_TABLE] =
{ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01 },
[OPAL_LOCKINGRANGE_GLOBAL] =
{ 0x00, 0x00, 0x08, 0x02, 0x00, 0x00, 0x00, 0x01 },
@@ -372,8 +372,8 @@ static void check_geometry(struct opal_dev *dev, const void *data)
{
const struct d0_geometry_features *geo = data;
- dev->align = geo->alignment_granularity;
- dev->lowest_lba = geo->lowest_aligned_lba;
+ dev->align = be64_to_cpu(geo->alignment_granularity);
+ dev->lowest_lba = be64_to_cpu(geo->lowest_aligned_lba);
}
static int execute_step(struct opal_dev *dev,
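
The geometry fields arrive from the device in big-endian byte order, so the raw loads were wrong on little-endian hosts. A standalone sketch of what be64_to_cpu() performs on such a host (illustrative only, not the kernel helper):

#include <stdint.h>

static uint64_t be64_decode(const uint8_t b[8])
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)	/* most significant byte first */
		v = (v << 8) | b[i];
	return v;
}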
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1410fa89365389..f6f77eaa7217e0 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -994,6 +994,16 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_write_cache(lo->lo_queue, true, false);
+ if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) {
+ /* In case of direct I/O, match underlying block size */
+ unsigned short bsize = bdev_logical_block_size(
+ inode->i_sb->s_bdev);
+
+ blk_queue_logical_block_size(lo->lo_queue, bsize);
+ blk_queue_physical_block_size(lo->lo_queue, bsize);
+ blk_queue_io_min(lo->lo_queue, bsize);
+ }
+
loop_update_rotational(lo);
loop_update_dio(lo);
set_capacity(lo->lo_disk, size);
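
Matching the backing device's logical block size matters because O_DIRECT transfers must be aligned to it. A hedged userspace illustration (not part of the patch; the function name is ours):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

/* Read one block with O_DIRECT: both the buffer address and the
 * transfer length must be multiples of the logical block size. */
static int direct_read_block(const char *path, size_t bsize)
{
	void *buf = NULL;
	ssize_t n = -1;
	int fd = open(path, O_RDONLY | O_DIRECT);

	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, bsize, bsize) == 0)
		n = read(fd, buf, bsize);
	free(buf);
	close(fd);
	return n == (ssize_t)bsize ? 0 : -1;
}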
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c2f7de9dc54330..de434feb873af8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2520,4 +2520,4 @@ void add_bootloader_randomness(const void *buf, unsigned int size)
else
add_device_randomness(buf, size);
}
-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
\ No newline at end of file
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index d8c2bd4391d027..11ff701ff4bb99 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -25,7 +25,9 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
struct clock_event_device *clkevt = &to->clkevt;
- of_irq->percpu ? free_percpu_irq(of_irq->irq, clkevt) :
+ if (of_irq->percpu)
+ free_percpu_irq(of_irq->irq, clkevt);
+ else
free_irq(of_irq->irq, clkevt);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 42e2c1f57152d7..00962a659009b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
- amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
+ amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index eba42c752bca32..82155ac3288a07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -189,7 +189,7 @@ static int acp_hw_init(void *handle)
u32 val = 0;
u32 count = 0;
struct device *dev;
- struct i2s_platform_data *i2s_pdata;
+ struct i2s_platform_data *i2s_pdata = NULL;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -231,20 +231,21 @@ static int acp_hw_init(void *handle)
adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
GFP_KERNEL);
- if (adev->acp.acp_cell == NULL)
- return -ENOMEM;
+ if (adev->acp.acp_cell == NULL) {
+ r = -ENOMEM;
+ goto failure;
+ }
adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
if (adev->acp.acp_res == NULL) {
- kfree(adev->acp.acp_cell);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto failure;
}
i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
if (i2s_pdata == NULL) {
- kfree(adev->acp.acp_res);
- kfree(adev->acp.acp_cell);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto failure;
}
switch (adev->asic_type) {
@@ -341,14 +342,14 @@ static int acp_hw_init(void *handle)
r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
ACP_DEVS);
if (r)
- return r;
+ goto failure;
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
if (r) {
dev_err(dev, "Failed to add dev to genpd\n");
- return r;
+ goto failure;
}
}
@@ -367,7 +368,8 @@ static int acp_hw_init(void *handle)
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto failure;
}
udelay(100);
}
@@ -384,7 +386,8 @@ static int acp_hw_init(void *handle)
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
- return -ETIMEDOUT;
+ r = -ETIMEDOUT;
+ goto failure;
}
udelay(100);
}
@@ -393,6 +396,13 @@ static int acp_hw_init(void *handle)
val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
return 0;
+
+failure:
+ kfree(i2s_pdata);
+ kfree(adev->acp.acp_res);
+ kfree(adev->acp.acp_cell);
+ kfree(adev->acp.acp_genpd);
+ return r;
}
/**
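
The hunk above replaces per-branch kfree() calls with a single unwind label. A generic sketch of the idiom with illustrative names; kfree(NULL) is a no-op, which is what makes the shared label safe for partially initialized state:

static int setup_example(void)
{
	void *a = NULL, *b = NULL;
	int r;

	a = kcalloc(4, sizeof(u32), GFP_KERNEL);
	if (!a) {
		r = -ENOMEM;
		goto failure;
	}
	b = kcalloc(4, sizeof(u32), GFP_KERNEL);
	if (!b) {
		r = -ENOMEM;
		goto failure;
	}
	return 0;

failure:
	kfree(b);	/* safe even when only 'a' was allocated */
	kfree(a);
	return r;
}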
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 264677ab248a1a..6f8aaf655a9fc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -81,9 +81,10 @@
* - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
* - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
* - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
+ * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 34
+#define KMS_DRIVER_MINOR 35
#define KMS_DRIVER_PATCHLEVEL 0
#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 554a59b3c4a65c..6ee4021910e2cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -165,6 +165,7 @@ struct amdgpu_gfx_config {
uint32_t num_sc_per_sh;
uint32_t num_packer_per_sc;
uint32_t pa_sc_tile_steering_override;
+ uint64_t tcc_disabled_mask;
};
struct amdgpu_cu_info {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index f6147528be6499..f2c097983f48fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -787,6 +787,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.pa_sc_tile_steering_override =
adev->gfx.config.pa_sc_tile_steering_override;
+ dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
+
return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e2fb141ff2e566..5251352f592287 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -603,14 +603,12 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
-#if 0
if (vm->bulk_moveable) {
spin_lock(&glob->lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
spin_unlock(&glob->lru_lock);
return;
}
-#endif
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 638c821611abb2..957811b73672a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -1691,6 +1691,17 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
}
}
+static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
+{
+ /* TCCs are global (not instanced). */
+ uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+ RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+
+ adev->gfx.config.tcc_disabled_mask =
+ REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
+ (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
+}
+
static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
{
u32 tmp;
@@ -1702,6 +1713,7 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
gfx_v10_0_setup_rb(adev);
gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
+ gfx_v10_0_get_tcc_info(adev);
adev->gfx.config.pa_sc_tile_steering_override =
gfx_v10_0_init_pa_sc_tile_steering_override(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 85393a99a848be..de9b995b65b1aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -317,10 +317,12 @@ static int nv_asic_reset(struct amdgpu_device *adev)
struct smu_context *smu = &adev->smu;
if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
- amdgpu_inc_vram_lost(adev);
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
ret = smu_baco_reset(smu);
} else {
- amdgpu_inc_vram_lost(adev);
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
ret = nv_asic_mode1_reset(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f70658a536a933..f8ab80c8801b1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -558,12 +558,14 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
{
switch (soc15_asic_reset_method(adev)) {
case AMD_RESET_METHOD_BACO:
- amdgpu_inc_vram_lost(adev);
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
return soc15_asic_baco_reset(adev);
case AMD_RESET_METHOD_MODE2:
return soc15_mode2_reset(adev);
default:
- amdgpu_inc_vram_lost(adev);
+ if (!adev->in_suspend)
+ amdgpu_inc_vram_lost(adev);
return soc15_asic_mode1_reset(adev);
}
}
@@ -771,8 +773,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#else
-# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
break;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8cab6da512a065..a52f0b13a2c8a1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2385,8 +2385,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
- if (adev->asic_type == CHIP_RENOIR)
- dm->dc->debug.disable_stutter = true;
return 0;
fail:
@@ -6019,7 +6017,9 @@ static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
+#ifdef CONFIG_DEBUG_FS
enum amdgpu_dm_pipe_crc_source source;
+#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 1787b9bf800a0c..76d54885374abb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -668,6 +668,7 @@ struct clock_source *dce100_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 318e9c2e2ca88c..89620adc81d8b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -714,6 +714,7 @@ struct clock_source *dce110_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 83e1878161c922..21a657e7930636 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -687,6 +687,7 @@ struct clock_source *dce112_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 8b85e5274bbadd..7c52f7f9196c95 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -500,6 +500,7 @@ static struct clock_source *dce120_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 4625df9f9fd2f8..643ccb0ade006f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -701,6 +701,7 @@ struct clock_source *dce80_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 59305e411a6663..1599bb97111114 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -786,6 +786,7 @@ struct clock_source *dcn10_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index b4e3ce22ed5294..5a2763daff4d65 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1077,6 +1077,7 @@ struct clock_source *dcn20_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 8cd9de8b1a7a19..ef673bffc241ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -3,7 +3,17 @@
DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
-CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse -mpreferred-stack-boundary=4
+ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
+ cc_stack_align := -mpreferred-stack-boundary=4
+else ifneq ($(call cc-option, -mstack-alignment=16),)
+ cc_stack_align := -mstack-alignment=16
+endif
+
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse $(cc_stack_align)
+
+ifdef CONFIG_CC_IS_CLANG
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
+endif
AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 456cd0e3289c66..3b6ed60dcd3510 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -39,9 +39,6 @@
* ways. Unless there is something clearly wrong with it the code should
* remain as-is as it provides us with a guarantee from HW that it is correct.
*/
-
-typedef unsigned int uint;
-
typedef struct {
double DPPCLK;
double DISPCLK;
@@ -4774,7 +4771,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0;
mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- uint m;
+ unsigned int m;
locals->cursor_bw[k] = 0;
locals->cursor_bw_pre[k] = 0;
@@ -5285,7 +5282,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank;
double FullDETBufferingTimeYStutterCriticalPlane = 0;
double TimeToFinishSwathTransferStutterCriticalPlane = 0;
- uint k, j;
+ unsigned int k, j;
mode_lib->vba.TotalActiveDPP = 0;
mode_lib->vba.TotalDCCActiveDPP = 0;
@@ -5507,7 +5504,7 @@ static void CalculateDCFCLKDeepSleep(
double DPPCLK[],
double *DCFCLKDeepSleep)
{
- uint k;
+ unsigned int k;
double DisplayPipeLineDeliveryTimeLuma;
double DisplayPipeLineDeliveryTimeChroma;
//double DCFCLKDeepSleepPerPlane[DC__NUM_DPP__MAX];
@@ -5727,7 +5724,7 @@ static void CalculatePixelDeliveryTimes(
double DisplayPipeRequestDeliveryTimeChromaPrefetch[])
{
double req_per_swath_ub;
- uint k;
+ unsigned int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (VRatio[k] <= 1) {
@@ -5869,7 +5866,7 @@ static void CalculateMetaAndPTETimes(
unsigned int dpte_groups_per_row_chroma_ub;
unsigned int num_group_per_lower_vm_stage;
unsigned int num_req_per_lower_vm_stage;
- uint k;
+ unsigned int k;
for (k = 0; k < NumberOfActivePlanes; ++k) {
if (GPUVMEnable == true) {
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 33960fb38a5d73..4acf139ea01409 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -843,6 +843,8 @@ static int smu_sw_init(void *handle)
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
+ mutex_init(&smu->sensor_lock);
+
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index f1f072012facb1..d493a3f8c07a63 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -1018,6 +1018,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
+ mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1044,6 +1045,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
default:
ret = smu_smc_read_sensor(smu, sensor, data, size);
}
+ mutex_unlock(&smu->sensor_lock);
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 6109815a0401a7..23171a4d9a31cf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -344,6 +344,7 @@ struct smu_context
const struct smu_funcs *funcs;
const struct pptable_funcs *ppt_funcs;
struct mutex mutex;
+ struct mutex sensor_lock;
uint64_t pool_size;
struct smu_table_context smu_table;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 12c0e469bf351e..0b461404af6b48 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -547,7 +547,7 @@ static int navi10_get_metrics_table(struct smu_context *smu,
struct smu_table_context *smu_table= &smu->smu_table;
int ret = 0;
- if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
+ if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
(void *)smu_table->metrics_table, false);
if (ret) {
@@ -1386,6 +1386,7 @@ static int navi10_read_sensor(struct smu_context *smu,
if(!data || !size)
return -EINVAL;
+ mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1409,6 +1410,7 @@ static int navi10_read_sensor(struct smu_context *smu,
default:
ret = smu_smc_read_sensor(smu, sensor, data, size);
}
+ mutex_unlock(&smu->sensor_lock);
return ret;
}
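
The old expression jiffies + HZ / 1000 was meant as a 1 ms cache window, but with integer division HZ / 1000 is 0 on any kernel built with HZ < 1000 (e.g. HZ=250), so the cached metrics table was treated as stale almost immediately. msecs_to_jiffies() rounds up to at least one jiffy, and the patch also widens the window to 100 ms. A minimal kernel-context sketch of the staleness check:

#include <linux/jiffies.h>

/* Refresh a cached table only when it is older than 100 ms. */
static bool metrics_stale(unsigned long cached_at)
{
	return time_after(jiffies, cached_at + msecs_to_jiffies(100));
}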
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 64386ee3f87810..bbd8ebd58434bd 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -3023,6 +3023,7 @@ static int vega20_read_sensor(struct smu_context *smu,
if(!data || !size)
return -EINVAL;
+ mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -3048,6 +3049,7 @@ static int vega20_read_sensor(struct smu_context *smu,
default:
ret = smu_smc_read_sensor(smu, sensor, data, size);
}
+ mutex_unlock(&smu->sensor_lock);
return ret;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index 2851cac94d8699..b72840c06ab767 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -43,9 +43,8 @@ komeda_wb_encoder_atomic_check(struct drm_encoder *encoder,
struct komeda_data_flow_cfg dflow;
int err;
- if (!writeback_job || !writeback_job->fb) {
+ if (!writeback_job)
return 0;
- }
if (!crtc_st->active) {
DRM_DEBUG_ATOMIC("Cannot write the composition result out on an inactive CRTC.\n");
@@ -166,8 +165,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
&komeda_wb_encoder_helper_funcs,
formats, n_formats);
komeda_put_fourcc_list(formats);
- if (err)
+ if (err) {
+ kfree(kwb_conn);
return err;
+ }
drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 22c0847986df90..875a3a9eabfa1e 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -131,7 +131,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_framebuffer *fb;
int i, n_planes;
- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ if (!conn_state->writeback_job)
return 0;
fb = conn_state->writeback_job->fb;
@@ -248,7 +248,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
mw_state = to_mw_state(conn_state);
- if (conn_state->writeback_job && conn_state->writeback_job->fb) {
+ if (conn_state->writeback_job) {
struct drm_framebuffer *fb = conn_state->writeback_job->fb;
DRM_DEV_DEBUG_DRIVER(drm->dev,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 419381abbdd16e..14aeaf73632100 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -430,10 +430,15 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
return -EINVAL;
}
- if (writeback_job->out_fence && !writeback_job->fb) {
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
- connector->base.id, connector->name);
- return -EINVAL;
+ if (!writeback_job->fb) {
+ if (writeback_job->out_fence) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
+ connector->base.id, connector->name);
+ return -EINVAL;
+ }
+
+ drm_writeback_cleanup_job(writeback_job);
+ state->writeback_job = NULL;
}
return 0;
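
With the core now discarding framebuffer-less jobs as above, drivers no longer need their own ->fb test; their atomic_check paths reduce to the shape sketched below, which is exactly the simplification applied to komeda, malidp, rcar-du and vc4 later in this diff:

/* Driver atomic_check sketch: a writeback job that reaches the
 * driver is guaranteed to carry a framebuffer. */
if (!conn_state->writeback_job)
	return 0;
fb = conn_state->writeback_job->fb;	/* non-NULL by construction */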
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index ff138b6ec48bad..43d9e3bb3a9434 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -324,6 +324,9 @@ void drm_writeback_cleanup_job(struct drm_writeback_job *job)
if (job->fb)
drm_framebuffer_put(job->fb);
+ if (job->out_fence)
+ dma_fence_put(job->out_fence);
+
kfree(job);
}
EXPORT_SYMBOL(drm_writeback_cleanup_job);
@@ -366,25 +369,29 @@ drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
{
unsigned long flags;
struct drm_writeback_job *job;
+ struct dma_fence *out_fence;
spin_lock_irqsave(&wb_connector->job_lock, flags);
job = list_first_entry_or_null(&wb_connector->job_queue,
struct drm_writeback_job,
list_entry);
- if (job) {
+ if (job)
list_del(&job->list_entry);
- if (job->out_fence) {
- if (status)
- dma_fence_set_error(job->out_fence, status);
- dma_fence_signal(job->out_fence);
- dma_fence_put(job->out_fence);
- }
- }
+
spin_unlock_irqrestore(&wb_connector->job_lock, flags);
if (WARN_ON(!job))
return;
+ out_fence = job->out_fence;
+ if (out_fence) {
+ if (status)
+ dma_fence_set_error(out_fence, status);
+ dma_fence_signal(out_fence);
+ dma_fence_put(out_fence);
+ job->out_fence = NULL;
+ }
+
INIT_WORK(&job->cleanup_work, cleanup_work);
queue_work(system_long_wq, &job->cleanup_work);
}
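
The rework narrows the spinlock to the list manipulation, signals the fence afterwards, and clears job->out_fence so the new put in drm_writeback_cleanup_job() cannot drop the reference twice. The general pattern, sketched with illustrative names:

spin_lock_irqsave(&wb->job_lock, flags);
job = list_first_entry_or_null(&wb->job_queue, struct job, entry);
if (job)
	list_del(&job->entry);
spin_unlock_irqrestore(&wb->job_lock, flags);

if (job && job->out_fence) {
	dma_fence_signal(job->out_fence);	/* may invoke callbacks */
	dma_fence_put(job->out_fence);
	job->out_fence = NULL;			/* avoid a second put in cleanup */
}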
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index b51d1ceb873937..ce05e805b08fba 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -7261,7 +7261,7 @@ retry:
pipe_config->fdi_lanes = lane;
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n, false);
+ link_bw, &pipe_config->fdi_m_n, false, false);
ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
if (ret == -EDEADLK)
@@ -7508,11 +7508,15 @@ void
intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool constant_n)
+ bool constant_n, bool fec_enable)
{
- m_n->tu = 64;
+ u32 data_clock = bits_per_pixel * pixel_clock;
+
+ if (fec_enable)
+ data_clock = intel_dp_mode_to_fec_clock(data_clock);
- compute_m_n(bits_per_pixel * pixel_clock,
+ m_n->tu = 64;
+ compute_m_n(data_clock,
link_clock * nlanes * 8,
&m_n->gmch_m, &m_n->gmch_n,
constant_n);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index e57e6969051d06..01fa87ad327092 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -414,7 +414,7 @@ enum phy_fia {
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
- bool constant_n);
+ bool constant_n, bool fec_enable);
bool is_ccs_modifier(u64 modifier);
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 921ad0a2f7ba7c..57e9f0ba331b4e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -78,8 +78,8 @@
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
-/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
-#define DP_DSC_FEC_OVERHEAD_FACTOR 976
+/* DP DSC FEC Overhead factor = 1/(0.972261) */
+#define DP_DSC_FEC_OVERHEAD_FACTOR 972261
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
@@ -494,6 +494,97 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
return 0;
}
+u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
+{
+ return div_u64(mul_u32_u32(mode_clock, 1000000U),
+ DP_DSC_FEC_OVERHEAD_FACTOR);
+}
+
+static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
+ u32 mode_clock, u32 mode_hdisplay)
+{
+ u32 bits_per_pixel, max_bpp_small_joiner_ram;
+ int i;
+
+ /*
+ * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
+ * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
+ * for SST -> TimeSlotsPerMTP is 1,
+ * for MST -> TimeSlotsPerMTP has to be calculated
+ */
+ bits_per_pixel = (link_clock * lane_count * 8) /
+ intel_dp_mode_to_fec_clock(mode_clock);
+ DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
+
+ /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
+ max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
+ DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
+
+ /*
+ * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
+ * check, output bpp from small joiner RAM check)
+ */
+ bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
+
+ /* Error out if the max bpp is less than smallest allowed valid bpp */
+ if (bits_per_pixel < valid_dsc_bpp[0]) {
+ DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
+ bits_per_pixel, valid_dsc_bpp[0]);
+ return 0;
+ }
+
+ /* Find the nearest match in the array of known BPPs from VESA */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+ if (bits_per_pixel < valid_dsc_bpp[i + 1])
+ break;
+ }
+ bits_per_pixel = valid_dsc_bpp[i];
+
+ /*
+ * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
+ * fractional part is 0
+ */
+ return bits_per_pixel << 4;
+}
+
+static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+ int mode_clock, int mode_hdisplay)
+{
+ u8 min_slice_count, i;
+ int max_slice_width;
+
+ if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_0);
+ else
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_1);
+
+ max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
+ if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
+ DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
+ max_slice_width);
+ return 0;
+ }
+ /* Also take into account max slice width */
+ min_slice_count = min_t(u8, min_slice_count,
+ DIV_ROUND_UP(mode_hdisplay,
+ max_slice_width));
+
+ /* Find the closest match to the valid slice count values */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
+ if (valid_dsc_slicecount[i] >
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ false))
+ break;
+ if (min_slice_count <= valid_dsc_slicecount[i])
+ return valid_dsc_slicecount[i];
+ }
+
+ DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+ return 0;
+}
+
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -2226,7 +2317,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
- constant_n);
+ constant_n, pipe_config->fec_enable);
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -2236,7 +2327,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
&pipe_config->dp_m2_n2,
- constant_n);
+ constant_n, pipe_config->fec_enable);
}
if (!HAS_DDI(dev_priv))
@@ -4323,91 +4414,6 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
DP_DPRX_ESI_LEN;
}
-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
- int mode_clock, int mode_hdisplay)
-{
- u16 bits_per_pixel, max_bpp_small_joiner_ram;
- int i;
-
- /*
- * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
- * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
- * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
- * for MST -> TimeSlotsPerMTP has to be calculated
- */
- bits_per_pixel = (link_clock * lane_count * 8 *
- DP_DSC_FEC_OVERHEAD_FACTOR) /
- mode_clock;
-
- /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
- max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
- mode_hdisplay;
-
- /*
- * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW
- * check, output bpp from small joiner RAM check)
- */
- bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
-
- /* Error out if the max bpp is less than smallest allowed valid bpp */
- if (bits_per_pixel < valid_dsc_bpp[0]) {
- DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
- return 0;
- }
-
- /* Find the nearest match in the array of known BPPs from VESA */
- for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
- if (bits_per_pixel < valid_dsc_bpp[i + 1])
- break;
- }
- bits_per_pixel = valid_dsc_bpp[i];
-
- /*
- * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
- * fractional part is 0
- */
- return bits_per_pixel << 4;
-}
-
-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
- int mode_clock,
- int mode_hdisplay)
-{
- u8 min_slice_count, i;
- int max_slice_width;
-
- if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
- min_slice_count = DIV_ROUND_UP(mode_clock,
- DP_DSC_MAX_ENC_THROUGHPUT_0);
- else
- min_slice_count = DIV_ROUND_UP(mode_clock,
- DP_DSC_MAX_ENC_THROUGHPUT_1);
-
- max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
- if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
- DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
- max_slice_width);
- return 0;
- }
- /* Also take into account max slice width */
- min_slice_count = min_t(u8, min_slice_count,
- DIV_ROUND_UP(mode_hdisplay,
- max_slice_width));
-
- /* Find the closest match to the valid slice count values */
- for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
- if (valid_dsc_slicecount[i] >
- drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
- false))
- break;
- if (min_slice_count <= valid_dsc_slicecount[i])
- return valid_dsc_slicecount[i];
- }
-
- DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
- return 0;
-}
-
static void
intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
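
The replaced overhead factor can be checked by hand: DP FEC consumes roughly 2.8% of the link, modeled here as division by 0.972261 rather than the old multiply-by-0.976 approximation. A standalone sketch of the new conversion with one worked value (userspace arithmetic only):

#include <stdint.h>
#include <stdio.h>

/* Effective clock once FEC overhead is accounted for,
 * i.e. mode_clock / 0.972261 in fixed point. */
static uint32_t mode_to_fec_clock(uint32_t mode_clock)
{
	return (uint64_t)mode_clock * 1000000u / 972261u;
}

int main(void)
{
	/* 594000 kHz (4k60) -> about 610947 kHz of link capacity needed */
	printf("%u\n", mode_to_fec_clock(594000));
	return 0;
}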
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 657bbb1f5ed08f..00981fb9414b6a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -102,10 +102,6 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
- int mode_clock, int mode_hdisplay);
-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
- int mode_hdisplay);
bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
@@ -118,4 +114,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
return ~((1 << lane_count) - 1) & 0xf;
}
+u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
+
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 6df240a01b8c3b..600873c796d093 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -81,7 +81,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock,
crtc_state->port_clock,
&crtc_state->dp_m_n,
- constant_n);
+ constant_n, crtc_state->fec_enable);
crtc_state->dp_m_n.tu = slots;
return 0;
@@ -615,7 +615,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->power_domain = intel_dig_port->base.power_domain;
intel_encoder->port = intel_dig_port->base.port;
- intel_encoder->crtc_mask = BIT(pipe);
+ intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index dea63be1964f2f..cae25e493128d7 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -1528,6 +1528,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
int src_x, src_w, src_h, crtc_w, crtc_h;
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
+ unsigned int stride = plane_state->color_plane[0].stride;
unsigned int cpp = fb->format->cpp[0];
unsigned int width_bytes;
int min_width, min_height;
@@ -1569,9 +1570,9 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- if (width_bytes > 4096 || fb->pitches[0] > 4096) {
+ if (stride > 4096) {
DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n",
- fb->pitches[0], 4096);
+ stride, 4096);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index e226324adb6978..4bdd63b5710029 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1083,7 +1083,7 @@ static const struct dss_features omap34xx_dss_feats = {
static const struct dss_features omap3630_dss_feats = {
.model = DSS_MODEL_OMAP3,
- .fck_div_max = 32,
+ .fck_div_max = 31,
.fck_freq_max = 173000000,
.dss_fck_multiplier = 1,
.parent_clk_name = "dpll4_ck",
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index ae07290bba6a4b..04efa78d70b6ea 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -147,7 +147,7 @@ static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *fb;
- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ if (!conn_state->writeback_job)
return 0;
fb = conn_state->writeback_job->fb;
@@ -221,7 +221,7 @@ void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
unsigned int i;
state = rcrtc->writeback.base.state;
- if (!state || !state->writeback_job || !state->writeback_job->fb)
+ if (!state || !state->writeback_job)
return;
fb = state->writeback_job->fb;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 525dc1c0f1c148..530edb3b51cc0c 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -7,6 +7,7 @@
#include <linux/gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 1ce4d7142b6e9f..bf720206727f08 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -231,7 +231,7 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
int i;
conn_state = drm_atomic_get_new_connector_state(state, conn);
- if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ if (!conn_state->writeback_job)
return 0;
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
@@ -271,8 +271,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
u32 ctrl;
int i;
- if (WARN_ON(!conn_state->writeback_job ||
- !conn_state->writeback_job->fb))
+ if (WARN_ON(!conn_state->writeback_job))
return;
mode = &conn_state->crtc->state->adjusted_mode;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 108f60b46804a4..fd7dea36c3b683 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -102,10 +102,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
*/
if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
return;
- revalidate_disk(ns->disk);
blk_set_queue_dying(ns->queue);
/* Forcibly unquiesce queues to avoid blocking dispatch */
blk_mq_unquiesce_queue(ns->queue);
+ /*
+ * Revalidate after unblocking dispatchers that may be holding bd_mutex
+ */
+ revalidate_disk(ns->disk);
}
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
@@ -847,7 +850,7 @@ out:
static int nvme_submit_user_cmd(struct request_queue *q,
struct nvme_command *cmd, void __user *ubuffer,
unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
- u32 meta_seed, u32 *result, unsigned timeout)
+ u32 meta_seed, u64 *result, unsigned timeout)
{
bool write = nvme_is_write(cmd);
struct nvme_ns *ns = q->queuedata;
@@ -888,7 +891,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
else
ret = nvme_req(req)->status;
if (result)
- *result = le32_to_cpu(nvme_req(req)->result.u32);
+ *result = le64_to_cpu(nvme_req(req)->result.u64);
if (meta && !ret && !write) {
if (copy_to_user(meta_buffer, meta, meta_len))
ret = -EFAULT;
@@ -1335,6 +1338,54 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_command c;
unsigned timeout = 0;
u32 effects;
+ u64 result;
+ int status;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+ return -EFAULT;
+ if (cmd.flags)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = cmd.opcode;
+ c.common.flags = cmd.flags;
+ c.common.nsid = cpu_to_le32(cmd.nsid);
+ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+ c.common.cdw10 = cpu_to_le32(cmd.cdw10);
+ c.common.cdw11 = cpu_to_le32(cmd.cdw11);
+ c.common.cdw12 = cpu_to_le32(cmd.cdw12);
+ c.common.cdw13 = cpu_to_le32(cmd.cdw13);
+ c.common.cdw14 = cpu_to_le32(cmd.cdw14);
+ c.common.cdw15 = cpu_to_le32(cmd.cdw15);
+
+ if (cmd.timeout_ms)
+ timeout = msecs_to_jiffies(cmd.timeout_ms);
+
+ effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
+ status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+ (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
+ (void __user *)(uintptr_t)cmd.metadata,
+ cmd.metadata_len, 0, &result, timeout);
+ nvme_passthru_end(ctrl, effects);
+
+ if (status >= 0) {
+ if (put_user(result, &ucmd->result))
+ return -EFAULT;
+ }
+
+ return status;
+}
+
+static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ struct nvme_passthru_cmd64 __user *ucmd)
+{
+ struct nvme_passthru_cmd64 cmd;
+ struct nvme_command c;
+ unsigned timeout = 0;
+ u32 effects;
int status;
if (!capable(CAP_SYS_ADMIN))
@@ -1405,6 +1456,41 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
srcu_read_unlock(&head->srcu, idx);
}
+static bool is_ctrl_ioctl(unsigned int cmd)
+{
+ if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
+ return true;
+ if (is_sed_ioctl(cmd))
+ return true;
+ return false;
+}
+
+static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
+ void __user *argp,
+ struct nvme_ns_head *head,
+ int srcu_idx)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ int ret;
+
+ nvme_get_ctrl(ns->ctrl);
+ nvme_put_ns_from_disk(head, srcu_idx);
+
+ switch (cmd) {
+ case NVME_IOCTL_ADMIN_CMD:
+ ret = nvme_user_cmd(ctrl, NULL, argp);
+ break;
+ case NVME_IOCTL_ADMIN64_CMD:
+ ret = nvme_user_cmd64(ctrl, NULL, argp);
+ break;
+ default:
+ ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
+ break;
+ }
+ nvme_put_ctrl(ctrl);
+ return ret;
+}
+
static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
@@ -1422,20 +1508,8 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
* separately and drop the ns SRCU reference early. This avoids a
* deadlock when deleting namespaces using the passthrough interface.
*/
- if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) {
- struct nvme_ctrl *ctrl = ns->ctrl;
-
- nvme_get_ctrl(ns->ctrl);
- nvme_put_ns_from_disk(head, srcu_idx);
-
- if (cmd == NVME_IOCTL_ADMIN_CMD)
- ret = nvme_user_cmd(ctrl, NULL, argp);
- else
- ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
-
- nvme_put_ctrl(ctrl);
- return ret;
- }
+ if (is_ctrl_ioctl(cmd))
+ return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
switch (cmd) {
case NVME_IOCTL_ID:
@@ -1448,6 +1522,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
case NVME_IOCTL_SUBMIT_IO:
ret = nvme_submit_io(ns, argp);
break;
+ case NVME_IOCTL_IO64_CMD:
+ ret = nvme_user_cmd64(ns->ctrl, ns, argp);
+ break;
default:
if (ns->ndev)
ret = nvme_nvm_ioctl(ns, cmd, arg);
@@ -2289,6 +2366,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
.vid = 0x14a4,
.fr = "22301111",
.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
+ },
+ {
+ /*
+ * This Kingston E8FK11.T firmware version has no interrupt
+ * after resume with actions related to suspend to idle
+ * https://bugzilla.kernel.org/show_bug.cgi?id=204887
+ */
+ .vid = 0x2646,
+ .fr = "E8FK11.T",
+ .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
}
};
@@ -2540,8 +2627,9 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
list_add_tail(&subsys->entry, &nvme_subsystems);
}
- if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
- dev_name(ctrl->device))) {
+ ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
+ dev_name(ctrl->device));
+ if (ret) {
dev_err(ctrl->device,
"failed to create sysfs link from subsystem.\n");
goto out_put_subsystem;
@@ -2838,6 +2926,8 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case NVME_IOCTL_ADMIN_CMD:
return nvme_user_cmd(ctrl, NULL, argp);
+ case NVME_IOCTL_ADMIN64_CMD:
+ return nvme_user_cmd64(ctrl, NULL, argp);
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
@@ -3045,6 +3135,8 @@ static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
+nvme_show_int_function(queue_count);
+nvme_show_int_function(sqsize);
static ssize_t nvme_sysfs_delete(struct device *dev,
struct device_attribute *attr, const char *buf,
@@ -3125,6 +3217,8 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_address.attr,
&dev_attr_state.attr,
&dev_attr_numa_node.attr,
+ &dev_attr_queue_count.attr,
+ &dev_attr_sqsize.attr,
NULL
};
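
The new NVME_IOCTL_ADMIN64_CMD / NVME_IOCTL_IO64_CMD entry points return the full 64-bit completion result instead of truncating it to 32 bits. A hypothetical userspace caller, assuming the struct nvme_passthru_cmd64 layout added to include/uapi/linux/nvme_ioctl.h by this series:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

/* Issue an Identify Controller admin command via the 64-bit ioctl. */
static int identify_ctrl(int fd, void *buf4k)
{
	struct nvme_passthru_cmd64 cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x06;		/* Identify */
	cmd.addr = (unsigned long)buf4k;
	cmd.data_len = 4096;
	cmd.cdw10 = 1;			/* CNS 1: controller data */

	if (ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd) < 0)
		return -1;
	/* cmd.result now holds the untruncated 64-bit CQE result. */
	return 0;
}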
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b5013c101b3528..38a83ef5bcd351 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -221,6 +221,7 @@ struct nvme_ctrl {
u16 oacs;
u16 nssa;
u16 nr_streams;
+ u16 sqsize;
u32 max_namespaces;
atomic_t abort_limit;
u8 vwc;
@@ -269,7 +270,6 @@ struct nvme_ctrl {
u16 hmmaxd;
/* Fabrics only */
- u16 sqsize;
u32 ioccsz;
u32 iorcsz;
u16 icdoff;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c0808f9eb8ab69..bb88681f4dc35c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2946,11 +2946,21 @@ static int nvme_suspend(struct device *dev)
if (ret < 0)
goto unfreeze;
+ /*
+ * A saved state prevents pci pm from generically controlling the
+ * device's power. If we're using protocol specific settings, we don't
+ * want pci interfering.
+ */
+ pci_save_state(pdev);
+
ret = nvme_set_power_state(ctrl, ctrl->npss);
if (ret < 0)
goto unfreeze;
if (ret) {
+ /* discard the saved state */
+ pci_load_saved_state(pdev, NULL);
+
/*
* Clearing npss forces a controller reset on resume. The
* correct value will be rediscovered then.
@@ -2958,14 +2968,7 @@ static int nvme_suspend(struct device *dev)
nvme_dev_disable(ndev, true);
ctrl->npss = 0;
ret = 0;
- goto unfreeze;
}
- /*
- * A saved state prevents pci pm from generically controlling the
- * device's power. If we're using protocol specific settings, we don't
- * want pci interfering.
- */
- pci_save_state(pdev);
unfreeze:
nvme_unfreeze(ctrl);
return ret;
@@ -3090,6 +3093,9 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index dfa07bb9dfeb51..4d280160dd3fd3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -427,7 +427,7 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
{
return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
- ibdev->attrs.max_fast_reg_page_list_len);
+ ibdev->attrs.max_fast_reg_page_list_len - 1);
}
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
@@ -437,7 +437,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
const int cq_factor = send_wr_factor + 1; /* + RECV */
int comp_vector, idx = nvme_rdma_queue_idx(queue);
enum ib_poll_context poll_ctx;
- int ret;
+ int ret, pages_per_mr;
queue->device = nvme_rdma_find_get_device(queue->cm_id);
if (!queue->device) {
@@ -479,10 +479,16 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
goto out_destroy_qp;
}
+ /*
+ * Currently we don't use SG_GAPS MR's so if the first entry is
+ * misaligned we'll end up using two entries for a single data page,
+ * so one additional entry is required.
+ */
+ pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev) + 1;
ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
queue->queue_size,
IB_MR_TYPE_MEM_REG,
- nvme_rdma_get_max_fr_pages(ibdev), 0);
+ pages_per_mr, 0);
if (ret) {
dev_err(queue->ctrl->ctrl.device,
"failed to initialize MR pool sized %d for QID %d\n",
@@ -614,7 +620,8 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
if (!ret) {
set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
} else {
- __nvme_rdma_stop_queue(queue);
+ if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+ __nvme_rdma_stop_queue(queue);
dev_info(ctrl->ctrl.device,
"failed to connect queue: %d ret=%d\n", idx, ret);
}
@@ -820,8 +827,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (error)
goto out_stop_queue;
- ctrl->ctrl.max_hw_sectors =
- (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
+ ctrl->ctrl.max_segments = ctrl->max_fr_pages;
+ ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
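
The extra page-list entry accounts for a misaligned first SGE: a buffer that does not start on a page boundary spans one more page than the aligned case. A small worked example (4 KiB pages, plain userspace arithmetic):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Number of page-list entries needed for a buffer of len bytes
 * starting at byte offset off; illustrates the +1 worst case. */
static unsigned long entries(unsigned long off, unsigned long len)
{
	return (off % PAGE_SIZE + len + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	printf("%lu\n", entries(0, 8192));	/* 2: aligned */
	printf("%lu\n", entries(512, 8192));	/* 3: misaligned start */
	return 0;
}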
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 4ffd5957637a53..385a5212c10f19 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1042,7 +1042,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
{
struct nvme_tcp_queue *queue =
container_of(w, struct nvme_tcp_queue, io_work);
- unsigned long start = jiffies + msecs_to_jiffies(1);
+ unsigned long deadline = jiffies + msecs_to_jiffies(1);
do {
bool pending = false;
@@ -1067,7 +1067,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
if (!pending)
return;
- } while (time_after(jiffies, start)); /* quota is exhausted */
+ } while (!time_after(jiffies, deadline)); /* quota is exhausted */
queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
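
The renamed variable makes the bug visible: the loop was written as while (time_after(jiffies, start)), which is false on the first pass, so io_work always gave up after one iteration and requeued itself. The restored idiom runs until the 1 ms quota expires:

unsigned long deadline = jiffies + msecs_to_jiffies(1);

do {
	/* ... one bounded unit of I/O work ... */
} while (!time_after(jiffies, deadline));	/* stop once quota is spent */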
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index de0bff70ebb68b..32008d85172bc7 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -11,10 +11,10 @@
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
- /* Number of physical blocks per logical block. */
- const u32 ppl = ql->physical_block_size / ql->logical_block_size;
- /* Physical blocks per logical block, 0's based. */
- const __le16 ppl0b = to0based(ppl);
+ /* Number of logical blocks per physical block. */
+ const u32 lpp = ql->physical_block_size / ql->logical_block_size;
+ /* Logical blocks per physical block, 0's based. */
+ const __le16 lpp0b = to0based(lpp);
/*
* For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
@@ -25,9 +25,9 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
* field from the identify controller data structure should be used.
*/
id->nsfeat |= 1 << 1;
- id->nawun = ppl0b;
- id->nawupf = ppl0b;
- id->nacwu = ppl0b;
+ id->nawun = lpp0b;
+ id->nawupf = lpp0b;
+ id->nacwu = lpp0b;
/*
* Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
@@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
*/
id->nsfeat |= 1 << 4;
/* NPWG = Namespace Preferred Write Granularity. 0's based */
- id->npwg = ppl0b;
+ id->npwg = lpp0b;
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
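
The rename brings the comments in line with the math: physical_block_size / logical_block_size yields logical blocks per physical block (a 4096-byte physical block holds eight 512-byte logical blocks), reported 0's based as 7. A sketch of the conversion, assuming to0based() clamps to 1..0xffff and subtracts one as the nvmet helper does (endian conversion omitted in this userspace sketch):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed shape of nvmet's to0based(): clamp, then subtract one. */
    static uint16_t to0based(uint32_t a)
    {
        if (a < 1)
            a = 1;
        if (a > 0xffff)
            a = 0xffff;
        return (uint16_t)(a - 1);
    }

    int main(void)
    {
        uint32_t physical = 4096, logical = 512;
        uint32_t lpp = physical / logical;      /* logical per physical: 8 */

        printf("NAWUN/NPWG reported as %u (0's based)\n", to0based(lpp));
        return 0;
    }
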
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index bf4f03474e8992..d535080b781f95 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -348,8 +348,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
return 0;
err:
- if (cmd->req.sg_cnt)
- sgl_free(cmd->req.sg);
+ sgl_free(cmd->req.sg);
return NVME_SC_INTERNAL;
}
@@ -554,8 +553,7 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
if (queue->nvme_sq.sqhd_disabled) {
kfree(cmd->iov);
- if (cmd->req.sg_cnt)
- sgl_free(cmd->req.sg);
+ sgl_free(cmd->req.sg);
}
return 1;
@@ -586,8 +584,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
return -EAGAIN;
kfree(cmd->iov);
- if (cmd->req.sg_cnt)
- sgl_free(cmd->req.sg);
+ sgl_free(cmd->req.sg);
cmd->queue->snd_cmd = NULL;
nvmet_tcp_put_cmd(cmd);
return 1;
@@ -1310,8 +1307,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
nvmet_req_uninit(&cmd->req);
nvmet_tcp_unmap_pdu_iovec(cmd);
kfree(cmd->iov);
- if (cmd->req.sg_cnt)
- sgl_free(cmd->req.sg);
+ sgl_free(cmd->req.sg);
}
static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
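
The dropped sg_cnt guards rely on sgl_free() following the kfree(NULL) convention: when no data was mapped, cmd->req.sg is NULL and the call is a no-op, so the check was redundant. A sketch of that convention (names hypothetical, not the kernel implementation):

    #include <stdlib.h>

    struct sgl_sketch { void *pages; };

    /* A free that tolerates NULL, so callers need no count-style guard. */
    static void sgl_free_sketch(struct sgl_sketch *sg)
    {
        if (!sg)
            return;
        free(sg->pages);
        free(sg);
    }

    int main(void)
    {
        sgl_free_sketch(NULL);  /* safe: mirrors kfree(NULL) semantics */
        return 0;
    }
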
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index fc53e1e221f0a0..c94184d080f84d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1553,8 +1553,8 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
if (rc == 0) {
memcpy(&private->vsq, vsq, sizeof(*vsq));
} else {
- dev_warn(&device->cdev->dev,
- "Reading the volume storage information failed with rc=%d\n", rc);
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Reading the volume storage information failed with rc=%d", rc);
}
if (useglobal)
@@ -1737,8 +1737,8 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
if (rc == 0) {
dasd_eckd_cpy_ext_pool_data(device, lcq);
} else {
- dev_warn(&device->cdev->dev,
- "Reading the logical configuration failed with rc=%d\n", rc);
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Reading the logical configuration failed with rc=%d", rc);
}
dasd_sfree_request(cqr, cqr->memdev);
@@ -2020,14 +2020,10 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
dasd_eckd_read_features(device);
/* Read Volume Information */
- rc = dasd_eckd_read_vol_info(device);
- if (rc)
- goto out_err3;
+ dasd_eckd_read_vol_info(device);
/* Read Extent Pool Information */
- rc = dasd_eckd_read_ext_pool_info(device);
- if (rc)
- goto out_err3;
+ dasd_eckd_read_ext_pool_info(device);
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@@ -2059,9 +2055,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (readonly)
set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
- if (dasd_eckd_is_ese(device))
- dasd_set_feature(device->cdev, DASD_FEATURE_DISCARD, 1);
-
dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
"with %d cylinders, %d heads, %d sectors%s\n",
private->rdc_data.dev_type,
@@ -3695,14 +3688,6 @@ static int dasd_eckd_release_space(struct dasd_device *device,
return -EINVAL;
}
-static struct dasd_ccw_req *
-dasd_eckd_build_cp_discard(struct dasd_device *device, struct dasd_block *block,
- struct request *req, sector_t first_trk,
- sector_t last_trk)
-{
- return dasd_eckd_dso_ras(device, block, req, first_trk, last_trk, 1);
-}
-
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
struct dasd_device *startdev,
struct dasd_block *block,
@@ -4447,10 +4432,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
cmdwtd = private->features.feature[12] & 0x40;
use_prefix = private->features.feature[8] & 0x01;
- if (req_op(req) == REQ_OP_DISCARD)
- return dasd_eckd_build_cp_discard(startdev, block, req,
- first_trk, last_trk);
-
cqr = NULL;
if (cdlspecial || dasd_page_cache) {
/* do nothing, just fall through to the cmd mode single case */
@@ -4729,14 +4710,12 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
struct dasd_block *block,
struct request *req)
{
- struct dasd_device *startdev = NULL;
struct dasd_eckd_private *private;
- struct dasd_ccw_req *cqr;
+ struct dasd_device *startdev;
unsigned long flags;
+ struct dasd_ccw_req *cqr;
- /* Discard requests can only be processed on base devices */
- if (req_op(req) != REQ_OP_DISCARD)
- startdev = dasd_alias_get_start_dev(base);
+ startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
private = startdev->private;
@@ -5663,14 +5642,10 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
dasd_eckd_read_features(device);
/* Read Volume Information */
- rc = dasd_eckd_read_vol_info(device);
- if (rc)
- goto out_err2;
+ dasd_eckd_read_vol_info(device);
/* Read Extent Pool Information */
- rc = dasd_eckd_read_ext_pool_info(device);
- if (rc)
- goto out_err2;
+ dasd_eckd_read_ext_pool_info(device);
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@@ -6521,20 +6496,8 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
unsigned int logical_block_size = block->bp_block;
struct request_queue *q = block->request_queue;
struct dasd_device *device = block->base;
- struct dasd_eckd_private *private;
- unsigned int max_discard_sectors;
- unsigned int max_bytes;
- unsigned int ext_bytes; /* Extent Size in Bytes */
- int recs_per_trk;
- int trks_per_cyl;
- int ext_limit;
- int ext_size; /* Extent Size in Cylinders */
int max;
- private = device->private;
- trks_per_cyl = private->rdc_data.trk_per_cyl;
- recs_per_trk = recs_per_track(&private->rdc_data, 0, logical_block_size);
-
if (device->features & DASD_FEATURE_USERAW) {
/*
* the max_blocks value for raw_track access is 256
@@ -6555,28 +6518,6 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
/* With page sized segments each segment can be translated into one idaw/tidaw */
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-
- if (dasd_eckd_is_ese(device)) {
- /*
- * Depending on the extent size, up to UINT_MAX bytes can be
- * accepted. However, neither DASD_ECKD_RAS_EXTS_MAX nor the
- * device limits should be exceeded.
- */
- ext_size = dasd_eckd_ext_size(device);
- ext_limit = min(private->real_cyl / ext_size, DASD_ECKD_RAS_EXTS_MAX);
- ext_bytes = ext_size * trks_per_cyl * recs_per_trk *
- logical_block_size;
- max_bytes = UINT_MAX - (UINT_MAX % ext_bytes);
- if (max_bytes / ext_bytes > ext_limit)
- max_bytes = ext_bytes * ext_limit;
-
- max_discard_sectors = max_bytes / 512;
-
- blk_queue_max_discard_sectors(q, max_discard_sectors);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
- q->limits.discard_granularity = ext_bytes;
- q->limits.discard_alignment = ext_bytes;
- }
}
static struct ccw_driver dasd_eckd_driver = {
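
The probe-path change above demotes the two reads from fatal to advisory: a failure is logged through the s390 debug feature instead of aborting device setup, since volume storage and extent pool data are not required to bring the device online. The shape of that pattern, as a standalone sketch:

    #include <stdio.h>

    static int read_optional_info(void)
    {
        return -5;      /* stand-in for a failed read, e.g. -EIO */
    }

    static int device_setup(void)
    {
        int rc = read_optional_info();

        if (rc)         /* log and continue: the data is advisory */
            fprintf(stderr, "optional info read failed, rc=%d\n", rc);

        /* mandatory setup steps still run and decide overall success */
        return 0;
    }

    int main(void)
    {
        return device_setup();
    }
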
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 8a9fcbd0e8ac19..fc3a8d8064f84f 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -34,11 +34,15 @@ static void erofs_readendio(struct bio *bio)
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
- struct inode *const bd_inode = sb->s_bdev->bd_inode;
- struct address_space *const mapping = bd_inode->i_mapping;
+ struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
+ struct page *page;
- return read_cache_page_gfp(mapping, blkaddr,
+ page = read_cache_page_gfp(mapping, blkaddr,
mapping_gfp_constraint(mapping, ~__GFP_FS));
+ /* should already be PageUptodate */
+ if (!IS_ERR(page))
+ lock_page(page);
+ return page;
}
static int erofs_map_blocks_flatmode(struct inode *inode,
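
read_cache_page_gfp() returns an uptodate but unlocked page (or an ERR_PTR), so taking the page lock before returning lets callers rely on holding it; they are then expected to drop it with unlock_page()/put_page(). A kernel-style caller sketch under those post-fix semantics (function and usage hypothetical):

    static int read_meta_block(struct super_block *sb, erofs_blk_t blkaddr)
    {
        struct page *page = erofs_get_meta_page(sb, blkaddr);

        if (IS_ERR(page))
            return PTR_ERR(page);

        /* ... parse the metadata under the page lock ... */

        unlock_page(page);
        put_page(page);
        return 0;
    }
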
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index caf9a95173b0fb..0e369494f2f2c4 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -105,9 +105,9 @@ static int erofs_read_superblock(struct super_block *sb)
int ret;
page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
- if (!page) {
+ if (IS_ERR(page)) {
erofs_err(sb, "cannot read erofs superblock");
- return -EIO;
+ return PTR_ERR(page);
}
sbi = EROFS_SB(sb);
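
read_mapping_page() reports failure with an ERR_PTR-encoded errno, never NULL, so the old NULL test could not fire and the real error was replaced by a blanket -EIO. The idiom, sketched in kernel style (wrapper name hypothetical):

    static int read_sb_page(struct address_space *mapping, struct page **out)
    {
        struct page *page = read_mapping_page(mapping, 0, NULL);

        if (IS_ERR(page))               /* never NULL on failure */
            return PTR_ERR(page);       /* propagate the real errno */

        *out = page;
        return 0;
    }
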
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 96e34c90f81438..fad80c97d2476f 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -575,7 +575,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
struct erofs_map_blocks *const map = &fe->map;
struct z_erofs_collector *const clt = &fe->clt;
const loff_t offset = page_offset(page);
- bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED);
+ bool tight = true;
enum z_erofs_cache_alloctype cache_strategy;
enum z_erofs_page_type page_type;
@@ -628,8 +628,16 @@ restart_now:
preload_compressed_pages(clt, MNGD_MAPPING(sbi),
cache_strategy, pagepool);
- tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED);
hitted:
+ /*
+ * Ensure the current partial page belongs to this submit chain rather
+ * than other concurrent submit chains or the noio(bypass) chain since
+ * those chains are handled asynchronously thus the page cannot be used
+ * for inplace I/O or pagevec (should be processed in strict order.)
+ */
+ tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
+ clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
+
cur = end - min_t(unsigned int, offset + end - map->m_la, end);
if (!(map->m_flags & EROFS_MAP_MAPPED)) {
zero_user_segment(page, cur, end);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index aa8ac557493cb5..8a0381f1a43bec 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1892,15 +1892,15 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
unsigned count, req_dist, tail_index;
struct io_ring_ctx *ctx = req->ctx;
struct list_head *entry;
- struct timespec ts;
+ struct timespec64 ts;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
sqe->len != 1)
return -EINVAL;
- if (copy_from_user(&ts, (void __user *) (unsigned long) sqe->addr,
- sizeof(ts)))
+
+ if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
return -EFAULT;
/*
@@ -1934,7 +1934,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
req->timeout.timer.function = io_timeout_fn;
- hrtimer_start(&req->timeout.timer, timespec_to_ktime(ts),
+ hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
HRTIMER_MODE_REL);
return 0;
}
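
Copying a raw struct timespec from userspace breaks when kernel and userspace disagree about the layout (a 32-bit userspace has a 32-bit tv_sec); get_timespec64() instead reads the fixed-layout 64-bit struct. A userspace sketch of that fixed layout, with field widths matching struct __kernel_timespec:

    #include <stdint.h>
    #include <stdio.h>

    /* Both fields are 64-bit regardless of the architecture's time_t. */
    struct kernel_timespec {
        int64_t tv_sec;
        int64_t tv_nsec;
    };

    int main(void)
    {
        struct kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 500000000 };
        int64_t ns = ts.tv_sec * 1000000000LL + ts.tv_nsec;

        printf("timeout = %lld ns, struct size = %zu\n",
               (long long)ns, sizeof(ts));      /* always 16 bytes */
        return 0;
    }
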
diff --git a/fs/statfs.c b/fs/statfs.c
index eea7af6f2f229e..2616424012ea7b 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -318,19 +318,10 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
{
struct compat_statfs64 buf;
- if (sizeof(ubuf->f_bsize) == 4) {
- if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
- kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
- return -EOVERFLOW;
- /* f_files and f_ffree may be -1; it's okay
- * to stuff that into 32 bits */
- if (kbuf->f_files != 0xffffffffffffffffULL
- && (kbuf->f_files & 0xffffffff00000000ULL))
- return -EOVERFLOW;
- if (kbuf->f_ffree != 0xffffffffffffffffULL
- && (kbuf->f_ffree & 0xffffffff00000000ULL))
- return -EOVERFLOW;
- }
+
+ if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
+ return -EOVERFLOW;
+
memset(&buf, 0, sizeof(struct compat_statfs64));
buf.f_type = kbuf->f_type;
buf.f_bsize = kbuf->f_bsize;
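
The simplification works because compat_statfs64 already carries 64-bit counters for f_blocks, f_files and friends, so the -1 special case for f_files is no longer needed; only f_bsize and f_frsize remain narrow enough to overflow, on the assumption that the other 32-bit members (f_type, f_namelen, f_flags) cannot carry values that large. The mask test, as a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* True if a 64-bit value survives truncation to 32 bits. */
    static int fits_in_u32(uint64_t v)
    {
        return (v & 0xffffffff00000000ULL) == 0;
    }

    int main(void)
    {
        printf("%d\n", fits_in_u32(4096));       /* 1: typical f_bsize */
        printf("%d\n", fits_in_u32(1ULL << 33)); /* 0: would -EOVERFLOW */
        return 0;
    }
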
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index c99b4f2482c6bf..4fe35d600ab880 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -1003,6 +1003,8 @@ struct drm_amdgpu_info_device {
__u64 high_va_max;
/* gfx10 pa_sc_tile_steering_override */
__u32 pa_sc_tile_steering_override;
+ /* disabled TCCs */
+ __u64 tcc_disabled_mask;
};
struct drm_amdgpu_info_hw_ip {
diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
index 1c215ea1798e60..e168dc59e9a0d6 100644
--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -45,6 +45,27 @@ struct nvme_passthru_cmd {
__u32 result;
};
+struct nvme_passthru_cmd64 {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd1;
+ __u32 nsid;
+ __u32 cdw2;
+ __u32 cdw3;
+ __u64 metadata;
+ __u64 addr;
+ __u32 metadata_len;
+ __u32 data_len;
+ __u32 cdw10;
+ __u32 cdw11;
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 cdw15;
+ __u32 timeout_ms;
+ __u64 result;
+};
+
#define nvme_admin_cmd nvme_passthru_cmd
#define NVME_IOCTL_ID _IO('N', 0x40)
@@ -54,5 +75,7 @@ struct nvme_passthru_cmd {
#define NVME_IOCTL_RESET _IO('N', 0x44)
#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45)
#define NVME_IOCTL_RESCAN _IO('N', 0x46)
+#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64)
+#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64)
#endif /* _UAPI_LINUX_NVME_IOCTL_H */
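
The 64-bit variant exists so that both result dwords of the completion queue entry fit in cmd.result. A hedged userspace sketch issuing an Identify Controller admin command (opcode 0x06, CNS=1 in cdw10) through the new ioctl; the device path is an assumption, and the build needs uapi headers that contain this patch:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/nvme_ioctl.h>

    int main(void)
    {
        unsigned char id[4096];
        struct nvme_passthru_cmd64 cmd;
        int fd = open("/dev/nvme0", O_RDONLY); /* assumed device node */

        if (fd < 0)
            return 1;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode   = 0x06;                   /* Identify */
        cmd.addr     = (uint64_t)(uintptr_t)id;
        cmd.data_len = sizeof(id);
        cmd.cdw10    = 1;                      /* CNS 1: controller data */

        if (ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd) == 0)
            printf("result=%llu model=%.40s\n",
                   (unsigned long long)cmd.result,
                   (char *)(id + 24));         /* model number at byte 24 */
        close(fd);
        return 0;
    }
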
diff --git a/include/uapi/linux/pg.h b/include/uapi/linux/pg.h
index 364c350e85cd34..62b6f69bd9fb0c 100644
--- a/include/uapi/linux/pg.h
+++ b/include/uapi/linux/pg.h
@@ -35,6 +35,9 @@
*/
+#ifndef _UAPI_LINUX_PG_H
+#define _UAPI_LINUX_PG_H
+
#define PG_MAGIC 'P'
#define PG_RESET 'Z'
#define PG_COMMAND 'C'
@@ -61,4 +64,4 @@ struct pg_read_hdr {
};
-/* end of pg.h */
+#endif /* _UAPI_LINUX_PG_H */
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 0945805982b4b4..99335e1f4a275b 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -33,8 +33,31 @@
#define CLONE_NEWNET 0x40000000 /* New network namespace */
#define CLONE_IO 0x80000000 /* Clone io context */
-/*
- * Arguments for the clone3 syscall
+#ifndef __ASSEMBLY__
+/**
+ * struct clone_args - arguments for the clone3 syscall
+ * @flags: Flags for the new process as listed above.
+ * All flags are valid except for CSIGNAL and
+ * CLONE_DETACHED.
+ * @pidfd: If CLONE_PIDFD is set, a pidfd will be
+ * returned in this argument.
+ * @child_tid: If CLONE_CHILD_SETTID is set, the TID of the
+ * child process will be returned in the child's
+ * memory.
+ * @parent_tid: If CLONE_PARENT_SETTID is set, the TID of
+ * the child process will be returned in the
+ * parent's memory.
+ * @exit_signal: The exit_signal the parent process will be
+ * sent when the child exits.
+ * @stack: Specify the location of the stack for the
+ * child process.
+ * @stack_size: The size of the stack for the child process.
+ * @tls: If CLONE_SETTLS is set, the tls descriptor
+ * is set to tls.
+ *
+ * The structure is versioned by size and thus extensible.
+ * New struct members must go at the end of the struct and
+ * must be properly 64bit aligned.
*/
struct clone_args {
__aligned_u64 flags;
@@ -46,6 +69,7 @@ struct clone_args {
__aligned_u64 stack_size;
__aligned_u64 tls;
};
+#endif
#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
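
Versioning by size means a caller may pass a shorter (older) or longer (newer) struct than the kernel knows: missing bytes are zero-filled, and a nonzero tail the kernel does not understand is rejected. A userspace-compilable sketch of that rule, close in spirit to the kernel's copy_struct_from_user() helper (the helper itself is not shown in this patch):

    #include <stdint.h>
    #include <string.h>

    static int copy_versioned(void *dst, size_t ksize,
                              const void *src, size_t usize)
    {
        size_t n = usize < ksize ? usize : ksize;

        if (usize > ksize) {
            const unsigned char *tail = (const unsigned char *)src + ksize;
            size_t i;

            for (i = 0; i < usize - ksize; i++)
                if (tail[i])
                    return -1;  /* the kernel returns -E2BIG here */
        }
        memset(dst, 0, ksize);  /* zero-extend shorter (older) input */
        memcpy(dst, src, n);
        return 0;
    }

    int main(void)
    {
        uint64_t v0[2] = { 0, 0 };      /* "old", shorter layout */
        uint64_t v1[3] = { 0, 0, 7 };   /* "newer", nonzero unknown tail */
        uint64_t kbuf[3];

        if (copy_versioned(kbuf, sizeof(kbuf), v0, sizeof(v0)))
            return 1;                   /* short input must succeed */
        /* nonzero tail must be rejected, so exit 0 means both passed */
        return copy_versioned(kbuf, 16, v1, sizeof(v1)) == 0;
    }
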
diff --git a/kernel/fork.c b/kernel/fork.c
index 2ef529869c6417..1f6c45f6a734de 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2584,6 +2584,17 @@ static bool clone3_args_valid(const struct kernel_clone_args *kargs)
return true;
}
+/**
+ * clone3 - create a new process with specific properties
+ * @uargs: argument structure
+ * @size: size of @uargs
+ *
+ * clone3() is the extensible successor to clone()/clone2().
+ * It takes a struct as argument that is versioned by its size.
+ *
+ * Return: On success, a positive PID for the child process.
+ * On error, a negative errno number.
+ */
SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
{
int err;
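
For illustration, a minimal raw-syscall user of the documented struct; glibc had no clone3() wrapper at the time, so the syscall number (435) is used directly, and the sketch assumes installed uapi headers that contain this series:

    #define _GNU_SOURCE
    #include <linux/sched.h>        /* struct clone_args */
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <sys/wait.h>
    #include <unistd.h>

    #ifndef __NR_clone3
    #define __NR_clone3 435
    #endif

    int main(void)
    {
        struct clone_args args;
        long pid;

        memset(&args, 0, sizeof(args));
        args.exit_signal = SIGCHLD;     /* behave like a plain fork() */

        pid = syscall(__NR_clone3, &args, sizeof(args));
        if (pid < 0)
            return 1;
        if (pid == 0)
            _exit(0);                   /* child */

        waitpid((pid_t)pid, NULL, 0);
        printf("clone3 created child %ld\n", pid);
        return 0;
    }
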
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index a39bed2c784f42..168479a7d61b8c 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -174,7 +174,6 @@ static int membarrier_private_expedited(int flags)
*/
if (cpu == raw_smp_processor_id())
continue;
- rcu_read_lock();
p = rcu_dereference(cpu_rq(cpu)->curr);
if (p && p->mm == mm)
__cpumask_set_cpu(cpu, tmpmask);
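
The deleted rcu_read_lock() had no matching unlock in this scope; the surrounding loop already runs inside one read-side critical section, so the extra lock only unbalanced it. The balanced shape, sketched from the fixed function:

    rcu_read_lock();
    for_each_online_cpu(cpu) {
        if (cpu == raw_smp_processor_id())
            continue;               /* the caller handles itself */
        p = rcu_dereference(cpu_rq(cpu)->curr);
        if (p && p->mm == mm)
            __cpumask_set_cpu(cpu, tmpmask);
    }
    rcu_read_unlock();              /* pairs with the single lock above */
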
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index c1f5bb590b5e43..b5a65e212df2f9 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -42,39 +42,39 @@ static int bc_shutdown(struct clock_event_device *evt)
*/
static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
{
- int bc_moved;
/*
- * We try to cancel the timer first. If the callback is on
- * flight on some other cpu then we let it handle it. If we
- * were able to cancel the timer nothing can rearm it as we
- * own broadcast_lock.
+ * This is called either from enter/exit idle code or from the
+ * broadcast handler. In all cases tick_broadcast_lock is held.
*
- * However we can also be called from the event handler of
- * ce_broadcast_hrtimer itself when it expires. We cannot
- * restart the timer because we are in the callback, but we
- * can set the expiry time and let the callback return
- * HRTIMER_RESTART.
+ * hrtimer_cancel() cannot be called here neither from the
+ * broadcast handler nor from the enter/exit idle code. The idle
+ * code can run into the problem described in bc_shutdown() and the
+ * broadcast handler cannot wait for itself to complete for obvious
+ * reasons.
*
- * Since we are in the idle loop at this point and because
- * hrtimer_{start/cancel} functions call into tracing,
- * calls to these functions must be bound within RCU_NONIDLE.
+ * Each caller tries to arm the hrtimer on its own CPU, but if the
+ * hrtimer callback function is currently running, then
+ * hrtimer_start() cannot move it and the timer stays on the CPU on
+ * which it is assigned at the moment.
+ *
+ * As this can be called from idle code, the hrtimer_start()
+ * invocation has to be wrapped with RCU_NONIDLE() as
+ * hrtimer_start() can call into tracing.
*/
- RCU_NONIDLE(
- {
- bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
- if (bc_moved) {
- hrtimer_start(&bctimer, expires,
- HRTIMER_MODE_ABS_PINNED_HARD);
- }
- }
- );
-
- if (bc_moved) {
- /* Bind the "device" to the cpu */
- bc->bound_on = smp_processor_id();
- } else if (bc->bound_on == smp_processor_id()) {
- hrtimer_set_expires(&bctimer, expires);
- }
+ RCU_NONIDLE( {
+ hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED_HARD);
+ /*
+ * The core tick broadcast mode expects bc->bound_on to be set
+ * correctly to prevent a CPU which has the broadcast hrtimer
+ * armed from going deep idle.
+ *
+ * As tick_broadcast_lock is held, nothing can change the cpu
+ * base which was just established in hrtimer_start() above. So
+ * the below access is safe even without holding the hrtimer
+ * base lock.
+ */
+ bc->bound_on = bctimer.base->cpu_base->cpu;
+ } );
return 0;
}
@@ -100,10 +100,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
{
ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
- if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
- if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
- return HRTIMER_RESTART;
-
return HRTIMER_NORESTART;
}
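
After this change the broadcast hrtimer is strictly one-shot from the handler's point of view: the callback never returns HRTIMER_RESTART, and all rearming goes through bc_set_next(). A minimal module-style sketch of that one-shot pattern (a standalone illustration, not the broadcast code itself):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/module.h>

    static struct hrtimer demo_timer;

    /* One-shot style: never HRTIMER_RESTART from the callback; whoever
     * needs another expiry arms the timer again with hrtimer_start(). */
    static enum hrtimer_restart demo_fn(struct hrtimer *t)
    {
        pr_info("demo hrtimer fired\n");
        return HRTIMER_NORESTART;
    }

    static int __init demo_init(void)
    {
        hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        demo_timer.function = demo_fn;
        hrtimer_start(&demo_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        hrtimer_cancel(&demo_timer);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
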
diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
index 464c9b76148f15..7550f08822a334 100644
--- a/tools/testing/selftests/pidfd/Makefile
+++ b/tools/testing/selftests/pidfd/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-CFLAGS += -g -I../../../../usr/include/ -lpthread
+CFLAGS += -g -I../../../../usr/include/ -pthread
TEST_GEN_PROGS := pidfd_test pidfd_open_test pidfd_poll_test pidfd_wait