From 9f365cb8bbd0162963d6852651d7c9e30adcb7b5 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Tue, 14 May 2024 13:47:23 -0700 Subject: [PATCH 001/537] scsi: mpi3mr: Use proper format specifier in mpi3mr_sas_port_add() When building for a 32-bit platform such as ARM or i386, for which size_t is unsigned int, there is a warning due to using an unsigned long format specifier: drivers/scsi/mpi3mr/mpi3mr_transport.c:1370:11: error: format specifies type 'unsigned long' but the argument has type 'unsigned int' [-Werror,-Wformat] 1369 | ioc_warn(mrioc, "skipping port %u, max allowed value is %lu\n", | ~~~ | %u 1370 | i, sizeof(mr_sas_port->phy_mask) * 8); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Use the proper format specifier for size_t, %zu, to resolve the warning for all platforms. Fixes: 3668651def2c ("scsi: mpi3mr: Sanitise num_phys") Signed-off-by: Nathan Chancellor Link: https://lore.kernel.org/r/20240514-mpi3mr-fix-wformat-v1-1-f1ad49217e5e@kernel.org Signed-off-by: Martin K. Petersen --- drivers/scsi/mpi3mr/mpi3mr_transport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c index 7ca9a7c2709cf..5d261c2f2d200 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_transport.c +++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c @@ -1366,7 +1366,7 @@ static struct mpi3mr_sas_port *mpi3mr_sas_port_add(struct mpi3mr_ioc *mrioc, continue; if (i > sizeof(mr_sas_port->phy_mask) * 8) { - ioc_warn(mrioc, "skipping port %u, max allowed value is %lu\n", + ioc_warn(mrioc, "skipping port %u, max allowed value is %zu\n", i, sizeof(mr_sas_port->phy_mask) * 8); goto out_fail; } From 10157b1fc1a762293381e9145041253420dfc6ad Mon Sep 17 00:00:00 2001 From: Martin Wilck Date: Tue, 14 May 2024 16:03:44 +0200 Subject: [PATCH 002/537] scsi: core: alua: I/O errors for ALUA state transitions When a host is configured with a few LUNs and I/O is running, injecting FC faults repeatedly leads to path recovery problems. The LUNs have 4 paths each and 3 of them come back active after say an FC fault which makes 2 of the paths go down, instead of all 4. This happens after several iterations of continuous FC faults. Reason here is that we're returning an I/O error whenever we're encountering sense code 06/04/0a (LOGICAL UNIT NOT ACCESSIBLE, ASYMMETRIC ACCESS STATE TRANSITION) instead of retrying. [mwilck: The original patch was developed by Rajashekhar M A and Hannes Reinecke. I moved the code to alua_check_sense() as suggested by Mike Christie [1]. Evan Milne had raised the question whether pg->state should be set to transitioning in the UA case [2]. I believe that doing this is correct. SCSI_ACCESS_STATE_TRANSITIONING by itself doesn't cause I/O errors. Our handler schedules an RTPG, which will only result in an I/O error condition if the transitioning timeout expires.] [1] https://lore.kernel.org/all/0bc96e82-fdda-4187-148d-5b34f81d4942@oracle.com/ [2] https://lore.kernel.org/all/CAGtn9r=kicnTDE2o7Gt5Y=yoidHYD7tG8XdMHEBJTBraVEoOCw@mail.gmail.com/ Co-developed-by: Rajashekhar M A Co-developed-by: Hannes Reinecke Signed-off-by: Hannes Reinecke Signed-off-by: Martin Wilck Link: https://lore.kernel.org/r/20240514140344.19538-1-mwilck@suse.com Reviewed-by: Damien Le Moal Reviewed-by: Christoph Hellwig Reviewed-by: Mike Christie Signed-off-by: Martin K. 
Petersen --- drivers/scsi/device_handler/scsi_dh_alua.c | 31 +++++++++++++++------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index a226dc1b65d71..4eb0837298d4d 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -414,28 +414,40 @@ static char print_alua_state(unsigned char state) } } -static enum scsi_disposition alua_check_sense(struct scsi_device *sdev, - struct scsi_sense_hdr *sense_hdr) +static void alua_handle_state_transition(struct scsi_device *sdev) { struct alua_dh_data *h = sdev->handler_data; struct alua_port_group *pg; + rcu_read_lock(); + pg = rcu_dereference(h->pg); + if (pg) + pg->state = SCSI_ACCESS_STATE_TRANSITIONING; + rcu_read_unlock(); + alua_check(sdev, false); +} + +static enum scsi_disposition alua_check_sense(struct scsi_device *sdev, + struct scsi_sense_hdr *sense_hdr) +{ switch (sense_hdr->sense_key) { case NOT_READY: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) { /* * LUN Not Accessible - ALUA state transition */ - rcu_read_lock(); - pg = rcu_dereference(h->pg); - if (pg) - pg->state = SCSI_ACCESS_STATE_TRANSITIONING; - rcu_read_unlock(); - alua_check(sdev, false); + alua_handle_state_transition(sdev); return NEEDS_RETRY; } break; case UNIT_ATTENTION: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) { + /* + * LUN Not Accessible - ALUA state transition + */ + alua_handle_state_transition(sdev); + return NEEDS_RETRY; + } if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) { /* * Power On, Reset, or Bus Device Reset. @@ -502,7 +514,8 @@ static int alua_tur(struct scsi_device *sdev) retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ, ALUA_FAILOVER_RETRIES, &sense_hdr); - if (sense_hdr.sense_key == NOT_READY && + if ((sense_hdr.sense_key == NOT_READY || + sense_hdr.sense_key == UNIT_ATTENTION) && sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) return SCSI_DH_RETRY; else if (retval) From 9fad9d560af5c654bb38e0b07ee54a4e9acdc5cd Mon Sep 17 00:00:00 2001 From: Justin Stitt Date: Wed, 8 May 2024 17:22:51 +0000 Subject: [PATCH 003/537] scsi: sr: Fix unintentional arithmetic wraparound Running syzkaller with the newly reintroduced signed integer overflow sanitizer produces this report: [ 65.194362] ------------[ cut here ]------------ [ 65.197752] UBSAN: signed-integer-overflow in ../drivers/scsi/sr_ioctl.c:436:9 [ 65.203607] -2147483648 * 177 cannot be represented in type 'int' [ 65.207911] CPU: 2 PID: 10416 Comm: syz-executor.1 Not tainted 6.8.0-rc2-00035-gb3ef86b5a957 #1 [ 65.213585] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014 [ 65.219923] Call Trace: [ 65.221556] [ 65.223029] dump_stack_lvl+0x93/0xd0 [ 65.225573] handle_overflow+0x171/0x1b0 [ 65.228219] sr_select_speed+0xeb/0xf0 [ 65.230786] ? __pm_runtime_resume+0xe6/0x130 [ 65.233606] sr_block_ioctl+0x15d/0x1d0 ... Historically, the signed integer overflow sanitizer did not work in the kernel due to its interaction with `-fwrapv` but this has since been changed [1] in the newest version of Clang. It was re-enabled in the kernel with Commit 557f8c582a9b ("ubsan: Reintroduce signed overflow sanitizer"). Firstly, let's change the type of "speed" to unsigned long as sr_select_speed()'s only caller passes in an unsigned long anyways. $ git grep '\.select_speed' | drivers/scsi/sr.c: .select_speed = sr_select_speed, ... 
| static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi, | unsigned long arg) | { | ... | return cdi->ops->select_speed(cdi, arg); | } Next, let's add an extra check to make sure we don't exceed 0xffff/177 (350) since 0xffff is the max speed. This has two benefits: 1) we deal with integer overflow before it happens and 2) we properly respect the max speed of 0xffff. There are some "magic" numbers here but I did not want to change more than what was necessary. Link: https://github.com/llvm/llvm-project/pull/82432 [1] Closes: https://github.com/KSPP/linux/issues/357 Cc: linux-hardening@vger.kernel.org Signed-off-by: Justin Stitt Link: https://lore.kernel.org/r/20240508-b4-b4-sio-sr_select_speed-v2-1-00b68f724290@google.com Reviewed-by: Kees Cook Signed-off-by: Martin K. Petersen --- Documentation/cdrom/cdrom-standard.rst | 4 ++-- drivers/scsi/sr.h | 2 +- drivers/scsi/sr_ioctl.c | 5 ++++- include/linux/cdrom.h | 2 +- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/Documentation/cdrom/cdrom-standard.rst b/Documentation/cdrom/cdrom-standard.rst index 7964fe134277b..6c1303cff159e 100644 --- a/Documentation/cdrom/cdrom-standard.rst +++ b/Documentation/cdrom/cdrom-standard.rst @@ -217,7 +217,7 @@ current *struct* is:: int (*media_changed)(struct cdrom_device_info *, int); int (*tray_move)(struct cdrom_device_info *, int); int (*lock_door)(struct cdrom_device_info *, int); - int (*select_speed)(struct cdrom_device_info *, int); + int (*select_speed)(struct cdrom_device_info *, unsigned long); int (*get_last_session) (struct cdrom_device_info *, struct cdrom_multisession *); int (*get_mcn)(struct cdrom_device_info *, struct cdrom_mcn *); @@ -396,7 +396,7 @@ action need be taken, and the return value should be 0. :: - int select_speed(struct cdrom_device_info *cdi, int speed) + int select_speed(struct cdrom_device_info *cdi, unsigned long speed) Some CD-ROM drives are capable of changing their head-speed. There are several reasons for changing the speed of a CD-ROM drive. 
Badly diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h index 1175f2e213b56..dc899277b3a44 100644 --- a/drivers/scsi/sr.h +++ b/drivers/scsi/sr.h @@ -65,7 +65,7 @@ int sr_disk_status(struct cdrom_device_info *); int sr_get_last_session(struct cdrom_device_info *, struct cdrom_multisession *); int sr_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *); int sr_reset(struct cdrom_device_info *); -int sr_select_speed(struct cdrom_device_info *cdi, int speed); +int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed); int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *); int sr_is_xa(Scsi_CD *); diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index 5b0b35e60e61f..a0d2556a27bba 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -425,11 +425,14 @@ int sr_reset(struct cdrom_device_info *cdi) return 0; } -int sr_select_speed(struct cdrom_device_info *cdi, int speed) +int sr_select_speed(struct cdrom_device_info *cdi, unsigned long speed) { Scsi_CD *cd = cdi->handle; struct packet_command cgc; + /* avoid exceeding the max speed or overflowing integer bounds */ + speed = clamp(0, speed, 0xffff / 177); + if (speed == 0) speed = 0xffff; /* set to max */ else diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index 98c6fd0b39b63..fdfb61ccf55ae 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -77,7 +77,7 @@ struct cdrom_device_ops { unsigned int clearing, int slot); int (*tray_move) (struct cdrom_device_info *, int); int (*lock_door) (struct cdrom_device_info *, int); - int (*select_speed) (struct cdrom_device_info *, int); + int (*select_speed) (struct cdrom_device_info *, unsigned long); int (*get_last_session) (struct cdrom_device_info *, struct cdrom_multisession *); int (*get_mcn) (struct cdrom_device_info *, From 51071f0831ea975fc045526dd7e17efe669dc6e1 Mon Sep 17 00:00:00 2001 From: Saurav Kashyap Date: Wed, 15 May 2024 14:40:59 +0530 Subject: [PATCH 004/537] scsi: qedf: Don't process stag work during unload and recovery Stag work can cause issues during unload and recovery, hence don't process it. Signed-off-by: Saurav Kashyap Signed-off-by: Nilesh Javali Link: https://lore.kernel.org/r/20240515091101.18754-2-skashyap@marvell.com Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_main.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index fd12439cbaab6..c457caae07b38 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -3997,6 +3997,22 @@ void qedf_stag_change_work(struct work_struct *work) struct qedf_ctx *qedf = container_of(work, struct qedf_ctx, stag_work.work); + if (!qedf) { + QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL"); + return; + } + + if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, + "Already is in recovery, hence not calling software context reset.\n"); + return; + } + + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n"); + return; + } + printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.", dev_name(&qedf->pdev->dev), __func__, __LINE__, qedf->dbg_ctx.host_no); From 78e88472b60936025b83eba57cffa59d3501dc07 Mon Sep 17 00:00:00 2001 From: Saurav Kashyap Date: Wed, 15 May 2024 14:41:00 +0530 Subject: [PATCH 005/537] scsi: qedf: Wait for stag work during unload If stag work is already scheduled and unload is called, it can lead to issues as unload cleans up the work element. 
Wait for stag work to get completed before cleanup during unload. Signed-off-by: Saurav Kashyap Signed-off-by: Nilesh Javali Link: https://lore.kernel.org/r/20240515091101.18754-3-skashyap@marvell.com Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf.h | 1 + drivers/scsi/qedf/qedf_main.c | 30 +++++++++++++++++++++++++++--- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index 5058e01b65a27..98afdfe636003 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h @@ -363,6 +363,7 @@ struct qedf_ctx { #define QEDF_IN_RECOVERY 5 #define QEDF_DBG_STOP_IO 6 #define QEDF_PROBING 8 +#define QEDF_STAG_IN_PROGRESS 9 unsigned long flags; /* Miscellaneous state flags */ int fipvlan_retries; u8 num_queues; diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index c457caae07b38..eca0190d9a826 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -318,11 +318,18 @@ static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did, */ if (resp == fc_lport_flogi_resp) { qedf->flogi_cnt++; + qedf->flogi_pending++; + + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n"); + qedf->flogi_pending = 0; + } + if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) { schedule_delayed_work(&qedf->stag_work, 2); return NULL; } - qedf->flogi_pending++; + return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp, arg, timeout); } @@ -912,13 +919,14 @@ void qedf_ctx_soft_reset(struct fc_lport *lport) struct qedf_ctx *qedf; struct qed_link_output if_link; + qedf = lport_priv(lport); + if (lport->vport) { + clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); printk_ratelimited("Cannot issue host reset on NPIV port.\n"); return; } - qedf = lport_priv(lport); - qedf->flogi_pending = 0; /* For host reset, essentially do a soft link up/down */ atomic_set(&qedf->link_state, QEDF_LINK_DOWN); @@ -938,6 +946,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport) if (!if_link.link_up) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Physical link is not up.\n"); + clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); return; } /* Flush and wait to make sure link down is processed */ @@ -950,6 +959,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport) "Queue link up work.\n"); queue_delayed_work(qedf->link_update_wq, &qedf->link_update, 0); + clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); } /* Reset the host by gracefully logging out and then logging back in */ @@ -3721,6 +3731,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode) { struct qedf_ctx *qedf; int rc; + int cnt = 0; if (!pdev) { QEDF_ERR(NULL, "pdev is NULL.\n"); @@ -3738,6 +3749,17 @@ static void __qedf_remove(struct pci_dev *pdev, int mode) return; } +stag_in_prog: + if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt); + cnt++; + + if (cnt < 5) { + msleep(500); + goto stag_in_prog; + } + } + if (mode != QEDF_MODE_RECOVERY) set_bit(QEDF_UNLOADING, &qedf->flags); @@ -4013,6 +4035,8 @@ void qedf_stag_change_work(struct work_struct *work) return; } + set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); + printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.", dev_name(&qedf->pdev->dev), __func__, __LINE__, qedf->dbg_ctx.host_no); From 6c3bb589debd763dc4b94803ddf3c13b4fcca776 Mon Sep 17 00:00:00 2001 From: Saurav Kashyap Date: Wed, 15 May 2024 14:41:01 +0530 Subject: [PATCH 006/537] scsi: qedf: Set qed_slowpath_params to zero 
before use Zero qed_slowpath_params before use. Signed-off-by: Saurav Kashyap Signed-off-by: Nilesh Javali Link: https://lore.kernel.org/r/20240515091101.18754-4-skashyap@marvell.com Signed-off-by: Martin K. Petersen --- drivers/scsi/qedf/qedf_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index eca0190d9a826..49adddf978cc7 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -3473,6 +3473,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) } /* Start the Slowpath-process */ + memset(&slowpath_params, 0, sizeof(struct qed_slowpath_params)); slowpath_params.int_mode = QED_INT_MODE_MSIX; slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER; slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; From e4f5f8298cf6ddae43210d236ad65ac2c6379559 Mon Sep 17 00:00:00 2001 From: Deming Wang Date: Mon, 13 May 2024 07:59:56 -0400 Subject: [PATCH 007/537] scsi: mpt3sas: Add missing kerneldoc parameter descriptions Add missing kerneldoc parameter descriptions to _scsih_set_debug_level(). Signed-off-by: Deming Wang Link: https://lore.kernel.org/r/20240513115956.1576-1-wangdeming@inspur.com Signed-off-by: Martin K. Petersen --- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 89ef43a5ef862..12d08d8ba5382 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -302,8 +302,8 @@ struct _scsi_io_transfer { /** * _scsih_set_debug_level - global setting of ioc->logging_level. - * @val: ? - * @kp: ? + * @val: value of the parameter to be set + * @kp: pointer to kernel_param structure * * Note: The logging levels are defined in mpt3sas_debug.h. */ From 637c435f08ea7e77c53a2ad590b651d0de225e3b Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Thu, 16 May 2024 11:30:41 +0300 Subject: [PATCH 008/537] wifi: ath11k: Fix error path in ath11k_pcic_ext_irq_config If one of the dummy allocation fails in ath11k_pcic_ext_irq_config(), the previous allocated devices might leak due to returning without deallocating the devices. Instead of returning on the error path, deallocate all the previously allocated net_devices and then return. 
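As a minimal sketch of the unwind-on-failure pattern the fix adopts (plain C with illustrative names, not the ath11k code itself), the idea is that a failed allocation frees everything allocated before it, mirroring the fail_allocate label added by the patch:

    #include <stdlib.h>

    #define GROUPS 8

    /* Allocate one buffer per group; if any allocation fails, free every
     * buffer that was already allocated so nothing leaks. */
    static int example_alloc_groups(void *bufs[GROUPS])
    {
            int i, n;

            for (i = 0; i < GROUPS; i++) {
                    bufs[i] = malloc(64);
                    if (!bufs[i])
                            goto fail;
            }
            return 0;

    fail:
            for (n = 0; n < i; n++) {
                    free(bufs[n]);
                    bufs[n] = NULL;
            }
            return -1;
    }

Centralizing the cleanup behind a single label is the usual kernel idiom because it keeps every error exit path consistent.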
Fixes: bca592ead825 ("wifi: ath11k: allocate dummy net_device dynamically") Signed-off-by: Breno Leitao Reviewed-by: Simon Horman Signed-off-by: Kalle Valo Link: https://msgid.link/20240508185902.70975-1-leitao@debian.org --- drivers/net/wireless/ath/ath11k/pcic.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c index 79eb3f9c902f4..debe7c5919ef0 100644 --- a/drivers/net/wireless/ath/ath11k/pcic.c +++ b/drivers/net/wireless/ath/ath11k/pcic.c @@ -561,6 +561,7 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) { int i, j, n, ret, num_vectors = 0; u32 user_base_data = 0, base_vector = 0; + struct ath11k_ext_irq_grp *irq_grp; unsigned long irq_flags; ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors, @@ -574,14 +575,16 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) irq_flags |= IRQF_NOBALANCING; for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + irq_grp = &ab->ext_irq_grp[i]; u32 num_irq = 0; irq_grp->ab = ab; irq_grp->grp_id = i; irq_grp->napi_ndev = alloc_netdev_dummy(0); - if (!irq_grp->napi_ndev) - return -ENOMEM; + if (!irq_grp->napi_ndev) { + ret = -ENOMEM; + goto fail_allocate; + } netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, ath11k_pcic_ext_grp_napi_poll); @@ -606,11 +609,8 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) int irq = ath11k_pcic_get_msi_irq(ab, vector); if (irq < 0) { - for (n = 0; n <= i; n++) { - irq_grp = &ab->ext_irq_grp[n]; - free_netdev(irq_grp->napi_ndev); - } - return irq; + ret = irq; + goto fail_irq; } ab->irq_num[irq_idx] = irq; @@ -635,6 +635,15 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab) } return 0; +fail_irq: + /* i ->napi_ndev was properly allocated. Free it also */ + i += 1; +fail_allocate: + for (n = 0; n < i; n++) { + irq_grp = &ab->ext_irq_grp[n]; + free_netdev(irq_grp->napi_ndev); + } + return ret; } int ath11k_pcic_config_irq(struct ath11k_base *ab) From 21ae74e1bf18331ae5e279bd96304b3630828009 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Fri, 17 May 2024 10:00:28 +0300 Subject: [PATCH 009/537] wifi: ath10k: fix QCOM_RPROC_COMMON dependency If ath10k_snoc is built-in, while Qualcomm remoteprocs are built as modules, compilation fails with: /usr/bin/aarch64-linux-gnu-ld: drivers/net/wireless/ath/ath10k/snoc.o: in function `ath10k_modem_init': drivers/net/wireless/ath/ath10k/snoc.c:1534: undefined reference to `qcom_register_ssr_notifier' /usr/bin/aarch64-linux-gnu-ld: drivers/net/wireless/ath/ath10k/snoc.o: in function `ath10k_modem_deinit': drivers/net/wireless/ath/ath10k/snoc.c:1551: undefined reference to `qcom_unregister_ssr_notifier' Add corresponding dependency to ATH10K_SNOC Kconfig entry so that it's built as module if QCOM_RPROC_COMMON is built as module too. 
Fixes: 747ff7d3d742 ("ath10k: Don't always treat modem stop events as crashes") Cc: stable@vger.kernel.org Signed-off-by: Dmitry Baryshkov Signed-off-by: Kalle Valo Link: https://msgid.link/20240511-ath10k-snoc-dep-v1-1-9666e3af5c27@linaro.org --- drivers/net/wireless/ath/ath10k/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index e6ea884cafc19..4f385f4a8cef2 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -45,6 +45,7 @@ config ATH10K_SNOC depends on ATH10K depends on ARCH_QCOM || COMPILE_TEST depends on QCOM_SMEM + depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n select QCOM_SCM select QCOM_QMI_HELPERS help From aad11473f8f4be3df86461081ce35ec5b145ba68 Mon Sep 17 00:00:00 2001 From: Dmitry Mastykin Date: Wed, 22 May 2024 10:45:24 +0300 Subject: [PATCH 010/537] NFSv4: Fix memory leak in nfs4_set_security_label We leak nfs_fattr and nfs4_label every time we set a security xattr. Signed-off-by: Dmitry Mastykin Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index c93c12063b3af..94c07875aa3f4 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6268,6 +6268,7 @@ nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) if (status == 0) nfs_setsecurity(inode, fattr); + nfs_free_fattr(fattr); return status; } #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ From 6cbe14f42be3b596e9590d48e12436982ba26e4b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 22 May 2024 11:27:32 -0400 Subject: [PATCH 011/537] MAINTAINERS: Change email address for Trond Myklebust Ensure that patches are sent to an email server that won't corrupt them. Signed-off-by: Trond Myklebust --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 28e20975c26f5..481d76fbf16cc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15549,7 +15549,7 @@ F: drivers/nfc/virtual_ncidev.c F: tools/testing/selftests/nci/ NFS, SUNRPC, AND LOCKD CLIENTS -M: Trond Myklebust +M: Trond Myklebust M: Anna Schumaker L: linux-nfs@vger.kernel.org S: Maintained From 134d0b3f2440cdddd12fc3444c9c0f62331ce6fc Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 21 May 2024 15:58:40 +0300 Subject: [PATCH 012/537] nfs: propagate readlink errors in nfs_symlink_filler There is an inherent race where a symlink file may have been overriden (by a different client) between lookup and readlink, resulting in a spurious EIO error returned to userspace. Fix this by propagating back ESTALE errors such that the vfs will retry the lookup/get_link (similar to nfs4_file_open) at least once. 
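As a generic illustration of the pattern (a plain C sketch with made-up names, not the NFS code): return the underlying error so callers that treat ESTALE specially can retry, rather than flattening every failure to EIO.

    #include <errno.h>

    /* read_fn stands in for the protocol read and returns 0 or a
     * negative errno value. */
    static int example_filler(int (*read_fn)(void *buf, unsigned int len),
                              void *buf, unsigned int len)
    {
            int error = read_fn(buf, len);

            if (error < 0)
                    return error;   /* may be -ESTALE; don't rewrite to -EIO */
            return 0;
    }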
Cc: Dan Aloni Signed-off-by: Sagi Grimberg Reviewed-by: Jeff Layton Signed-off-by: Trond Myklebust --- fs/nfs/symlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c index 0e27a2e4e68b8..13818129d268f 100644 --- a/fs/nfs/symlink.c +++ b/fs/nfs/symlink.c @@ -41,7 +41,7 @@ static int nfs_symlink_filler(struct file *file, struct folio *folio) error: folio_set_error(folio); folio_unlock(folio); - return -EIO; + return error; } static const char *nfs_get_link(struct dentry *dentry, From 5134acb15d9ef27aa2b90aad46d4e89fcef79fdc Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Wed, 22 May 2024 10:32:43 -0700 Subject: [PATCH 013/537] efi/libstub: zboot.lds: Discard .discard sections When building ARCH=loongarch defconfig + CONFIG_UNWINDER_ORC=y using LLVM, there is a warning from ld.lld when linking the EFI zboot image due to the use of unreachable() in number() in vsprintf.c: ld.lld: warning: drivers/firmware/efi/libstub/lib.a(vsprintf.stub.o):(.discard.unreachable+0x0): has non-ABS relocation R_LARCH_32_PCREL against symbol '' If the compiler cannot eliminate the default case for any reason, the .discard.unreachable section will remain in the final binary but the entire point of any section prefixed with .discard is that it is only used at compile time, so it can be discarded via /DISCARD/ in a linker script. The asm-generic vmlinux.lds.h includes .discard and .discard.* in the COMMON_DISCARDS macro but that is not used for zboot.lds, as it is not a kernel image linker script. Add .discard and .discard.* to /DISCARD/ in zboot.lds, so that any sections meant to be discarded at link time are not included in the final zboot image. This issue is not specific to LoongArch, it is just the first architecture to select CONFIG_OBJTOOL, which defines annotate_unreachable() as an asm statement to add the .discard.unreachable section, and use the EFI stub. Closes: https://github.com/ClangBuiltLinux/linux/issues/2023 Signed-off-by: Nathan Chancellor Acked-by: Huacai Chen Signed-off-by: Ard Biesheuvel --- drivers/firmware/efi/libstub/zboot.lds | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds index ac8c0ef851581..af2c82f7bd902 100644 --- a/drivers/firmware/efi/libstub/zboot.lds +++ b/drivers/firmware/efi/libstub/zboot.lds @@ -41,6 +41,7 @@ SECTIONS } /DISCARD/ : { + *(.discard .discard.*) *(.modinfo .init.modinfo) } } From 7c23b186ab892088f76a3ad9dbff1685ffe2e832 Mon Sep 17 00:00:00 2001 From: "Guilherme G. Piccoli" Date: Sun, 19 May 2024 13:33:53 -0300 Subject: [PATCH 014/537] efi: pstore: Return proper errors on UEFI failures Right now efi-pstore either returns 0 (success) or -EIO; but we do have a function to convert UEFI errors in different standard error codes, helping to narrow down potential issues more accurately. So, let's use this helper here. Signed-off-by: Guilherme G. 
Piccoli Reviewed-by: Kees Cook Signed-off-by: Ard Biesheuvel --- drivers/firmware/efi/efi-pstore.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 5b9dc26e6bcb9..552c78f5f0590 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -136,7 +136,7 @@ static int efi_pstore_read_func(struct pstore_record *record, &size, record->buf); if (status != EFI_SUCCESS) { kfree(record->buf); - return -EIO; + return efi_status_to_err(status); } /* @@ -189,7 +189,7 @@ static ssize_t efi_pstore_read(struct pstore_record *record) return 0; if (status != EFI_SUCCESS) - return -EIO; + return efi_status_to_err(status); /* skip variables that don't concern us */ if (efi_guidcmp(guid, LINUX_EFI_CRASH_GUID)) @@ -227,7 +227,7 @@ static int efi_pstore_write(struct pstore_record *record) record->size, record->psi->buf, true); efivar_unlock(); - return status == EFI_SUCCESS ? 0 : -EIO; + return efi_status_to_err(status); }; static int efi_pstore_erase(struct pstore_record *record) @@ -238,7 +238,7 @@ static int efi_pstore_erase(struct pstore_record *record) PSTORE_EFI_ATTRIBUTES, 0, NULL); if (status != EFI_SUCCESS && status != EFI_NOT_FOUND) - return -EIO; + return efi_status_to_err(status); return 0; } From e6722ea6b9ed731f7392277d76ca912dfffca7ee Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 6 Jan 2024 13:48:25 +0100 Subject: [PATCH 015/537] i2c: synquacer: Remove a clk reference from struct synquacer_i2c 'pclk' is only used locally in the probe. Remove it from the 'synquacer_i2c' structure. Also remove a useless debug message. Signed-off-by: Christophe JAILLET Acked-by: Ard Biesheuvel Signed-off-by: Andi Shyti --- drivers/i2c/busses/i2c-synquacer.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c index 31ecb2c7e9783..4eccbcd0fbfc0 100644 --- a/drivers/i2c/busses/i2c-synquacer.c +++ b/drivers/i2c/busses/i2c-synquacer.c @@ -138,7 +138,6 @@ struct synquacer_i2c { int irq; struct device *dev; void __iomem *base; - struct clk *pclk; u32 pclkrate; u32 speed_khz; u32 timeout_ms; @@ -535,6 +534,7 @@ static const struct i2c_adapter synquacer_i2c_ops = { static int synquacer_i2c_probe(struct platform_device *pdev) { struct synquacer_i2c *i2c; + struct clk *pclk; u32 bus_speed; int ret; @@ -550,13 +550,12 @@ static int synquacer_i2c_probe(struct platform_device *pdev) device_property_read_u32(&pdev->dev, "socionext,pclk-rate", &i2c->pclkrate); - i2c->pclk = devm_clk_get_enabled(&pdev->dev, "pclk"); - if (IS_ERR(i2c->pclk)) - return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk), + pclk = devm_clk_get_enabled(&pdev->dev, "pclk"); + if (IS_ERR(pclk)) + return dev_err_probe(&pdev->dev, PTR_ERR(pclk), "failed to get and enable clock\n"); - dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk); - i2c->pclkrate = clk_get_rate(i2c->pclk); + i2c->pclkrate = clk_get_rate(pclk); if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE || i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE) From e61bcf42d290e73025bab38e0e55a5586c2d8ad5 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Mon, 15 Apr 2024 22:50:27 +0200 Subject: [PATCH 016/537] i2c: Remove I2C_CLASS_SPD Remove this class after all users have been gone. 
Signed-off-by: Heiner Kallweit Signed-off-by: Andi Shyti --- include/linux/i2c.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 5e6cd43a6dbdd..9709537370ee9 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -852,7 +852,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap) /* i2c adapter classes (bitmask) */ #define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ -#define I2C_CLASS_SPD (1<<7) /* Memory modules */ /* Warn users that the adapter doesn't support classes anymore */ #define I2C_CLASS_DEPRECATED (1<<8) From 2360497238261f17d4a3f6cbc02d6dbd8951c23c Mon Sep 17 00:00:00 2001 From: Zhang Lixu Date: Thu, 23 May 2024 09:14:01 +0800 Subject: [PATCH 017/537] HID: intel-ish-hid: Fix build error for COMPILE_TEST kernel test robot reported build error due to a pointer type mismatch: .../ishtp/loader.c:172:8: error: incompatible pointer types passing '__le64 *' (aka 'unsigned long long *') to parameter of type 'dma_addr_t *' (aka 'unsigned int *') The issue arises because the driver, which is primarily intended for x86-64, is also built for i386 when COMPILE_TEST is enabled. Resolve type mismatch by using a temporary dma_addr_t variable to hold the DMA address. Populate this temporary variable in dma_alloc_coherent() function, and then convert and store the address in the fragment->fragment_tbl[i].ddr_adrs field in the correct format. Similarly, convert the ddr_adrs field back to dma_addr_t when freeing the DMA buffer with dma_free_coherent(). Fixes: 579a267e4617 ("HID: intel-ish-hid: Implement loading firmware from host feature") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202405201313.SAStVPrT-lkp@intel.com/ Signed-off-by: Zhang Lixu Signed-off-by: Jiri Kosina --- drivers/hid/intel-ish-hid/ishtp/loader.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.c b/drivers/hid/intel-ish-hid/ishtp/loader.c index 993f8b390e57a..2785b04a2f5a0 100644 --- a/drivers/hid/intel-ish-hid/ishtp/loader.c +++ b/drivers/hid/intel-ish-hid/ishtp/loader.c @@ -138,12 +138,13 @@ static void release_dma_bufs(struct ishtp_device *dev, struct loader_xfer_dma_fragment *fragment, void **dma_bufs, u32 fragment_size) { + dma_addr_t dma_addr; int i; for (i = 0; i < FRAGMENT_MAX_NUM; i++) { if (dma_bufs[i]) { - dma_free_coherent(dev->devc, fragment_size, dma_bufs[i], - fragment->fragment_tbl[i].ddr_adrs); + dma_addr = le64_to_cpu(fragment->fragment_tbl[i].ddr_adrs); + dma_free_coherent(dev->devc, fragment_size, dma_bufs[i], dma_addr); dma_bufs[i] = NULL; } } @@ -164,15 +165,16 @@ static int prepare_dma_bufs(struct ishtp_device *dev, struct loader_xfer_dma_fragment *fragment, void **dma_bufs, u32 fragment_size) { + dma_addr_t dma_addr; u32 offset = 0; int i; for (i = 0; i < fragment->fragment_cnt && offset < ish_fw->size; i++) { - dma_bufs[i] = dma_alloc_coherent(dev->devc, fragment_size, - &fragment->fragment_tbl[i].ddr_adrs, GFP_KERNEL); + dma_bufs[i] = dma_alloc_coherent(dev->devc, fragment_size, &dma_addr, GFP_KERNEL); if (!dma_bufs[i]) return -ENOMEM; + fragment->fragment_tbl[i].ddr_adrs = cpu_to_le64(dma_addr); fragment->fragment_tbl[i].length = clamp(ish_fw->size - offset, 0, fragment_size); fragment->fragment_tbl[i].fw_off = offset; memcpy(dma_bufs[i], ish_fw->data + offset, fragment->fragment_tbl[i].length); From 0a3f9f7fc59feb8a91a2793b8b60977895c72365 Mon Sep 17 00:00:00 2001 From: Chen Ni Date: Wed, 15 May 2024 11:30:51 +0800 Subject: 
[PATCH 018/537] HID: nvidia-shield: Add missing check for input_ff_create_memless Add check for the return value of input_ff_create_memless() and return the error if it fails in order to catch the error. Fixes: 09308562d4af ("HID: nvidia-shield: Initial driver implementation with Thunderstrike support") Signed-off-by: Chen Ni Reviewed-by: Rahul Rameshbabu Signed-off-by: Jiri Kosina --- drivers/hid/hid-nvidia-shield.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c index 58b15750dbb0a..ff9078ad19611 100644 --- a/drivers/hid/hid-nvidia-shield.c +++ b/drivers/hid/hid-nvidia-shield.c @@ -283,7 +283,9 @@ static struct input_dev *shield_haptics_create( return haptics; input_set_capability(haptics, EV_FF, FF_RUMBLE); - input_ff_create_memless(haptics, NULL, play_effect); + ret = input_ff_create_memless(haptics, NULL, play_effect); + if (ret) + goto err; ret = input_register_device(haptics); if (ret) From ed281c6ab6eb8a914f06c74dfeaebde15b34a3f4 Mon Sep 17 00:00:00 2001 From: Carl Huang Date: Tue, 21 May 2024 11:08:10 +0300 Subject: [PATCH 019/537] wifi: ath11k: fix WCN6750 firmware crash caused by 17 num_vdevs WCN6750 firmware crashes because of num_vdevs changed from 4 to 17 in ath11k_init_wmi_config_qca6390() as the ab->hw_params.num_vdevs is 17. This is caused by commit f019f4dff2e4 ("wifi: ath11k: support 2 station interfaces") which assigns ab->hw_params.num_vdevs directly to config->num_vdevs in ath11k_init_wmi_config_qca6390(), therefore WCN6750 firmware crashes as it can't support such a big num_vdevs. Fix it by assign 3 to num_vdevs in hw_params for WCN6750 as 3 is sufficient too. Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3 Tested-on: WCN6750 hw1.0 AHB WLAN.MSL.1.0.1-01371-QCAMSLSWPLZ-1 Fixes: f019f4dff2e4 ("wifi: ath11k: support 2 station interfaces") Reported-by: Luca Weiss Tested-by: Luca Weiss Closes: https://lore.kernel.org/r/D15TIIDIIESY.D1EKKJLZINMA@fairphone.com/ Signed-off-by: Carl Huang Signed-off-by: Kalle Valo Link: https://msgid.link/20240520030757.2209395-1-quic_cjhuang@quicinc.com --- drivers/net/wireless/ath/ath11k/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 3cc817a3b4a40..b82e8fb285413 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -604,7 +604,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .coldboot_cal_ftm = true, .cbcal_restart_fw = false, .fw_mem_mode = 0, - .num_vdevs = 16 + 1, + .num_vdevs = 3, .num_peers = 512, .supports_suspend = false, .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074), From 6e16782d6b4a724f9c9dcd49471219643593b60c Mon Sep 17 00:00:00 2001 From: Baochen Qiang Date: Tue, 21 May 2024 11:08:11 +0300 Subject: [PATCH 020/537] wifi: ath11k: move power type check to ASSOC stage when connecting to 6 GHz AP With commit bc8a0fac8677 ("wifi: mac80211: don't set bss_conf in parsing") ath11k fails to connect to 6 GHz AP. This is because currently ath11k checks AP's power type in ath11k_mac_op_assign_vif_chanctx() which would be called in AUTH stage. However with above commit power type is not available until ASSOC stage. As a result power type check fails and therefore connection fails. Fix this by moving power type check to ASSOC stage, also move regulatory rules update there because it depends on power type. 
Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3.6510.30 Fixes: bc8a0fac8677 ("wifi: mac80211: don't set bss_conf in parsing") Signed-off-by: Baochen Qiang Acked-by: Jeff Johnson Signed-off-by: Kalle Valo Link: https://msgid.link/20240424064019.4847-1-quic_bqiang@quicinc.com --- drivers/net/wireless/ath/ath11k/mac.c | 38 ++++++++++++++++++--------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 4f62e38ba48b3..9b96dbb21d833 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -7988,8 +7988,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); int ret; - struct cur_regulatory_info *reg_info; - enum ieee80211_ap_reg_power power_type; mutex_lock(&ar->conf_mutex); @@ -8000,17 +7998,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, if (ath11k_wmi_supports_6ghz_cc_ext(ar) && ctx->def.chan->band == NL80211_BAND_6GHZ && arvif->vdev_type == WMI_VDEV_TYPE_STA) { - reg_info = &ab->reg_info_store[ar->pdev_idx]; - power_type = vif->bss_conf.power_type; - - ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type); - - if (power_type == IEEE80211_REG_UNSET_AP) { - ret = -EINVAL; - goto out; - } - - ath11k_reg_handle_chan_list(ab, reg_info, power_type); arvif->chanctx = *ctx; ath11k_mac_parse_tx_pwr_env(ar, vif, ctx); } @@ -9626,6 +9613,8 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta); + enum ieee80211_ap_reg_power power_type; + struct cur_regulatory_info *reg_info; struct ath11k_peer *peer; int ret = 0; @@ -9705,6 +9694,29 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", sta->addr, arvif->vdev_id, ret); } + + if (!ret && + ath11k_wmi_supports_6ghz_cc_ext(ar) && + arvif->vdev_type == WMI_VDEV_TYPE_STA && + arvif->chanctx.def.chan && + arvif->chanctx.def.chan->band == NL80211_BAND_6GHZ) { + reg_info = &ar->ab->reg_info_store[ar->pdev_idx]; + power_type = vif->bss_conf.power_type; + + if (power_type == IEEE80211_REG_UNSET_AP) { + ath11k_warn(ar->ab, "invalid power type %d\n", + power_type); + ret = -EINVAL; + } else { + ret = ath11k_reg_handle_chan_list(ar->ab, + reg_info, + power_type); + if (ret) + ath11k_warn(ar->ab, + "failed to handle chan list with power type %d\n", + power_type); + } + } } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { spin_lock_bh(&ar->ab->base_lock); From 803482f472ccc9576c0e606143725d1d8c61019d Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 17 May 2024 17:04:22 -0700 Subject: [PATCH 021/537] KVM: x86/mmu: Use SHADOW_NONPRESENT_VALUE for atomic zap in TDP MMU Use SHADOW_NONPRESENT_VALUE when zapping TDP MMU SPTEs with mmu_lock held for read, tdp_mmu_zap_spte_atomic() was simply missed during the initial development. 
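For context on why the zap value matters (see the EPT Violation #VE patches later in this series), a simplified sketch with illustrative constants, not the actual KVM definitions: when EPT Violation #VE is in use, a non-present SPTE must keep the suppress-#VE bit (EPT PTE bit 63) set, so clearing an entry to literal zero is not equivalent to clearing it to the designated non-present value.

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustrative names only; the real constants live in KVM's spte.h. */
    #define EXAMPLE_SUPPRESS_VE_BIT   (1ULL << 63)
    #define EXAMPLE_NONPRESENT_VALUE  EXAMPLE_SUPPRESS_VE_BIT

    static inline uint64_t example_zap_value(bool ve_enabled)
    {
            /* Writing 0 would clear the suppress-#VE bit and let the CPU
             * turn an EPT violation into a #VE for the guest. */
            return ve_enabled ? EXAMPLE_NONPRESENT_VALUE : 0;
    }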
Fixes: 7f01cab84928 ("KVM: x86/mmu: Allow non-zero value for non-present SPTE and removed SPTE") Signed-off-by: Isaku Yamahata [sean: write changelog] Signed-off-by: Sean Christopherson Reviewed-by: Kai Huang Message-ID: <20240518000430.1118488-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/tdp_mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 1259dd63defc8..36539c1b36cd6 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -626,7 +626,7 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm, * SPTEs. */ handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, - 0, iter->level, true); + SHADOW_NONPRESENT_VALUE, iter->level, true); return 0; } From 40e8a6901a2c983e3e541a363865e02cf591d458 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 17 May 2024 17:04:28 -0700 Subject: [PATCH 022/537] KVM: VMX: Don't kill the VM on an unexpected #VE Don't terminate the VM on an unexpected #VE, as it's extremely unlikely the #VE is fatal to the guest, and even less likely that it presents a danger to the host. Simply resume the guest on "failure", as the #VE info page's BUSY field will prevent converting any more EPT Violations to #VEs for the vCPU (at least, that's what the BUSY field is supposed to do). Signed-off-by: Sean Christopherson Message-ID: <20240518000430.1118488-8-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 51b2cd13250a2..069fbbc1e04a0 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5217,8 +5217,8 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) if (is_invalid_opcode(intr_info)) return handle_ud(vcpu); - if (KVM_BUG_ON(is_ve_fault(intr_info), vcpu->kvm)) - return -EIO; + if (WARN_ON_ONCE(is_ve_fault(intr_info))) + return 1; error_code = 0; if (intr_info & INTR_INFO_DELIVER_CODE_MASK) From d1b32ecdc8ad0346ac551866d9f6831995fd70de Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 17 May 2024 17:04:23 -0700 Subject: [PATCH 023/537] KVM: nVMX: Initialize #VE info page for vmcs02 when proving #VE support Point vmcs02.VE_INFORMATION_ADDRESS at the vCPU's #VE info page when initializing vmcs02, otherwise KVM will run L2 with EPT Violation #VE enabled and a VE info address pointing at pfn 0. Fixes: 8131cf5b4fd8 ("KVM: VMX: Introduce test mode related to EPT violation VE") Signed-off-by: Sean Christopherson Message-ID: <20240518000430.1118488-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index d5b832126e345..6798fadaa3350 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2242,6 +2242,9 @@ static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); + if (vmx->ve_info) + vmcs_write64(VE_INFORMATION_ADDRESS, __pa(vmx->ve_info)); + /* All VMFUNCs are currently emulated through L0 vmexits. */ if (cpu_has_vmx_vmfunc()) vmcs_write64(VM_FUNCTION_CONTROL, 0); From 9031b42139b9d45ef806c9a7fee166c1b6443c3c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 17 May 2024 17:04:24 -0700 Subject: [PATCH 024/537] KVM: nVMX: Always handle #VEs in L0 (never forward #VEs from L2 to L1) Always handle #VEs, e.g. 
due to prove EPT Violation #VE failures, in L0, as KVM does not expose any #VE capabilities to L1, i.e. any and all #VEs are KVM's responsibility. Fixes: 8131cf5b4fd8 ("KVM: VMX: Introduce test mode related to EPT violation VE") Signed-off-by: Sean Christopherson Message-ID: <20240518000430.1118488-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 6798fadaa3350..643935a0f70ab 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -6233,6 +6233,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, else if (is_alignment_check(intr_info) && !vmx_guest_inject_ac(vcpu)) return true; + else if (is_ve_fault(intr_info)) + return true; return false; case EXIT_REASON_EXTERNAL_INTERRUPT: return true; From 837d557aba6b816985141ddbeb7649444ed26d3b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 17 May 2024 17:04:25 -0700 Subject: [PATCH 025/537] KVM: x86/mmu: Add sanity checks that KVM doesn't create EPT #VE SPTEs Assert that KVM doesn't set a SPTE to a value that could trigger an EPT Violation #VE on a non-MMIO SPTE, e.g. to help detect bugs even without KVM_INTEL_PROVE_VE enabled, and to help debug actual #VE failures. Note, this will run afoul of TDX support, which needs to reflect emulated MMIO accesses into the guest as #VEs (which was the whole point of adding EPT Violation #VE support in KVM). The obvious fix for that is to exempt MMIO SPTEs, but that's annoyingly difficult now that is_mmio_spte() relies on a per-VM value. However, resolving that conundrum is a future problem, whereas getting KVM_INTEL_PROVE_VE healthy is a current problem. Signed-off-by: Sean Christopherson Message-ID: <20240518000430.1118488-5-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 3 +++ arch/x86/kvm/mmu/spte.h | 9 +++++++++ arch/x86/kvm/mmu/tdp_iter.h | 2 ++ 3 files changed, 14 insertions(+) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 662f62dfb2aa9..1ae1c1328268b 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -336,16 +336,19 @@ static int is_cpuid_PSE36(void) #ifdef CONFIG_X86_64 static void __set_spte(u64 *sptep, u64 spte) { + KVM_MMU_WARN_ON(is_ept_ve_possible(spte)); WRITE_ONCE(*sptep, spte); } static void __update_clear_spte_fast(u64 *sptep, u64 spte) { + KVM_MMU_WARN_ON(is_ept_ve_possible(spte)); WRITE_ONCE(*sptep, spte); } static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) { + KVM_MMU_WARN_ON(is_ept_ve_possible(spte)); return xchg(sptep, spte); } diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index 5dd5405fa07ac..52fa004a1fbc9 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -3,6 +3,8 @@ #ifndef KVM_X86_MMU_SPTE_H #define KVM_X86_MMU_SPTE_H +#include + #include "mmu.h" #include "mmu_internal.h" @@ -276,6 +278,13 @@ static inline bool is_shadow_present_pte(u64 pte) return !!(pte & SPTE_MMU_PRESENT_MASK); } +static inline bool is_ept_ve_possible(u64 spte) +{ + return (shadow_present_mask & VMX_EPT_SUPPRESS_VE_BIT) && + !(spte & VMX_EPT_SUPPRESS_VE_BIT) && + (spte & VMX_EPT_RWX_MASK) != VMX_EPT_MISCONFIG_WX_VALUE; +} + /* * Returns true if A/D bits are supported in hardware and are enabled by KVM. * When enabled, KVM uses A/D bits for all non-nested MMUs. 
Because L1 can diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h index fae559559a806..2880fd392e0cb 100644 --- a/arch/x86/kvm/mmu/tdp_iter.h +++ b/arch/x86/kvm/mmu/tdp_iter.h @@ -21,11 +21,13 @@ static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep) static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte) { + KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte)); return xchg(rcu_dereference(sptep), new_spte); } static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte) { + KVM_MMU_WARN_ON(is_ept_ve_possible(new_spte)); WRITE_ONCE(*rcu_dereference(sptep), new_spte); } From 743f1773366461cb44de297b1caf0a4292eb8fda Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 17 May 2024 17:04:26 -0700 Subject: [PATCH 026/537] KVM: VMX: Dump VMCS on unexpected #VE Dump the VMCS on an unexpected #VE, otherwise it's practically impossible to figure out why the #VE occurred. Signed-off-by: Sean Christopherson Message-ID: <20240518000430.1118488-6-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 069fbbc1e04a0..a02b2720023a3 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5217,8 +5217,10 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) if (is_invalid_opcode(intr_info)) return handle_ud(vcpu); - if (WARN_ON_ONCE(is_ve_fault(intr_info))) + if (WARN_ON_ONCE(is_ve_fault(intr_info))) { + dump_vmcs(vcpu); return 1; + } error_code = 0; if (intr_info & INTR_INFO_DELIVER_CODE_MASK) From bca99c0356524a620126b789edbaf25934415467 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 17 May 2024 17:04:27 -0700 Subject: [PATCH 027/537] KVM: x86/mmu: Print SPTEs on unexpected #VE Print the SPTEs that correspond to the faulting GPA on an unexpected EPT Violation #VE to help the user debug failures, e.g. to pinpoint which SPTE didn't have SUPPRESS_VE set. Opportunistically assert that the underlying exit reason was indeed an EPT Violation, as the CPU has *really* gone off the rails if a #VE occurs due to a completely unexpected exit reason. Signed-off-by: Sean Christopherson Message-ID: <20240518000430.1118488-7-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/mmu/mmu.c | 40 ++++++++++++++++++++++++++------- arch/x86/kvm/vmx/vmx.c | 5 +++++ 3 files changed, 38 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index ece45b3f6f207..f8ca74e7678f3 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2154,6 +2154,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, void *insn, int insn_len); +void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva); void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, u64 addr, unsigned long roots); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 1ae1c1328268b..b7b4426a7221d 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4104,23 +4104,31 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level return leaf; } -/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. 
*/ -static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) +static int get_sptes_lockless(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, + int *root_level) { - u64 sptes[PT64_ROOT_MAX_LEVEL + 1]; - struct rsvd_bits_validate *rsvd_check; - int root, leaf, level; - bool reserved = false; + int leaf; walk_shadow_page_lockless_begin(vcpu); if (is_tdp_mmu_active(vcpu)) - leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root); + leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root_level); else - leaf = get_walk(vcpu, addr, sptes, &root); + leaf = get_walk(vcpu, addr, sptes, root_level); walk_shadow_page_lockless_end(vcpu); + return leaf; +} +/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */ +static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) +{ + u64 sptes[PT64_ROOT_MAX_LEVEL + 1]; + struct rsvd_bits_validate *rsvd_check; + int root, leaf, level; + bool reserved = false; + + leaf = get_sptes_lockless(vcpu, addr, sptes, &root); if (unlikely(leaf < 0)) { *sptep = 0ull; return reserved; @@ -5924,6 +5932,22 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err } EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); +void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg) +{ + u64 sptes[PT64_ROOT_MAX_LEVEL + 1]; + int root_level, leaf, level; + + leaf = get_sptes_lockless(vcpu, gpa, sptes, &root_level); + if (unlikely(leaf < 0)) + return; + + pr_err("%s %llx", msg, gpa); + for (level = root_level; level >= leaf; level--) + pr_cont(", spte[%d] = 0x%llx", level, sptes[level]); + pr_cont("\n"); +} +EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes); + static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, u64 addr, hpa_t root_hpa) { diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index a02b2720023a3..58832aae22487 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5218,7 +5218,12 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) return handle_ud(vcpu); if (WARN_ON_ONCE(is_ve_fault(intr_info))) { + struct vmx_ve_information *ve_info = vmx->ve_info; + + WARN_ONCE(ve_info->exit_reason != EXIT_REASON_EPT_VIOLATION, + "Unexpected #VE on VM-Exit reason 0x%x", ve_info->exit_reason); dump_vmcs(vcpu); + kvm_mmu_print_sptes(vcpu, ve_info->guest_physical_address, "#VE"); return 1; } From a5dc0c9b557573315633bc78bacf8f548352f95b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 17 May 2024 17:04:29 -0700 Subject: [PATCH 028/537] KVM: VMX: Enumerate EPT Violation #VE support in /proc/cpuinfo Don't suppress printing EPT_VIOLATION_VE in /proc/cpuinfo, knowing whether or not KVM_INTEL_PROVE_VE actually does anything is extremely valuable. A privileged user can get at the information by reading the raw MSR, but the whole point of the VMX flags is to avoid needing to glean information from raw MSR reads. 
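A minimal userspace sketch of the "raw MSR" alternative the changelog refers to (assumptions: root access, the msr driver providing /dev/cpu/0/msr, IA32_VMX_PROCBASED_CTLS2 being MSR 0x48b with its upper 32 bits reporting the allowed-1 secondary controls, and EPT-violation #VE being secondary control bit 18):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t val;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            /* The msr device uses the file offset as the MSR index. */
            if (fd < 0 || pread(fd, &val, sizeof(val), 0x48b) != sizeof(val)) {
                    perror("msr read");
                    return 1;
            }
            printf("EPT_VIOLATION_VE supported: %s\n",
                   ((val >> 32) & (1u << 18)) ? "yes" : "no");
            return 0;
    }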
Signed-off-by: Sean Christopherson Message-ID: <20240518000430.1118488-9-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/vmxfeatures.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h index 266daf5b5b842..695f366648899 100644 --- a/arch/x86/include/asm/vmxfeatures.h +++ b/arch/x86/include/asm/vmxfeatures.h @@ -77,7 +77,7 @@ #define VMX_FEATURE_ENCLS_EXITING ( 2*32+ 15) /* "" VM-Exit on ENCLS (leaf dependent) */ #define VMX_FEATURE_RDSEED_EXITING ( 2*32+ 16) /* "" VM-Exit on RDSEED */ #define VMX_FEATURE_PAGE_MOD_LOGGING ( 2*32+ 17) /* "pml" Log dirty pages into buffer */ -#define VMX_FEATURE_EPT_VIOLATION_VE ( 2*32+ 18) /* "" Conditionally reflect EPT violations as #VE exceptions */ +#define VMX_FEATURE_EPT_VIOLATION_VE ( 2*32+ 18) /* Conditionally reflect EPT violations as #VE exceptions */ #define VMX_FEATURE_PT_CONCEAL_VMX ( 2*32+ 19) /* "" Suppress VMX indicators in Processor Trace */ #define VMX_FEATURE_XSAVES ( 2*32+ 20) /* "" Enable XSAVES and XRSTORS in guest */ #define VMX_FEATURE_MODE_BASED_EPT_EXEC ( 2*32+ 22) /* "ept_mode_based_exec" Enable separate EPT EXEC bits for supervisor vs. user */ From 6af6142e3a62efd6074905e4a94d64956a3f4b7c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 17 May 2024 17:04:30 -0700 Subject: [PATCH 029/537] KVM: x86: Disable KVM_INTEL_PROVE_VE by default Disable KVM's "prove #VE" support by default, as it provides no functional value, and even its sanity checking benefits are relatively limited. I.e. it should be fully opt-in even on debug kernels, especially since EPT Violation #VE suppression appears to be buggy on some CPUs. Opportunistically add a line in the help text to make it abundantly clear that KVM_INTEL_PROVE_VE should never be enabled in a production environment. Suggested-by: Paolo Bonzini Signed-off-by: Sean Christopherson Message-ID: <20240518000430.1118488-10-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/Kconfig | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index d64fb2b3eb69e..7f47233ddf46c 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -97,15 +97,17 @@ config KVM_INTEL config KVM_INTEL_PROVE_VE bool "Check that guests do not receive #VE exceptions" - default KVM_PROVE_MMU || DEBUG_KERNEL - depends on KVM_INTEL + depends on KVM_INTEL && EXPERT help - Checks that KVM's page table management code will not incorrectly let guests receive a virtualization exception. Virtualization exceptions will be trapped by the hypervisor rather than injected in the guest. + Note: some CPUs appear to generate spurious EPT Violations #VEs + that trigger KVM's WARN, in particular with eptad=0 and/or nested + virtualization. + If unsure, say N. config X86_SGX_KVM From 76d5363c20eeeb937b56c0ac6c61e697bd1bf154 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 17 May 2024 11:03:41 -0700 Subject: [PATCH 030/537] KVM: x86: Force KVM_WERROR if the global WERROR is enabled Force KVM_WERROR if the global WERROR is enabled to avoid pestering the user about a Kconfig that will ultimately be ignored. Force KVM_WERROR instead of making it mutually exclusive with WERROR to avoid generating a .config builds KVM with -Werror, but has KVM_WERROR=n. 
Suggested-by: Linus Torvalds Signed-off-by: Sean Christopherson Message-ID: <20240517180341.974251-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 7f47233ddf46c..fec95a7702703 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -44,6 +44,7 @@ config KVM select KVM_VFIO select HAVE_KVM_PM_NOTIFIER if PM select KVM_GENERIC_HARDWARE_ENABLING + select KVM_WERROR if WERROR help Support hosting fully virtualized guest machines using hardware virtualization extensions. You will need a fairly recent @@ -66,7 +67,7 @@ config KVM_WERROR # FRAME_WARN, i.e. KVM_WERROR=y with KASAN=y requires special tuning. # Building KVM with -Werror and KASAN is still doable via enabling # the kernel-wide WERROR=y. - depends on KVM && EXPERT && !KASAN + depends on KVM && ((EXPERT && !KASAN) || WERROR) help Add -Werror to the build flags for KVM. From b4bd556467477420ee3a91fbcba73c579669edc6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 21 May 2024 19:14:35 -0700 Subject: [PATCH 031/537] KVM: SVM: WARN on vNMI + NMI window iff NMIs are outright masked When requesting an NMI window, WARN on vNMI support being enabled if and only if NMIs are actually masked, i.e. if the vCPU is already handling an NMI. KVM's ABI for NMIs that arrive simultaneously (from KVM's point of view) is to inject one NMI and pend the other. When using vNMI, KVM pends the second NMI simply by setting V_NMI_PENDING, and lets the CPU do the rest (hardware automatically sets V_NMI_BLOCKING when an NMI is injected). However, if KVM can't immediately inject an NMI, e.g. because the vCPU is in an STI shadow or is running with GIF=0, then KVM will request an NMI window and trigger the WARN (but still function correctly). Whether or not the GIF=0 case makes sense is debatable, as the intent of KVM's behavior is to provide functionality that is as close to real hardware as possible. E.g. if two NMIs are sent in quick succession, the probability of both NMIs arriving in an STI shadow is infinitesimally low on real hardware, but significantly larger in a virtual environment, e.g. if the vCPU is preempted in the STI shadow. For GIF=0, the argument isn't as clear cut, because the window where two NMIs can collide is much larger in bare metal (though still small). That said, KVM should not have divergent behavior for the GIF=0 case based on whether or not vNMI support is enabled. And KVM has allowed simultaneous NMIs with GIF=0 for over a decade, since commit 7460fb4a3400 ("KVM: Fix simultaneous NMIs"). I.e. KVM's GIF=0 handling shouldn't be modified without a *really* good reason to do so, and if KVM's behavior were to be modified, it should be done irrespective of vNMI support.
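As an aside, a condensed, illustrative-only restatement of the resulting svm_enable_nmi_window() flow described above (SEV-ES handling omitted; the diff below is authoritative):

---8<---
	if (svm_get_nmi_mask(vcpu)) {
		/*
		 * NMIs are masked: with vNMI enabled, hardware should have
		 * pended the second NMI, so getting here is unexpected.
		 */
		WARN_ON_ONCE(is_vnmi_enabled(svm));

		/* Already intercepting IRET: that VM-Exit reopens the window. */
		if (!svm->awaiting_iret_completion)
			return;
	}
	/* Otherwise (STI shadow, GIF=0, ...) fall back to single-stepping. */
---8<---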
Fixes: fa4c027a7956 ("KVM: x86: Add support for SVM's Virtual NMI") Cc: stable@vger.kernel.org Cc: Santosh Shukla Cc: Maxim Levitsky Signed-off-by: Sean Christopherson Message-ID: <20240522021435.1684366-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index c8dc25886c165..1d3c8be39479b 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3846,16 +3846,27 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); /* - * KVM should never request an NMI window when vNMI is enabled, as KVM - * allows at most one to-be-injected NMI and one pending NMI, i.e. if - * two NMIs arrive simultaneously, KVM will inject one and set - * V_NMI_PENDING for the other. WARN, but continue with the standard - * single-step approach to try and salvage the pending NMI. + * If NMIs are outright masked, i.e. the vCPU is already handling an + * NMI, and KVM has not yet intercepted an IRET, then there is nothing + * more to do at this time as KVM has already enabled IRET intercepts. + * If KVM has already intercepted IRET, then single-step over the IRET, + * as NMIs aren't architecturally unmasked until the IRET completes. + * + * If vNMI is enabled, KVM should never request an NMI window if NMIs + * are masked, as KVM allows at most one to-be-injected NMI and one + * pending NMI. If two NMIs arrive simultaneously, KVM will inject one + * NMI and set V_NMI_PENDING for the other, but if and only if NMIs are + * unmasked. KVM _will_ request an NMI window in some situations, e.g. + * if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately + * inject the NMI. In those situations, KVM needs to single-step over + * the STI shadow or intercept STGI. */ - WARN_ON_ONCE(is_vnmi_enabled(svm)); + if (svm_get_nmi_mask(vcpu)) { + WARN_ON_ONCE(is_vnmi_enabled(svm)); - if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion) - return; /* IRET will cause a vm exit */ + if (!svm->awaiting_iret_completion) + return; /* IRET will cause a vm exit */ + } /* * SEV-ES guests are responsible for signaling when a vCPU is ready to From d09c05aa35909adb7d29f92f0cd79fdcd1338ef0 Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Mon, 20 May 2024 22:30:40 -0400 Subject: [PATCH 032/537] scsi: core: Handle devices which return an unusually large VPD page count Peter Schneider reported that a system would no longer boot after updating to 6.8.4. Peter bisected the issue and identified commit b5fc07a5fb56 ("scsi: core: Consult supported VPD page list prior to fetching page") as being the culprit. Turns out the enclosure device in Peter's system reports a byteswapped page length for VPD page 0. It reports "02 00" as page length instead of "00 02". This causes us to attempt to access 516 bytes (page length + header) of information despite only 2 pages being present. Limit the page search scope to the size of our VPD buffer to guard against devices returning a larger page count than requested. Link: https://lore.kernel.org/r/20240521023040.2703884-1-martin.petersen@oracle.com Fixes: b5fc07a5fb56 ("scsi: core: Consult supported VPD page list prior to fetching page") Cc: stable@vger.kernel.org Reported-by: Peter Schneider Closes: https://lore.kernel.org/all/eec6ebbf-061b-4a7b-96dc-ea748aa4d035@googlemail.com/ Tested-by: Peter Schneider Reviewed-by: Bart Van Assche Signed-off-by: Martin K. 
Petersen --- drivers/scsi/scsi.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3e0c0381277ac..f0464db3f9de9 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -350,6 +350,13 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page) if (result < SCSI_VPD_HEADER_SIZE) return 0; + if (result > sizeof(vpd)) { + dev_warn_once(&sdev->sdev_gendev, + "%s: long VPD page 0 length: %d bytes\n", + __func__, result); + result = sizeof(vpd); + } + result -= SCSI_VPD_HEADER_SIZE; if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result)) return 0; From a527c3ba41c4c61e2069bfce4091e5515f06a8dd Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 24 May 2024 18:14:19 +0200 Subject: [PATCH 033/537] nfs: Avoid flushing many pages with NFS_FILE_SYNC When we are doing WB_SYNC_ALL writeback, nfs submits write requests with NFS_FILE_SYNC flag to the server (which then generally treats it as an O_SYNC write). This helps to reduce latency for single requests but when submitting more requests, additional fsyncs on the server side hurt latency. NFS generally avoids this additional overhead by not setting NFS_FILE_SYNC if desc->pg_moreio is set. However this logic doesn't always work. When we do random 4k writes to a huge file and then call fsync(2), each page writeback is going to be sent with NFS_FILE_SYNC because after preparing one page for writeback, we start writing back the next one: nfs_do_writepage() will call nfs_pageio_cond_complete(), which finds that the page is not contiguous with the previously prepared IO and submits it *without* setting desc->pg_moreio. Hence NFS_FILE_SYNC is used resulting in poor performance. Fix the problem by setting desc->pg_moreio in nfs_pageio_cond_complete() before submitting outstanding IO. This improves throughput of fsync-after-random-writes on my test SSD from ~70MB/s to ~250MB/s. Signed-off-by: Jan Kara Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 6efb5068c116e..040b6b79c75e5 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -1545,6 +1545,11 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) continue; } else if (index == prev->wb_index + 1) continue; + /* + * We will submit more requests after these. Indicate + * this to the underlying layers. + */ + desc->pg_moreio = 1; nfs_pageio_complete(desc); break; } From 0c8c7c559740d2d8b66048162af6c4dba8f0c88c Mon Sep 17 00:00:00 2001 From: Scott Mayhew Date: Thu, 23 May 2024 15:01:22 -0400 Subject: [PATCH 034/537] nfs: don't invalidate dentries on transient errors This is a slight variation on a patch previously proposed by Neil Brown that never got merged. Prior to commit 5ceb9d7fdaaf ("NFS: Refactor nfs_lookup_revalidate()"), any error from nfs_lookup_verify_inode() other than -ESTALE would result in nfs_lookup_revalidate() returning that error (-ESTALE is mapped to zero). Since that commit, all errors result in nfs_lookup_revalidate() returning zero, resulting in dentries being invalidated where they previously were not (particularly in the case of -ERESTARTSYS). Fix it by passing the actual error code to nfs_lookup_revalidate_done(), and leaving the decision on whether to map the error code to zero or one to nfs_lookup_revalidate_done().
A simple reproducer is to run the following python code in a subdirectory of an NFS mount (not in the root of the NFS mount): ---8<--- import os import multiprocessing import time if __name__=="__main__": multiprocessing.set_start_method("spawn") count = 0 while True: try: os.getcwd() pool = multiprocessing.Pool(10) pool.close() pool.terminate() count += 1 except Exception as e: print(f"Failed after {count} iterations") print(e) break ---8<--- Prior to commit 5ceb9d7fdaaf, the above code would run indefinitely. After commit 5ceb9d7fdaaf, it fails almost immediately with -ENOENT. Signed-off-by: Scott Mayhew Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 3429309962267..788077a4feb9a 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1627,7 +1627,16 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry, switch (error) { case 1: break; - case 0: + case -ETIMEDOUT: + if (inode && (IS_ROOT(dentry) || + NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)) + error = 1; + break; + case -ESTALE: + case -ENOENT: + error = 0; + fallthrough; + default: /* * We can't d_drop the root of a disconnected tree: * its d_hash is on the s_anon list and d_drop() would hide @@ -1682,18 +1691,8 @@ static int nfs_lookup_revalidate_dentry(struct inode *dir, dir_verifier = nfs_save_change_attribute(dir); ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr); - if (ret < 0) { - switch (ret) { - case -ESTALE: - case -ENOENT: - ret = 0; - break; - case -ETIMEDOUT: - if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL) - ret = 1; - } + if (ret < 0) goto out; - } /* Request help from readdirplus */ nfs_lookup_advise_force_readdirplus(dir, flags); @@ -1737,7 +1736,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode; - int error; + int error = 0; nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE); inode = d_inode(dentry); @@ -1782,7 +1781,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, out_bad: if (flags & LOOKUP_RCU) return -ECHILD; - return nfs_lookup_revalidate_done(dir, dentry, inode, 0); + return nfs_lookup_revalidate_done(dir, dentry, inode, error); } static int From 8a01ef749b0a632f0e1f4ead0f08b3310d99fcb1 Mon Sep 17 00:00:00 2001 From: David Lechner Date: Fri, 3 May 2024 14:45:05 -0500 Subject: [PATCH 035/537] iio: adc: ad9467: fix scan type sign According to the IIO documentation, the sign in the scan type should be lower case. The ad9467 driver was incorrectly using upper case. Fix by changing to lower case. 
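For context, a minimal sketch of the convention (hypothetical channel, not taken from the ad9467 driver): the IIO scan_type sign field takes a lower-case 's' (signed) or 'u' (unsigned):

---8<---
#include <linux/iio/iio.h>

/* Hypothetical 12-bit signed ADC channel; only scan_type.sign matters here. */
static const struct iio_chan_spec example_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.scan_index = 0,
		.scan_type = {
			.sign = 's',	/* lower case per the IIO ABI, not 'S' */
			.realbits = 12,
			.storagebits = 16,
			.shift = 4,
			.endianness = IIO_BE,
		},
	},
};
---8<---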
Fixes: 4606d0f4b05f ("iio: adc: ad9467: add support for AD9434 high-speed ADC") Fixes: ad6797120238 ("iio: adc: ad9467: add support AD9467 ADC") Signed-off-by: David Lechner Link: https://lore.kernel.org/r/20240503-ad9467-fix-scan-type-sign-v1-1-c7a1a066ebb9@baylibre.com Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/adc/ad9467.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c index e85b763b9ffcb..8f5b9c3f6e3d6 100644 --- a/drivers/iio/adc/ad9467.c +++ b/drivers/iio/adc/ad9467.c @@ -243,11 +243,11 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index, } static const struct iio_chan_spec ad9434_channels[] = { - AD9467_CHAN(0, 0, 12, 'S'), + AD9467_CHAN(0, 0, 12, 's'), }; static const struct iio_chan_spec ad9467_channels[] = { - AD9467_CHAN(0, 0, 16, 'S'), + AD9467_CHAN(0, 0, 16, 's'), }; static const struct ad9467_chip_info ad9467_chip_tbl = { From 72d0a20fabcf231c9b4b17b0cabdcde0949d05eb Mon Sep 17 00:00:00 2001 From: Angelo Dureghello Date: Fri, 3 May 2024 20:55:28 +0200 Subject: [PATCH 036/537] dt-bindings: iio: dac: fix ad354xr output range Fix output range, as per datasheet must be -2.5 to 7.5. Signed-off-by: Angelo Dureghello Fixes: b0a96c5f599e ("dt-bindings: iio: dac: Add adi,ad3552r.yaml") Acked-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20240503185528.2043127-1-adureghello@baylibre.org Signed-off-by: Jonathan Cameron --- Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml index 96340a05754ce..8265d709094dc 100644 --- a/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml +++ b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml @@ -139,7 +139,7 @@ allOf: Voltage output range of the channel as Required connections: Rfb1x for: 0 to 2.5 V; 0 to 3V; 0 to 5 V; - Rfb2x for: 0 to 10 V; 2.5 to 7.5V; -5 to 5 V; + Rfb2x for: 0 to 10 V; -2.5 to 7.5V; -5 to 5 V; oneOf: - items: - const: 0 From 0f0f6306617cb4b6231fc9d4ec68ab9a56dba7c0 Mon Sep 17 00:00:00 2001 From: Adam Rizkalla Date: Thu, 25 Apr 2024 01:22:49 -0500 Subject: [PATCH 037/537] iio: pressure: bmp280: Fix BMP580 temperature reading Fix overflow issue when storing BMP580 temperature reading and properly preserve sign of 24-bit data. Signed-off-by: Adam Rizkalla Tested-By: Vasileios Amoiridis Acked-by: Angel Iglesias Link: https://lore.kernel.org/r/Zin2udkXRD0+GrML@adam-asahi.lan Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/pressure/bmp280-core.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index 09f53d987c7df..221fa2c552ae2 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -1394,12 +1394,12 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2) /* * Temperature is returned in Celsius degrees in fractional - * form down 2^16. We rescale by x1000 to return milli Celsius - * to respect IIO ABI. + * form down 2^16. We rescale by x1000 to return millidegrees + * Celsius to respect IIO ABI. 
*/ - *val = raw_temp * 1000; - *val2 = 16; - return IIO_VAL_FRACTIONAL_LOG2; + raw_temp = sign_extend32(raw_temp, 23); + *val = ((s64)raw_temp * 1000) / (1 << 16); + return IIO_VAL_INT; } static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2) From 279428df888319bf68f2686934897301a250bb84 Mon Sep 17 00:00:00 2001 From: Marc Ferland Date: Wed, 1 May 2024 11:05:54 -0400 Subject: [PATCH 038/537] iio: dac: ad5592r: fix temperature channel scaling value The scale value for the temperature channel is (assuming Vref=2.5 and the datasheet): 376.7897513 When calculating both val and val2 for the temperature scale we use (3767897513/25) and multiply it by Vref (here I assume 2500mV) to obtain: 2500 * (3767897513/25) ==> 376789751300 Finally we divide with remainder by 10^9 to get: val = 376 val2 = 789751300 However, we return IIO_VAL_INT_PLUS_MICRO (should have been NANO) as the scale type. So when converting the raw temperature value to the 'processed' temperature value we will get (assuming raw=810, offset=-753): processed = (raw + offset) * scale_val = (810 + -753) * 376 = 21432 processed += div((raw + offset) * scale_val2, 10^6) += div((810 + -753) * 789751300, 10^6) += 45015 ==> 66447 ==> 66.4 Celcius instead of the expected 21.5 Celsius. Fix this issue by changing IIO_VAL_INT_PLUS_MICRO to IIO_VAL_INT_PLUS_NANO. Fixes: 56ca9db862bf ("iio: dac: Add support for the AD5592R/AD5593R ADCs/DACs") Signed-off-by: Marc Ferland Link: https://lore.kernel.org/r/20240501150554.1871390-1-marc.ferland@sonatest.com Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/dac/ad5592r-base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c index 076bc9ecfb499..4763402dbcd66 100644 --- a/drivers/iio/dac/ad5592r-base.c +++ b/drivers/iio/dac/ad5592r-base.c @@ -415,7 +415,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev, s64 tmp = *val * (3767897513LL / 25LL); *val = div_s64_rem(tmp, 1000000000LL, val2); - return IIO_VAL_INT_PLUS_MICRO; + return IIO_VAL_INT_PLUS_NANO; } mutex_lock(&st->lock); From bedb2ccb566de5ca0c336ca3fd3588cea6d50414 Mon Sep 17 00:00:00 2001 From: Vasileios Amoiridis Date: Wed, 8 May 2024 17:54:07 +0200 Subject: [PATCH 039/537] iio: imu: bmi323: Fix trigger notification in case of error In case of error in the bmi323_trigger_handler() function, the function exits without calling the iio_trigger_notify_done() which is responsible for informing the attached trigger that the process is done and in case there is a .reenable(), to call it. 
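More generally, a minimal sketch (hypothetical driver, not bmi323 code) of the pattern the fix below restores: every exit path of a pollfunc handler ends up at iio_trigger_notify_done():

---8<---
static irqreturn_t example_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct {
		__le16 chans[3];
		s64 timestamp __aligned(8);
	} scan = { };
	int ret;

	ret = example_read_channels(indio_dev, scan.chans);	/* hypothetical helper */
	if (ret)
		goto out;	/* jump to the notify instead of returning early */

	iio_push_to_buffers_with_timestamp(indio_dev, &scan,
					   iio_get_time_ns(indio_dev));
out:
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
---8<---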
Fixes: 8a636db3aa57 ("iio: imu: Add driver for BMI323 IMU") Signed-off-by: Vasileios Amoiridis Link: https://lore.kernel.org/r/20240508155407.139805-1-vassilisamir@gmail.com Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/imu/bmi323/bmi323_core.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c index 5d42ab9b176a3..67d74a1a1b26d 100644 --- a/drivers/iio/imu/bmi323/bmi323_core.c +++ b/drivers/iio/imu/bmi323/bmi323_core.c @@ -1391,7 +1391,7 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p) &data->buffer.channels, ARRAY_SIZE(data->buffer.channels)); if (ret) - return IRQ_NONE; + goto out; } else { for_each_set_bit(bit, indio_dev->active_scan_mask, BMI323_CHAN_MAX) { @@ -1400,13 +1400,14 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p) &data->buffer.channels[index++], BMI323_BYTES_PER_SAMPLE); if (ret) - return IRQ_NONE; + goto out; } } iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, iio_get_time_ns(indio_dev)); +out: iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; From a23c14b062d8800a2192077d83273bbfe6c7552d Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Mon, 13 May 2024 13:34:27 -0700 Subject: [PATCH 040/537] iio: temperature: mlx90635: Fix ERR_PTR dereference in mlx90635_probe() When devm_regmap_init_i2c() fails, regmap_ee could be error pointer, instead of checking for IS_ERR(regmap_ee), regmap is checked which looks like a copy paste error. Fixes: a1d1ba5e1c28 ("iio: temperature: mlx90635 MLX90635 IR Temperature sensor") Reviewed-by: Crt Mori Signed-off-by: Harshit Mogalapalli Link: https://lore.kernel.org/r/20240513203427.3208696-1-harshit.m.mogalapalli@oracle.com Cc: Signed-off-by: Jonathan Cameron --- drivers/iio/temperature/mlx90635.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/iio/temperature/mlx90635.c b/drivers/iio/temperature/mlx90635.c index 1f5c962c1818f..f7f88498ba0e8 100644 --- a/drivers/iio/temperature/mlx90635.c +++ b/drivers/iio/temperature/mlx90635.c @@ -947,9 +947,9 @@ static int mlx90635_probe(struct i2c_client *client) "failed to allocate regmap\n"); regmap_ee = devm_regmap_init_i2c(client, &mlx90635_regmap_ee); - if (IS_ERR(regmap)) - return dev_err_probe(&client->dev, PTR_ERR(regmap), - "failed to allocate regmap\n"); + if (IS_ERR(regmap_ee)) + return dev_err_probe(&client->dev, PTR_ERR(regmap_ee), + "failed to allocate EEPROM regmap\n"); mlx90635 = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); From ab6f0ab178137170a6b40f8f3d7a3806708a202c Mon Sep 17 00:00:00 2001 From: Dumitru Ceclan Date: Tue, 21 May 2024 11:45:39 +0300 Subject: [PATCH 041/537] iio: adc: ad7173: fix buffers enablement for ad7176-2 AD7176-2 does not feature input buffers and marks corespondent register bits as read only. Enable buffers only on supported models. 
Fixes: 76a1e6a42802 ("iio: adc: ad7173: add AD7173 driver") Reviewed-by: David Lechner Signed-off-by: Dumitru Ceclan Link: https://lore.kernel.org/r/20240521-ad7173-fixes-v1-1-8161cc7f3ad1@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/adc/ad7173.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c index a7826bba0852a..91acedaddb576 100644 --- a/drivers/iio/adc/ad7173.c +++ b/drivers/iio/adc/ad7173.c @@ -145,6 +145,7 @@ struct ad7173_device_info { unsigned int id; char *name; bool has_temp; + bool has_input_buf; bool has_int_ref; bool has_ref2; u8 num_gpios; @@ -212,6 +213,7 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_configs = 4, .num_gpios = 2, .has_temp = true, + .has_input_buf = true, .has_int_ref = true, .clock = 2 * HZ_PER_MHZ, .sinc5_data_rates = ad7173_sinc5_data_rates, @@ -224,6 +226,7 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_configs = 8, .num_gpios = 4, .has_temp = false, + .has_input_buf = true, .has_ref2 = true, .clock = 2 * HZ_PER_MHZ, .sinc5_data_rates = ad7173_sinc5_data_rates, @@ -237,6 +240,7 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_configs = 8, .num_gpios = 4, .has_temp = true, + .has_input_buf = true, .has_int_ref = true, .has_ref2 = true, .clock = 2 * HZ_PER_MHZ, @@ -251,6 +255,7 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_configs = 4, .num_gpios = 2, .has_temp = true, + .has_input_buf = true, .has_int_ref = true, .clock = 16 * HZ_PER_MHZ, .sinc5_data_rates = ad7175_sinc5_data_rates, @@ -263,6 +268,7 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_configs = 8, .num_gpios = 4, .has_temp = true, + .has_input_buf = true, .has_int_ref = true, .has_ref2 = true, .clock = 16 * HZ_PER_MHZ, @@ -277,6 +283,7 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_configs = 4, .num_gpios = 2, .has_temp = false, + .has_input_buf = false, .has_int_ref = true, .clock = 16 * HZ_PER_MHZ, .sinc5_data_rates = ad7175_sinc5_data_rates, @@ -289,6 +296,7 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_configs = 4, .num_gpios = 2, .has_temp = true, + .has_input_buf = true, .has_int_ref = true, .clock = 16 * HZ_PER_MHZ, .odr_start_value = AD7177_ODR_START_VALUE, @@ -932,7 +940,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev) AD7173_CH_ADDRESS(chan_arr[chan_index].channel, chan_arr[chan_index].channel2); chan_st_priv->cfg.bipolar = false; - chan_st_priv->cfg.input_buf = true; + chan_st_priv->cfg.input_buf = st->info->has_input_buf; chan_st_priv->cfg.ref_sel = AD7173_SETUP_REF_SEL_INT_REF; st->adc_mode |= AD7173_ADC_MODE_REF_EN; @@ -989,7 +997,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev) chan_st_priv->ain = AD7173_CH_ADDRESS(ain[0], ain[1]); chan_st_priv->chan_reg = chan_index; - chan_st_priv->cfg.input_buf = true; + chan_st_priv->cfg.input_buf = st->info->has_input_buf; chan_st_priv->cfg.odr = 0; chan_st_priv->cfg.bipolar = fwnode_property_read_bool(child, "bipolar"); From 3450ee7e800a8dc83290780c1b6ef66898e709d3 Mon Sep 17 00:00:00 2001 From: Dumitru Ceclan Date: Tue, 21 May 2024 11:45:40 +0300 Subject: [PATCH 042/537] iio: adc: ad7173: Add ad7173_device_info names Add missing names from the device info struct for 3 models to ensure consistency with the rest of the models. 
Fixes: 76a1e6a42802 ("iio: adc: ad7173: add AD7173 driver") Signed-off-by: Dumitru Ceclan Link: https://lore.kernel.org/r/20240521-ad7173-fixes-v1-2-8161cc7f3ad1@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/adc/ad7173.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c index 91acedaddb576..4649af5f9d768 100644 --- a/drivers/iio/adc/ad7173.c +++ b/drivers/iio/adc/ad7173.c @@ -220,6 +220,7 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates), }, [ID_AD7172_4] = { + .name = "ad7172-4", .id = AD7172_4_ID, .num_inputs = 9, .num_channels = 8, @@ -262,6 +263,7 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates), }, [ID_AD7175_8] = { + .name = "ad7175-8", .id = AD7175_8_ID, .num_inputs = 17, .num_channels = 16, @@ -290,6 +292,7 @@ static const struct ad7173_device_info ad7173_device_info[] = { .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates), }, [ID_AD7177_2] = { + .name = "ad7177-2", .id = AD7177_ID, .num_inputs = 5, .num_channels = 4, From f00dd8953094091a8e9c8cc00661d01c33b93615 Mon Sep 17 00:00:00 2001 From: Dumitru Ceclan Date: Tue, 21 May 2024 11:45:41 +0300 Subject: [PATCH 043/537] iio: adc: ad7173: Remove index from temp channel Temperature channel is unique per device, index is not needed. This is breaking userspace: Include fixes tag to be released within the same rc cycle. Fixes: 76a1e6a42802 ("iio: adc: ad7173: add AD7173 driver") Signed-off-by: Dumitru Ceclan Link: https://lore.kernel.org/r/20240521-ad7173-fixes-v1-3-8161cc7f3ad1@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/adc/ad7173.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c index 4649af5f9d768..048127bf979a9 100644 --- a/drivers/iio/adc/ad7173.c +++ b/drivers/iio/adc/ad7173.c @@ -815,7 +815,6 @@ static const struct iio_chan_spec ad7173_channel_template = { static const struct iio_chan_spec ad7173_temp_iio_channel_template = { .type = IIO_TEMP, - .indexed = 1, .channel = AD7173_AIN_TEMP_POS, .channel2 = AD7173_AIN_TEMP_NEG, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | From 95444b9eeb8c5c0330563931d70c61ca3b101548 Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Maneyrol Date: Fri, 24 May 2024 12:48:51 +0000 Subject: [PATCH 044/537] iio: invensense: fix odr switching to same value ODR switching happens in 2 steps, update to store the new value and then apply when the ODR change flag is received in the data. When switching to the same ODR value, the ODR change flag is never happening, and frequency switching is blocked waiting for the never coming apply. Fix the issue by preventing update to happen when switching to same ODR value. 
Fixes: 0ecc363ccea7 ("iio: make invensense timestamp module generic") Cc: stable@vger.kernel.org Signed-off-by: Jean-Baptiste Maneyrol Link: https://lore.kernel.org/r/20240524124851.567485-1-inv.git-commit@tdk.com Signed-off-by: Jonathan Cameron --- drivers/iio/common/inv_sensors/inv_sensors_timestamp.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c index fa205f17bd905..f44458c380d92 100644 --- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c +++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c @@ -60,11 +60,15 @@ EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_init, IIO_INV_SENSORS_TIMESTAMP); int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts, uint32_t period, bool fifo) { + uint32_t mult; + /* when FIFO is on, prevent odr change if one is already pending */ if (fifo && ts->new_mult != 0) return -EAGAIN; - ts->new_mult = period / ts->chip.clock_period; + mult = period / ts->chip.clock_period; + if (mult != ts->mult) + ts->new_mult = mult; return 0; } From 825fc49497957310e421454fe3fb8b8d8d8e2dd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20T=C5=AFma?= Date: Tue, 21 May 2024 18:22:54 +0200 Subject: [PATCH 045/537] media: mgb4: Fix double debugfs remove MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes an error where debugfs_remove_recursive() is called first on a parent directory and then again on a child which causes a kernel panic. Signed-off-by: Martin Tůma Signed-off-by: Hans Verkuil Fixes: 0ab13674a9bd ("media: pci: mgb4: Added Digiteq Automotive MGB4 driver") Cc: [hverkuil: added Fixes/Cc tags] --- drivers/media/pci/mgb4/mgb4_core.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c index 60498a5abebf3..ab4f07e2e5603 100644 --- a/drivers/media/pci/mgb4/mgb4_core.c +++ b/drivers/media/pci/mgb4/mgb4_core.c @@ -642,9 +642,6 @@ static void mgb4_remove(struct pci_dev *pdev) struct mgb4_dev *mgbdev = pci_get_drvdata(pdev); int i; -#ifdef CONFIG_DEBUG_FS - debugfs_remove_recursive(mgbdev->debugfs); -#endif #if IS_REACHABLE(CONFIG_HWMON) hwmon_device_unregister(mgbdev->hwmon_dev); #endif @@ -659,6 +656,10 @@ static void mgb4_remove(struct pci_dev *pdev) if (mgbdev->vin[i]) mgb4_vin_free(mgbdev->vin[i]); +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(mgbdev->debugfs); +#endif + device_remove_groups(&mgbdev->pdev->dev, mgb4_pci_groups); free_spi(mgbdev); free_i2c(mgbdev); From 7ff6c798eca05e4a9dcb80163cb454d7787a4bc3 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 21 May 2024 15:46:32 -0700 Subject: [PATCH 046/537] ACPI: APEI: EINJ: Fix einj_dev release leak The platform driver conversion of EINJ mistakenly used platform_device_del() to unwind platform_device_register_full() at module exit. This leads to a small leak of one 'struct platform_device' instance per module load/unload cycle. Switch to platform_device_unregister() which performs both device_del() and final put_device(). Fixes: 5621fafaac00 ("EINJ: Migrate to a platform driver") Cc: 6.9+ # 6.9+ Signed-off-by: Dan Williams Reviewed-by: Ben Cheatham Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/apei/einj-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c index 9515bcfe5e973..73903a497d73f 100644 --- a/drivers/acpi/apei/einj-core.c +++ b/drivers/acpi/apei/einj-core.c @@ -909,7 +909,7 @@ static void __exit einj_exit(void) if (einj_initialized) platform_driver_unregister(&einj_driver); - platform_device_del(einj_dev); + platform_device_unregister(einj_dev); } module_init(einj_init); From f6f172dc6a6d7775b2df6adfd1350700e9a847ec Mon Sep 17 00:00:00 2001 From: Armin Wolf Date: Wed, 22 May 2024 23:36:48 +0200 Subject: [PATCH 047/537] ACPI: EC: Abort address space access upon error When a multi-byte address space access is requested, acpi_ec_read()/ acpi_ec_write() is being called multiple times. Abort such operations if a single call to acpi_ec_read() / acpi_ec_write() fails, as the data read from / written to the EC might be incomplete. Signed-off-by: Armin Wolf Signed-off-by: Rafael J. Wysocki --- drivers/acpi/ec.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index e7793ee9e6498..a68dce2147a4d 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1333,10 +1333,13 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, if (ec->busy_polling || bits > 8) acpi_ec_burst_enable(ec); - for (i = 0; i < bytes; ++i, ++address, ++value) + for (i = 0; i < bytes; ++i, ++address, ++value) { result = (function == ACPI_READ) ? acpi_ec_read(ec, address, value) : acpi_ec_write(ec, address, *value); + if (result < 0) + break; + } if (ec->busy_polling || bits > 8) acpi_ec_burst_disable(ec); From c4bd7f1d78340e63de4d073fd3dbe5391e2996e5 Mon Sep 17 00:00:00 2001 From: Armin Wolf Date: Wed, 22 May 2024 23:36:49 +0200 Subject: [PATCH 048/537] ACPI: EC: Avoid returning AE_OK on errors in address space handler If an error code other than EINVAL, ENODEV or ETIME is returned by acpi_ec_read() / acpi_ec_write(), then AE_OK is incorrectly returned by acpi_ec_space_handler(). Fix this by only returning AE_OK on success, and return AE_ERROR otherwise. Signed-off-by: Armin Wolf [ rjw: Subject and changelog edits ] Signed-off-by: Rafael J. Wysocki --- drivers/acpi/ec.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index a68dce2147a4d..68dd17f96f636 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1351,8 +1351,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, return AE_NOT_FOUND; case -ETIME: return AE_TIME; - default: + case 0: return AE_OK; + default: + return AE_ERROR; } } From 9e69acc1de306b5243ca2ef8a54242e56d01b8e5 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 23 May 2024 17:57:21 +0200 Subject: [PATCH 049/537] thermal/debugfs: Print initial trip temperature and hysteresis in tze_seq_show() The temperature and hysteresis of a trip point may change during a mitigation episode it is involved in (it may even become invalid altogether), so in order to avoid possible confusion related to that, store the temperature and hysteresis of trip points at the time they are crossed on the way up and print those values instead of their current temperature and hysteresis. Fixes: 7ef01f228c9f ("thermal/debugfs: Add thermal debugfs information for mitigation episodes") Signed-off-by: Rafael J. 
Wysocki --- drivers/thermal/thermal_debugfs.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c index 91f9c21235a8a..e0e4eb6fad397 100644 --- a/drivers/thermal/thermal_debugfs.c +++ b/drivers/thermal/thermal_debugfs.c @@ -91,6 +91,8 @@ struct cdev_record { * * @timestamp: the trip crossing timestamp * @duration: total time when the zone temperature was above the trip point + * @trip_temp: trip temperature at mitigation start + * @trip_hyst: trip hysteresis at mitigation start * @count: the number of times the zone temperature was above the trip point * @max: maximum recorded temperature above the trip point * @min: minimum recorded temperature above the trip point @@ -99,6 +101,8 @@ struct cdev_record { struct trip_stats { ktime_t timestamp; ktime_t duration; + int trip_temp; + int trip_hyst; int count; int max; int min; @@ -574,6 +578,7 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz, struct thermal_debugfs *thermal_dbg = tz->debugfs; int trip_id = thermal_zone_trip_id(tz, trip); ktime_t now = ktime_get(); + struct trip_stats *trip_stats; if (!thermal_dbg) return; @@ -639,7 +644,10 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz, tz_dbg->trips_crossed[tz_dbg->nr_trips++] = trip_id; tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node); - tze->trip_stats[trip_id].timestamp = now; + trip_stats = &tze->trip_stats[trip_id]; + trip_stats->trip_temp = trip->temperature; + trip_stats->trip_hyst = trip->hysteresis; + trip_stats->timestamp = now; unlock: mutex_unlock(&thermal_dbg->lock); @@ -836,8 +844,8 @@ static int tze_seq_show(struct seq_file *s, void *v) seq_printf(s, "| %*d | %*s | %*d | %*d | %c%*lld | %*d | %*d | %*d |\n", 4 , trip_id, 8, type, - 9, trip->temperature, - 9, trip->hysteresis, + 9, trip_stats->trip_temp, + 9, trip_stats->trip_hyst, c, 10, duration_ms, 9, trip_stats->avg, 9, trip_stats->min, From 5a599e10e53d1914adf0032c730bc0f604a5d947 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 23 May 2024 17:58:42 +0200 Subject: [PATCH 050/537] thermal/debugfs: Allow tze_seq_show() to print statistics for invalid trips Commit a6258fde8de3 ("thermal/debugfs: Make tze_seq_show() skip invalid trips and trips with no stats") modified tze_seq_show() to skip invalid trips, but it overlooked the fact that a trip may become invalid during a mitigation eposide involving it, in which case its statistics should still be reported. For this reason, remove the invalid trip temperature check from the main loop in tze_seq_show(). The trips that have never been valid will still be skipped after this change because there are no statistics to report for them. Fixes: a6258fde8de3 ("thermal/debugfs: Make tze_seq_show() skip invalid trips and trips with no stats") Signed-off-by: Rafael J. Wysocki --- drivers/thermal/thermal_debugfs.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c index e0e4eb6fad397..9424472291570 100644 --- a/drivers/thermal/thermal_debugfs.c +++ b/drivers/thermal/thermal_debugfs.c @@ -802,10 +802,6 @@ static int tze_seq_show(struct seq_file *s, void *v) const struct thermal_trip *trip = &td->trip; struct trip_stats *trip_stats; - /* Skip invalid trips. 
*/ - if (trip->temperature == THERMAL_TEMP_INVALID) - continue; - /* * There is no possible mitigation happening at the * critical trip point, so the stats will be always From cb573eec609c44f44b27c2fa07afd926eddc9f89 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 23 May 2024 18:00:14 +0200 Subject: [PATCH 051/537] thermal: core: Introduce thermal_trip_crossed() Add a helper function called thermal_trip_crossed() to be invoked by __thermal_zone_device_update() in order to notify user space, the thermal debug code and the zone governor about trip crossing. Subsequently, this will also be used in the case when a trip point becomes invalid after being crossed on the way up. No intentional functional impact. Signed-off-by: Rafael J. Wysocki --- drivers/thermal/thermal_core.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 54cce4e523bc5..c119ef920793d 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -467,6 +467,21 @@ static void thermal_governor_trip_crossed(struct thermal_governor *governor, governor->trip_crossed(tz, trip, crossed_up); } +static void thermal_trip_crossed(struct thermal_zone_device *tz, + const struct thermal_trip *trip, + struct thermal_governor *governor, + bool crossed_up) +{ + if (crossed_up) { + thermal_notify_tz_trip_up(tz, trip); + thermal_debug_tz_trip_up(tz, trip); + } else { + thermal_notify_tz_trip_down(tz, trip); + thermal_debug_tz_trip_down(tz, trip); + } + thermal_governor_trip_crossed(governor, tz, trip, crossed_up); +} + static int thermal_trip_notify_cmp(void *ascending, const struct list_head *a, const struct list_head *b) { @@ -506,18 +521,12 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, handle_thermal_trip(tz, td, &way_up_list, &way_down_list); list_sort(&way_up_list, &way_up_list, thermal_trip_notify_cmp); - list_for_each_entry(td, &way_up_list, notify_list_node) { - thermal_notify_tz_trip_up(tz, &td->trip); - thermal_debug_tz_trip_up(tz, &td->trip); - thermal_governor_trip_crossed(governor, tz, &td->trip, true); - } + list_for_each_entry(td, &way_up_list, notify_list_node) + thermal_trip_crossed(tz, &td->trip, governor, true); list_sort(NULL, &way_down_list, thermal_trip_notify_cmp); - list_for_each_entry(td, &way_down_list, notify_list_node) { - thermal_notify_tz_trip_down(tz, &td->trip); - thermal_debug_tz_trip_down(tz, &td->trip); - thermal_governor_trip_crossed(governor, tz, &td->trip, false); - } + list_for_each_entry(td, &way_down_list, notify_list_node) + thermal_trip_crossed(tz, &td->trip, governor, false); if (governor->manage) governor->manage(tz); From ae2170d6ea96e652c7fb5689f1980986bf48b7b8 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 23 May 2024 18:05:03 +0200 Subject: [PATCH 052/537] thermal: trip: Trigger trip down notifications when trips involved in mitigation become invalid When a trip point becomes invalid after being crossed on the way up, it is involved in a mitigation episode that needs to be adjusted to compensate for the trip going away. For this reason, introduce thermal_zone_trip_down() as a wrapper around thermal_trip_crossed() and make thermal_zone_set_trip_temp() call it if the new temperature of the trip at hand is equal to THERMAL_TEMP_INVALID and it has been crossed on the way up to trigger all of the necessary adjustments in user space, the thermal debug code and the zone governor. 
Fixes: 8c69a777e480 ("thermal: core: Fix the handling of invalid trip points") Signed-off-by: Rafael J. Wysocki --- drivers/thermal/thermal_core.c | 6 ++++++ drivers/thermal/thermal_core.h | 2 ++ drivers/thermal/thermal_trip.c | 20 ++++++++++++-------- 3 files changed, 20 insertions(+), 8 deletions(-) diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index c119ef920793d..30567b4994551 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -602,6 +602,12 @@ void thermal_zone_device_update(struct thermal_zone_device *tz, } EXPORT_SYMBOL_GPL(thermal_zone_device_update); +void thermal_zone_trip_down(struct thermal_zone_device *tz, + const struct thermal_trip *trip) +{ + thermal_trip_crossed(tz, trip, thermal_get_tz_governor(tz), false); +} + int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *), void *data) { diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index d9785e5bbb08c..20e7b45673d68 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -246,6 +246,8 @@ int thermal_zone_trip_id(const struct thermal_zone_device *tz, void thermal_zone_trip_updated(struct thermal_zone_device *tz, const struct thermal_trip *trip); int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); +void thermal_zone_trip_down(struct thermal_zone_device *tz, + const struct thermal_trip *trip); /* sysfs I/F */ int thermal_zone_create_device_groups(struct thermal_zone_device *tz); diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c index d6a6acc78ddb6..49e63db685172 100644 --- a/drivers/thermal/thermal_trip.c +++ b/drivers/thermal/thermal_trip.c @@ -152,17 +152,23 @@ void thermal_zone_set_trip_temp(struct thermal_zone_device *tz, if (trip->temperature == temp) return; + trip->temperature = temp; + thermal_notify_tz_trip_change(tz, trip); + if (temp == THERMAL_TEMP_INVALID) { struct thermal_trip_desc *td = trip_to_trip_desc(trip); - if (trip->type == THERMAL_TRIP_PASSIVE && - tz->temperature >= td->threshold) { + if (tz->temperature >= td->threshold) { /* - * The trip has been crossed, so the thermal zone's - * passive count needs to be adjusted. + * The trip has been crossed on the way up, so some + * adjustments are needed to compensate for the lack + * of it going forward. 
*/ - tz->passive--; - WARN_ON_ONCE(tz->passive < 0); + if (trip->type == THERMAL_TRIP_PASSIVE) { + tz->passive--; + WARN_ON_ONCE(tz->passive < 0); + } + thermal_zone_trip_down(tz, trip); } /* * Invalidate the threshold to avoid triggering a spurious @@ -170,7 +176,5 @@ void thermal_zone_set_trip_temp(struct thermal_zone_device *tz, */ td->threshold = INT_MAX; } - trip->temperature = temp; - thermal_notify_tz_trip_change(tz, trip); } EXPORT_SYMBOL_GPL(thermal_zone_set_trip_temp); From e5c7bd4e5ca0f549108a7013ba77885926c6a56b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 22 May 2024 13:40:19 -0300 Subject: [PATCH 053/537] tools include UAPI: Sync linux/stat.h with the kernel sources To get the changes in: 2a82bb02941fb53d ("statx: stx_subvol") To pick up this change and support it: $ tools/perf/trace/beauty/statx_mask.sh > before $ cp include/uapi/linux/stat.h tools/perf/trace/beauty/include/uapi/linux/stat.h $ tools/perf/trace/beauty/statx_mask.sh > after $ diff -u before after --- before 2024-05-22 13:39:49.742470571 -0300 +++ after 2024-05-22 13:39:59.157883101 -0300 @@ -14,4 +14,5 @@ [ilog2(0x00001000) + 1] = "MNT_ID", [ilog2(0x00002000) + 1] = "DIOALIGN", [ilog2(0x00004000) + 1] = "MNT_ID_UNIQUE", + [ilog2(0x00008000) + 1] = "SUBVOL", }; $ Now we'll see it like we see these: # perf trace -e statx 0.000 ( 0.015 ms): systemd-userwo/3982299 statx(dfd: 6, filename: ".", mask: TYPE|INO|MNT_ID, buffer: 0x7ffd8945e850) = 0 180.559 ( 0.007 ms): (ostnamed)/3982957 statx(dfd: 4, filename: "sys", flags: SYMLINK_NOFOLLOW|NO_AUTOMOUNT|STATX_DONT_SYNC, mask: TYPE, buffer: 0x7fff13161190) = 0 180.918 ( 0.011 ms): (ostnamed)/3982957 statx(dfd: CWD, filename: "/run/systemd/mount-rootfs/sys/kernel/security", flags: SYMLINK_NOFOLLOW|NO_AUTOMOUNT|STATX_DONT_SYNC, mask: MNT_ID, buffer: 0x7fff13161120) = 0 180.956 ( 0.010 ms): (ostnamed)/3982957 statx(dfd: CWD, filename: "/run/systemd/mount-rootfs/sys/fs/cgroup", flags: SYMLINK_NOFOLLOW|NO_AUTOMOUNT|STATX_DONT_SYNC, mask: MNT_ID, buffer: 0x7fff13161120) = 0 Cc: Adrian Hunter Cc: Christian Brauner Cc: Ian Rogers Cc: Jiri Olsa Cc: Kan Liang Cc: Kent Overstreet Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/Zk5nO9yT0oPezUoo@x1 Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/trace/beauty/include/uapi/linux/stat.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/trace/beauty/include/uapi/linux/stat.h b/tools/perf/trace/beauty/include/uapi/linux/stat.h index 2f2ee82d55175..67626d5353166 100644 --- a/tools/perf/trace/beauty/include/uapi/linux/stat.h +++ b/tools/perf/trace/beauty/include/uapi/linux/stat.h @@ -126,8 +126,9 @@ struct statx { __u64 stx_mnt_id; __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */ __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */ + __u64 stx_subvol; /* Subvolume identifier */ /* 0xa0 */ - __u64 __spare3[12]; /* Spare space for future expansion */ + __u64 __spare3[11]; /* Spare space for future expansion */ /* 0x100 */ }; @@ -155,6 +156,7 @@ struct statx { #define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */ #define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */ #define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */ +#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */ #define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ From 0efc88e444d9d74f220f60a3e37143c8decf1bea Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 27 May 2024 12:15:27 -0300 
Subject: [PATCH 054/537] tools headers UAPI: Sync linux/prctl.h with the kernel sources To pick the changes in: 628d701f2de5b9a1 ("powerpc/dexcr: Add DEXCR prctl interface") 6b9391b581fddd85 ("riscv: Include riscv_set_icache_flush_ctx prctl") That adds some PowerPC and a RISC-V specific prctl options: $ tools/perf/trace/beauty/prctl_option.sh > before $ cp include/uapi/linux/prctl.h tools/perf/trace/beauty/include/uapi/linux/prctl.h $ tools/perf/trace/beauty/prctl_option.sh > after $ diff -u before after --- before 2024-05-27 12:14:21.358032781 -0300 +++ after 2024-05-27 12:14:32.364530185 -0300 @@ -65,6 +65,9 @@ [68] = "GET_MEMORY_MERGE", [69] = "RISCV_V_SET_CONTROL", [70] = "RISCV_V_GET_CONTROL", + [71] = "RISCV_SET_ICACHE_FLUSH_CTX", + [72] = "PPC_GET_DEXCR", + [73] = "PPC_SET_DEXCR", }; static const char *prctl_set_mm_options[] = { [1] = "START_CODE", $ That now will be used to decode the syscall option and also to compose filters, for instance: [root@five ~]# perf trace -e syscalls:sys_enter_prctl --filter option==SET_NAME 0.000 Isolated Servi/3474327 syscalls:sys_enter_prctl(option: SET_NAME, arg2: 0x7f23f13b7aee) 0.032 DOM Worker/3474327 syscalls:sys_enter_prctl(option: SET_NAME, arg2: 0x7f23deb25670) 7.920 :3474328/3474328 syscalls:sys_enter_prctl(option: SET_NAME, arg2: 0x7f23e24fbb10) 7.935 StreamT~s #374/3474328 syscalls:sys_enter_prctl(option: SET_NAME, arg2: 0x7f23e24fb970) 8.400 Isolated Servi/3474329 syscalls:sys_enter_prctl(option: SET_NAME, arg2: 0x7f23e24bab10) 8.418 StreamT~s #374/3474329 syscalls:sys_enter_prctl(option: SET_NAME, arg2: 0x7f23e24ba970) ^C[root@five ~]# This addresses this perf build warning: Warning: Kernel ABI header differences: diff -u tools/include/uapi/linux/prctl.h include/uapi/linux/prctl.h Cc: Adrian Hunter Cc: Benjamin Gray Cc: Charlie Jenkins Cc: Ian Rogers Cc: Jiri Olsa Cc: Kan Liang Cc: Michael Ellerman Cc: Namhyung Kim Cc: Palmer Dabbelt Link: https://lore.kernel.org/lkml/ZlSklGWp--v_Ije7@x1 Signed-off-by: Arnaldo Carvalho de Melo --- .../trace/beauty/include/uapi/linux/prctl.h | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/tools/perf/trace/beauty/include/uapi/linux/prctl.h b/tools/perf/trace/beauty/include/uapi/linux/prctl.h index 370ed14b1ae09..35791791a879b 100644 --- a/tools/perf/trace/beauty/include/uapi/linux/prctl.h +++ b/tools/perf/trace/beauty/include/uapi/linux/prctl.h @@ -306,4 +306,26 @@ struct prctl_mm_map { # define PR_RISCV_V_VSTATE_CTRL_NEXT_MASK 0xc # define PR_RISCV_V_VSTATE_CTRL_MASK 0x1f +#define PR_RISCV_SET_ICACHE_FLUSH_CTX 71 +# define PR_RISCV_CTX_SW_FENCEI_ON 0 +# define PR_RISCV_CTX_SW_FENCEI_OFF 1 +# define PR_RISCV_SCOPE_PER_PROCESS 0 +# define PR_RISCV_SCOPE_PER_THREAD 1 + +/* PowerPC Dynamic Execution Control Register (DEXCR) controls */ +#define PR_PPC_GET_DEXCR 72 +#define PR_PPC_SET_DEXCR 73 +/* DEXCR aspect to act on */ +# define PR_PPC_DEXCR_SBHE 0 /* Speculative branch hint enable */ +# define PR_PPC_DEXCR_IBRTPD 1 /* Indirect branch recurrent target prediction disable */ +# define PR_PPC_DEXCR_SRAPD 2 /* Subroutine return address prediction disable */ +# define PR_PPC_DEXCR_NPHIE 3 /* Non-privileged hash instruction enable */ +/* Action to apply / return */ +# define PR_PPC_DEXCR_CTRL_EDITABLE 0x1 /* Aspect can be modified with PR_PPC_SET_DEXCR */ +# define PR_PPC_DEXCR_CTRL_SET 0x2 /* Set the aspect for this process */ +# define PR_PPC_DEXCR_CTRL_CLEAR 0x4 /* Clear the aspect for this process */ +# define PR_PPC_DEXCR_CTRL_SET_ONEXEC 0x8 /* Set the aspect on exec */ +# define 
PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC 0x10 /* Clear the aspect on exec */ +# define PR_PPC_DEXCR_CTRL_MASK 0x1f + #endif /* _LINUX_PRCTL_H */ From 1437a9f06f740cc2950d9b6ac23fad838ff023a3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 27 May 2024 12:34:20 -0300 Subject: [PATCH 055/537] tools headers UAPI: Sync fcntl.h with the kernel sources to pick F_DUPFD_QUERY There is no scrape script yet for those, but the warning pointed out we need to update the array with the F_LINUX_SPECIFIC_BASE entries, do it. Now 'perf trace' can decode that cmd and also use it in filter, as in: root@number:~# perf trace -e syscalls:*enter_fcntl --filter 'cmd != SETFL && cmd != GETFL' 0.000 sssd_kcm/303828 syscalls:sys_enter_fcntl(fd: 13, cmd: SETLK, arg: 0x7fffdc6a8a50) 0.013 sssd_kcm/303828 syscalls:sys_enter_fcntl(fd: 13, cmd: SETLKW, arg: 0x7fffdc6a8aa0) 0.090 sssd_kcm/303828 syscalls:sys_enter_fcntl(fd: 13, cmd: SETLKW, arg: 0x7fffdc6a88e0) ^Croot@number:~# This picks up the changes in: c62b758bae6af16f ("fcntl: add F_DUPFD_QUERY fcntl()") Addressing this perf tools build warning: Warning: Kernel ABI header differences: diff -u tools/perf/trace/beauty/include/uapi/linux/fcntl.h include/uapi/linux/fcntl.h Cc: Adrian Hunter Cc: Christian Brauner Cc: Ian Rogers Cc: Jiri Olsa Cc: Kan Liang Cc: Linus Torvalds Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/ZlSqNQH9mFw2bmjq@x1 Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-trace.c | 2 +- tools/perf/trace/beauty/include/uapi/linux/fcntl.h | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 51eca671c7976..08a3a6effac18 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -765,7 +765,7 @@ static const char *fcntl_cmds[] = { static DEFINE_STRARRAY(fcntl_cmds, "F_"); static const char *fcntl_linux_specific_cmds[] = { - "SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC", + "SETLEASE", "GETLEASE", "NOTIFY", "DUPFD_QUERY", [5] = "CANCELLK", "DUPFD_CLOEXEC", "SETPIPE_SZ", "GETPIPE_SZ", "ADD_SEALS", "GET_SEALS", "GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT", }; diff --git a/tools/perf/trace/beauty/include/uapi/linux/fcntl.h b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h index 282e90aeb163c..c0bcc185fa48f 100644 --- a/tools/perf/trace/beauty/include/uapi/linux/fcntl.h +++ b/tools/perf/trace/beauty/include/uapi/linux/fcntl.h @@ -8,6 +8,14 @@ #define F_SETLEASE (F_LINUX_SPECIFIC_BASE + 0) #define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1) +/* + * Request nofications on a directory. + * See below for events that may be notified. + */ +#define F_NOTIFY (F_LINUX_SPECIFIC_BASE + 2) + +#define F_DUPFD_QUERY (F_LINUX_SPECIFIC_BASE + 3) + /* * Cancel a blocking posix lock; internal use only until we expose an * asynchronous lock api to userspace: @@ -17,12 +25,6 @@ /* Create a file descriptor with FD_CLOEXEC set. */ #define F_DUPFD_CLOEXEC (F_LINUX_SPECIFIC_BASE + 6) -/* - * Request nofications on a directory. - * See below for events that may be notified. 
- */ -#define F_NOTIFY (F_LINUX_SPECIFIC_BASE+2) - /* * Set and get of pipe page size array */ From a3eed53beec4a7bac8acd109abcb187df2577781 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 27 May 2024 12:45:33 -0300 Subject: [PATCH 056/537] perf beauty: Update copy of linux/socket.h with the kernel sources To pick up the fixes in: 0645fbe760afcc53 ("net: have do_accept() take a struct proto_accept_arg argument") That just changes a function prototype, not touching things used by the perf scrape scripts such as: $ tools/perf/trace/beauty/sockaddr.sh | head -5 static const char *socket_families[] = { [0] = "UNSPEC", [1] = "LOCAL", [2] = "INET", [3] = "AX25", $ This addresses this perf tools build warning: Warning: Kernel ABI header differences: diff -u tools/perf/trace/beauty/include/linux/socket.h include/linux/socket.h Cc: Adrian Hunter Cc: Ian Rogers Cc: Jens Axboe Cc: Jiri Olsa Cc: Kan Liang Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/ZlSrceExgjrUiDb5@x1 Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/trace/beauty/include/linux/socket.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h index 139c330ccf2c3..89d16b90370bd 100644 --- a/tools/perf/trace/beauty/include/linux/socket.h +++ b/tools/perf/trace/beauty/include/linux/socket.h @@ -16,6 +16,7 @@ struct cred; struct socket; struct sock; struct sk_buff; +struct proto_accept_arg; #define __sockaddr_check_size(size) \ BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) @@ -433,7 +434,7 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size, extern int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags, struct sockaddr __user *addr, int addr_len); -extern struct file *do_accept(struct file *file, unsigned file_flags, +extern struct file *do_accept(struct file *file, struct proto_accept_arg *arg, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags); extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, From 001821b0e79716c4e17c71d8e053a23599a7a508 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 27 May 2024 12:51:30 -0300 Subject: [PATCH 057/537] perf trace beauty: Update the arch/x86/include/asm/irq_vectors.h copy with the kernel sources to pick POSTED_MSI_NOTIFICATION To pick up the change in: f5a3562ec9dd29e6 ("x86/irq: Reserve a per CPU IDT vector for posted MSIs") That picks up this new vector: $ cp arch/x86/include/asm/irq_vectors.h tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h $ tools/perf/trace/beauty/tracepoints/x86_irq_vectors.sh > after $ diff -u before after --- before 2024-05-27 12:50:47.708863932 -0300 +++ after 2024-05-27 12:51:15.335113123 -0300 @@ -1,6 +1,7 @@ static const char *x86_irq_vectors[] = { [0x02] = "NMI", [0x80] = "IA32_SYSCALL", + [0xeb] = "POSTED_MSI_NOTIFICATION", [0xec] = "LOCAL_TIMER", [0xed] = "HYPERV_STIMER0", [0xee] = "HYPERV_REENLIGHTENMENT", $ Now those will be known when pretty printing the irq_vectors:* tracepoints. 
Cc: Adrian Hunter Cc: Ian Rogers Cc: Jacob Pan Cc: Jiri Olsa Cc: Kan Liang Cc: Namhyung Kim Cc: Thomas Gleixner Link: https://lore.kernel.org/lkml/ZlS34M0x30EFVhbg@x1 Signed-off-by: Arnaldo Carvalho de Melo --- .../perf/trace/beauty/arch/x86/include/asm/irq_vectors.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h b/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h index d18bfb238f660..13aea8fc3d45f 100644 --- a/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h +++ b/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h @@ -97,10 +97,16 @@ #define LOCAL_TIMER_VECTOR 0xec +/* + * Posted interrupt notification vector for all device MSIs delivered to + * the host kernel. + */ +#define POSTED_MSI_NOTIFICATION_VECTOR 0xeb + #define NR_VECTORS 256 #ifdef CONFIG_X86_LOCAL_APIC -#define FIRST_SYSTEM_VECTOR LOCAL_TIMER_VECTOR +#define FIRST_SYSTEM_VECTOR POSTED_MSI_NOTIFICATION_VECTOR #else #define FIRST_SYSTEM_VECTOR NR_VECTORS #endif From 947051e361d551e0590777080ffc4926190f62f2 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 24 May 2024 15:19:54 +0100 Subject: [PATCH 058/537] KVM: arm64: Fix AArch32 register narrowing on userspace write When userspace writes to one of the core registers, we make sure to narrow the corresponding GPRs if PSTATE indicates an AArch32 context. The code tries to check whether the context is EL0 or EL1 so that it narrows the correct registers. But it does so by checking the full PSTATE instead of PSTATE.M. As a consequence, and if we are restoring an AArch32 EL0 context in a 64bit guest, and that PSTATE has *any* bit set outside of PSTATE.M, we narrow *all* registers instead of only the first 15, destroying the 64bit state. Obviously, this is not something the guest is likely to enjoy. Correctly masking PSTATE to only evaluate PSTATE.M fixes it. Fixes: 90c1f934ed71 ("KVM: arm64: Get rid of the AArch32 register mapping code") Reported-by: Nina Schoetterl-Glausch Cc: stable@vger.kernel.org Reviewed-by: Nina Schoetterl-Glausch Acked-by: Oliver Upton Link: https://lore.kernel.org/r/20240524141956.1450304-2-maz@kernel.org Signed-off-by: Marc Zyngier --- arch/arm64/kvm/guest.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index e2f762d959bb3..d9617b11f7a87 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -276,7 +276,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) { int i, nr_reg; - switch (*vcpu_cpsr(vcpu)) { + switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) { /* * Either we are dealing with user mode, and only the * first 15 registers (+ PC) must be narrowed to 32bit. From dfe6d190f38fc5df5ff2614b463a5195a399c885 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 24 May 2024 15:19:55 +0100 Subject: [PATCH 059/537] KVM: arm64: Allow AArch32 PSTATE.M to be restored as System mode It appears that we don't allow a vcpu to be restored in AArch32 System mode, as we *never* included it in the list of valid modes. Just add it to the list of allowed modes. 
Fixes: 0d854a60b1d7 ("arm64: KVM: enable initialization of a 32bit vcpu") Cc: stable@vger.kernel.org Acked-by: Oliver Upton Link: https://lore.kernel.org/r/20240524141956.1450304-3-maz@kernel.org Signed-off-by: Marc Zyngier --- arch/arm64/kvm/guest.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index d9617b11f7a87..11098eb7eb44a 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -251,6 +251,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) case PSR_AA32_MODE_SVC: case PSR_AA32_MODE_ABT: case PSR_AA32_MODE_UND: + case PSR_AA32_MODE_SYS: if (!vcpu_el1_is_32bit(vcpu)) return -EINVAL; break; From c92e8b9eacebb4060634ebd9395bba1b29aadc68 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 24 May 2024 15:19:56 +0100 Subject: [PATCH 060/537] KVM: arm64: AArch32: Fix spurious trapping of conditional instructions We recently upgraded the view of ESR_EL2 to 64bit, in keeping with the requirements of the architecture. However, the AArch32 emulation code was left unaudited, and the (already dodgy) code that triages whether a trap is spurious or not (because the condition code failed) broke in a subtle way: If ESR_EL2.ISS2 is ever non-zero (unlikely, but hey, this is the ARM architecture we're talking about), the hack that tests the top bits of ESR_EL2.EC will break in an interesting way. Instead, use kvm_vcpu_trap_get_class() to obtain the EC, and list all the possible ECs that can fail a condition code check. While we're at it, add SMC32 to the list, as it is explicitly listed as being allowed to trap despite failing a condition code check (as described in the HCR_EL2.TSC documentation). Fixes: 0b12620fddb8 ("KVM: arm64: Treat ESR_EL2 as a 64-bit register") Cc: stable@vger.kernel.org Acked-by: Oliver Upton Link: https://lore.kernel.org/r/20240524141956.1450304-4-maz@kernel.org Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/aarch32.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/hyp/aarch32.c b/arch/arm64/kvm/hyp/aarch32.c index 8d9670e6615dc..449fa58cf3b63 100644 --- a/arch/arm64/kvm/hyp/aarch32.c +++ b/arch/arm64/kvm/hyp/aarch32.c @@ -50,9 +50,23 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu) u32 cpsr_cond; int cond; - /* Top two bits non-zero? Unconditional. */ - if (kvm_vcpu_get_esr(vcpu) >> 30) + /* + * These are the exception classes that could fire with a + * conditional instruction. + */ + switch (kvm_vcpu_trap_get_class(vcpu)) { + case ESR_ELx_EC_CP15_32: + case ESR_ELx_EC_CP15_64: + case ESR_ELx_EC_CP14_MR: + case ESR_ELx_EC_CP14_LS: + case ESR_ELx_EC_FP_ASIMD: + case ESR_ELx_EC_CP10_ID: + case ESR_ELx_EC_CP14_64: + case ESR_ELx_EC_SVC32: + break; + default: return true; + } /* Is condition field valid? */ cond = kvm_vcpu_get_condition(vcpu); From fd7ccfb112302c6941f03095544c10879ced98be Mon Sep 17 00:00:00 2001 From: Sakari Ailus Date: Fri, 17 May 2024 09:42:04 +0300 Subject: [PATCH 061/537] media: Documentation: v4l: Fix ACTIVE route flag The documentation in one occasion mentions the VIDIOC_SUBDEV_STREAM_FL_ACTIVE flag. This was meant to be V4L2_SUBDEV_STREAM_FL_ACTIVE as it's a flag, not an IOCTL. Fix it. 
Fixes: cd2c75454d74 ("media: Documentation: Document S_ROUTING behaviour") Reported-by: Samuel Wein PhD Signed-off-by: Sakari Ailus Signed-off-by: Hans Verkuil --- Documentation/userspace-api/media/v4l/dev-subdev.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/userspace-api/media/v4l/dev-subdev.rst b/Documentation/userspace-api/media/v4l/dev-subdev.rst index 0f9eda3187f34..161b43f1ce66a 100644 --- a/Documentation/userspace-api/media/v4l/dev-subdev.rst +++ b/Documentation/userspace-api/media/v4l/dev-subdev.rst @@ -582,7 +582,7 @@ depending on the hardware. In all cases, however, only routes that have the Devices generating the streams may allow enabling and disabling some of the routes or have a fixed routing configuration. If the routes can be disabled, not declaring the routes (or declaring them without -``VIDIOC_SUBDEV_STREAM_FL_ACTIVE`` flag set) in ``VIDIOC_SUBDEV_S_ROUTING`` will +``V4L2_SUBDEV_STREAM_FL_ACTIVE`` flag set) in ``VIDIOC_SUBDEV_S_ROUTING`` will disable the routes. ``VIDIOC_SUBDEV_S_ROUTING`` will still return such routes back to the user in the routes array, with the ``V4L2_SUBDEV_STREAM_FL_ACTIVE`` flag unset. From fe61b2906bd046535f4ef7dfcd69562f531ccd38 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 20 May 2024 11:55:54 +0200 Subject: [PATCH 062/537] media: intel/ipu6: Fix some redundant resources freeing in ipu6_pci_remove() pcim_iomap_regions() and pcim_enable_device() are used in the probe. So the corresponding managed resources don't need to be freed explicitly in the remove function. Remove the incorrect pci_release_regions() and pci_disable_device() calls. Fixes: 25fedc021985 ("media: intel/ipu6: add Intel IPU6 PCI device driver") Signed-off-by: Christophe JAILLET Reviewed-by: Bingbu Cao Signed-off-by: Sakari Ailus Signed-off-by: Hans Verkuil --- drivers/media/pci/intel/ipu6/ipu6.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c index d2bebd2084615..f587f609259d7 100644 --- a/drivers/media/pci/intel/ipu6/ipu6.c +++ b/drivers/media/pci/intel/ipu6/ipu6.c @@ -727,9 +727,6 @@ static void ipu6_pci_remove(struct pci_dev *pdev) pm_runtime_forbid(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); - pci_release_regions(pdev); - pci_disable_device(pdev); - release_firmware(isp->cpd_fw); ipu6_mmu_cleanup(psys_mmu); From 266b44ec9a26dd2ad5d1dfcbb2f7f4e2c10da3fb Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 26 May 2024 09:59:56 +0200 Subject: [PATCH 063/537] media: intel/ipu6: Move isys_remove() close to isys_probe() In preparation to fixing a leak in isys_probe(), move isys_remove(). The fix will introduce a new function that will also be called from isys_remove(). The code needs to be rearranged to avoid a forward declaration. Having the .remove function close to the .probe function is also more standard. 
Signed-off-by: Christophe JAILLET Signed-off-by: Sakari Ailus Signed-off-by: Hans Verkuil --- drivers/media/pci/intel/ipu6/ipu6-isys.c | 70 ++++++++++++------------ 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c index 5992138c7290d..7ce2047a09b59 100644 --- a/drivers/media/pci/intel/ipu6/ipu6-isys.c +++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c @@ -925,41 +925,6 @@ static const struct dev_pm_ops isys_pm_ops = { .resume = isys_resume, }; -static void isys_remove(struct auxiliary_device *auxdev) -{ - struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); - struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev); - struct ipu6_device *isp = adev->isp; - struct isys_fw_msgs *fwmsg, *safe; - unsigned int i; - - list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) - dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs), - fwmsg, fwmsg->dma_addr, 0); - - list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) - dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs), - fwmsg, fwmsg->dma_addr, 0); - - isys_unregister_devices(isys); - isys_notifier_cleanup(isys); - - cpu_latency_qos_remove_request(&isys->pm_qos); - - if (!isp->secure_mode) { - ipu6_cpd_free_pkg_dir(adev); - ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt); - release_firmware(adev->fw); - } - - for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) - mutex_destroy(&isys->streams[i].mutex); - - isys_iwake_watermark_cleanup(isys); - mutex_destroy(&isys->stream_mutex); - mutex_destroy(&isys->mutex); -} - static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount) { struct device *dev = &isys->adev->auxdev.dev; @@ -1167,6 +1132,41 @@ static int isys_probe(struct auxiliary_device *auxdev, return ret; } +static void isys_remove(struct auxiliary_device *auxdev) +{ + struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); + struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev); + struct ipu6_device *isp = adev->isp; + struct isys_fw_msgs *fwmsg, *safe; + unsigned int i; + + list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) + dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs), + fwmsg, fwmsg->dma_addr, 0); + + list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) + dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs), + fwmsg, fwmsg->dma_addr, 0); + + isys_unregister_devices(isys); + isys_notifier_cleanup(isys); + + cpu_latency_qos_remove_request(&isys->pm_qos); + + if (!isp->secure_mode) { + ipu6_cpd_free_pkg_dir(adev); + ipu6_buttress_unmap_fw_image(adev, &adev->fw_sgt); + release_firmware(adev->fw); + } + + for (i = 0; i < IPU6_ISYS_MAX_STREAMS; i++) + mutex_destroy(&isys->streams[i].mutex); + + isys_iwake_watermark_cleanup(isys); + mutex_destroy(&isys->stream_mutex); + mutex_destroy(&isys->mutex); +} + struct fwmsg { int type; char *msg; From ab0ed481012811fed052f609b364c118ce8a576e Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 26 May 2024 09:59:57 +0200 Subject: [PATCH 064/537] media: intel/ipu6: Fix an error handling path in isys_probe() If an error occurs after a successful alloc_fw_msg_bufs() call, some resources should be released as already done in the remove function. Add a new free_fw_msg_bufs() function that releases what has been allocated by alloc_fw_msg_bufs(). Also use this new function in isys_remove() to avoid some code duplication. 
Fixes: f50c4ca0a820 ("media: intel/ipu6: add the main input system driver") Signed-off-by: Christophe JAILLET Signed-off-by: Sakari Ailus Signed-off-by: Hans Verkuil --- drivers/media/pci/intel/ipu6/ipu6-isys.c | 27 ++++++++++++++++-------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c index 7ce2047a09b59..1998b72ac07de 100644 --- a/drivers/media/pci/intel/ipu6/ipu6-isys.c +++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c @@ -925,6 +925,20 @@ static const struct dev_pm_ops isys_pm_ops = { .resume = isys_resume, }; +static void free_fw_msg_bufs(struct ipu6_isys *isys) +{ + struct device *dev = &isys->adev->auxdev.dev; + struct isys_fw_msgs *fwmsg, *safe; + + list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) + dma_free_attrs(dev, sizeof(struct isys_fw_msgs), fwmsg, + fwmsg->dma_addr, 0); + + list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) + dma_free_attrs(dev, sizeof(struct isys_fw_msgs), fwmsg, + fwmsg->dma_addr, 0); +} + static int alloc_fw_msg_bufs(struct ipu6_isys *isys, int amount) { struct device *dev = &isys->adev->auxdev.dev; @@ -1105,12 +1119,14 @@ static int isys_probe(struct auxiliary_device *auxdev, ret = isys_register_devices(isys); if (ret) - goto out_remove_pkg_dir_shared_buffer; + goto free_fw_msg_bufs; ipu6_mmu_hw_cleanup(adev->mmu); return 0; +free_fw_msg_bufs: + free_fw_msg_bufs(isys); out_remove_pkg_dir_shared_buffer: if (!isp->secure_mode) ipu6_cpd_free_pkg_dir(adev); @@ -1137,16 +1153,9 @@ static void isys_remove(struct auxiliary_device *auxdev) struct ipu6_bus_device *adev = auxdev_to_adev(auxdev); struct ipu6_isys *isys = dev_get_drvdata(&auxdev->dev); struct ipu6_device *isp = adev->isp; - struct isys_fw_msgs *fwmsg, *safe; unsigned int i; - list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) - dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs), - fwmsg, fwmsg->dma_addr, 0); - - list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) - dma_free_attrs(&auxdev->dev, sizeof(struct isys_fw_msgs), - fwmsg, fwmsg->dma_addr, 0); + free_fw_msg_bufs(isys); isys_unregister_devices(isys); isys_notifier_cleanup(isys); From c19fa08c1414644b0d9275d336bdaff90af57d0b Mon Sep 17 00:00:00 2001 From: Bingbu Cao Date: Mon, 27 May 2024 18:48:57 +0800 Subject: [PATCH 065/537] media: intel/ipu6: fix the buffer flags caused by wrong parentheses The buffer flags are set incorrectly due to misplaced parentheses, so the FL_INCOMING flag is never taken into account. Fix it by wrapping the ternary conditional operation with parentheses. Fixes: 3c1dfb5a69cf ("media: intel/ipu6: input system video nodes and buffer queues") Signed-off-by: Bingbu Cao Signed-off-by: Sakari Ailus Signed-off-by: Hans Verkuil --- drivers/media/pci/intel/ipu6/ipu6-isys-queue.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c index 40a8ebfcfce2d..4bd4e324abc9f 100644 --- a/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c +++ b/drivers/media/pci/intel/ipu6/ipu6-isys-queue.c @@ -301,10 +301,10 @@ static int ipu6_isys_stream_start(struct ipu6_isys_video *av, out_requeue: if (bl && bl->nbufs) ipu6_isys_buffer_list_queue(bl, - (IPU6_ISYS_BUFFER_LIST_FL_INCOMING | - error) ? + IPU6_ISYS_BUFFER_LIST_FL_INCOMING | + (error ? IPU6_ISYS_BUFFER_LIST_FL_SET_STATE : - 0, error ? VB2_BUF_STATE_ERROR : + 0), error ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_QUEUED); flush_firmware_streamon_fail(stream); From 0c07c273a5fe1a25d4f477fe7edf64b3e8b19b3d Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Mon, 27 May 2024 14:15:22 +0200 Subject: [PATCH 066/537] debugfs: continue to ignore unknown mount options Wolfram reported that debugfs remained empty on some of his boards triggering the message "debugfs: Unknown parameter 'auto'". The root of the issue is that we ignored unknown mount options in the old mount api but we started rejecting unknown mount options in the new mount api. Continue to ignore unknown mount options to not regress userspace. Fixes: a20971c18752 ("vfs: Convert debugfs to use the new mount API") Link: https://lore.kernel.org/r/20240527100618.np2wqiw5mz7as3vk@ninjato Reported-by: Wolfram Sang Tested-by: Wolfram Sang Signed-off-by: Christian Brauner --- fs/debugfs/inode.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index dc51df0b118d0..8fd928899a59e 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -107,8 +107,16 @@ static int debugfs_parse_param(struct fs_context *fc, struct fs_parameter *param int opt; opt = fs_parse(fc, debugfs_param_specs, param, &result); - if (opt < 0) + if (opt < 0) { + /* + * We might like to report bad mount options here; but + * traditionally debugfs has ignored all mount options + */ + if (opt == -ENOPARAM) + return 0; + return opt; + } switch (opt) { case Opt_uid: From db003a28e03f95f2bcb63f037a2078b8870b1ecd Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Tue, 28 May 2024 14:33:20 +0200 Subject: [PATCH 067/537] netfs: fix kernel doc for nets_wait_for_outstanding_io() The @inode parameter wasn't documented leading to new doc build warnings. Fixes: f89ea63f1c65 ("netfs, 9p: Fix race between umount and async request completion") Link: https://lore.kernel.org/r/20240528133050.7e09d78e@canb.auug.org.au Signed-off-by: Christian Brauner --- include/linux/netfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 3ca3906bb8da6..5d0288938cc2d 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -521,7 +521,7 @@ static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx) /** * netfs_wait_for_outstanding_io - Wait for outstanding I/O to complete - * @ctx: The netfs inode to wait on + * @inode: The netfs inode to wait on * * Wait for outstanding I/O requests of any type to complete. This is intended * to be called from inode eviction routines. 
This makes sure that any From da42b5229b27bb5c0eff3408c92f025e6041dad3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 28 May 2024 11:01:20 -0300 Subject: [PATCH 068/537] tools headers: Update the syscall tables and unistd.h, mostly to support the new 'mseal' syscall But also to wire up shadow stacks on 32-bit x86, picking up those changes from these csets: ff388fe5c481d39c ("mseal: wire up mseal syscall") 2883f01ec37dd866 ("x86/shstk: Enable shadow stacks for x32") This makes 'perf trace' support it, now its possible, for instance to do: # perf trace -e mseal --max-stack=16 Here is an example with the 'sendmmsg' syscall: root@x1:~# perf trace -e sendmmsg --max-stack 16 --max-events=1 0.000 ( 0.062 ms): dbus-broker/1012 sendmmsg(fd: 150, mmsg: 0x7ffef57cca50, vlen: 1, flags: DONTWAIT|NOSIGNAL) = 1 syscall_exit_to_user_mode_prepare ([kernel.kallsyms]) syscall_exit_to_user_mode_prepare ([kernel.kallsyms]) syscall_exit_to_user_mode ([kernel.kallsyms]) do_syscall_64 ([kernel.kallsyms]) entry_SYSCALL_64 ([kernel.kallsyms]) [0x117ce7] (/usr/lib64/libc.so.6 (deleted)) root@x1:~# To do a system wide tracing of the new 'mseal' syscall with a backtrace of at most 16 entries. This addresses these perf tools build warnings: Warning: Kernel ABI header differences: diff -u tools/include/uapi/asm-generic/unistd.h include/uapi/asm-generic/unistd.h diff -u tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl diff -u tools/perf/arch/powerpc/entry/syscalls/syscall.tbl arch/powerpc/kernel/syscalls/syscall.tbl diff -u tools/perf/arch/s390/entry/syscalls/syscall.tbl arch/s390/kernel/syscalls/syscall.tbl diff -u tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl arch/mips/kernel/syscalls/syscall_n64.tbl Cc: Adrian Hunter Cc: Andrew Morton Cc: H J Lu Cc: Ian Rogers Cc: Ingo Molnar Cc: Jeff Xu Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/ZlXlo4TNcba4wnVZ@x1 Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/asm-generic/unistd.h | 5 ++++- tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl | 1 + tools/perf/arch/powerpc/entry/syscalls/syscall.tbl | 1 + tools/perf/arch/s390/entry/syscalls/syscall.tbl | 1 + tools/perf/arch/x86/entry/syscalls/syscall_64.tbl | 3 ++- 5 files changed, 9 insertions(+), 2 deletions(-) diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h index 75f00965ab158..d983c48a3b6af 100644 --- a/tools/include/uapi/asm-generic/unistd.h +++ b/tools/include/uapi/asm-generic/unistd.h @@ -842,8 +842,11 @@ __SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr) #define __NR_lsm_list_modules 461 __SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules) +#define __NR_mseal 462 +__SYSCALL(__NR_mseal, sys_mseal) + #undef __NR_syscalls -#define __NR_syscalls 462 +#define __NR_syscalls 463 /* * 32 bit systems traditionally used different diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl index 532b855df5895..1464c6be6eb3c 100644 --- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl +++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl @@ -376,3 +376,4 @@ 459 n64 lsm_get_self_attr sys_lsm_get_self_attr 460 n64 lsm_set_self_attr sys_lsm_set_self_attr 461 n64 lsm_list_modules sys_lsm_list_modules +462 n64 mseal sys_mseal diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index 17173b82ca21d..3656f1ca7a21c 100644 --- 
a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -548,3 +548,4 @@ 459 common lsm_get_self_attr sys_lsm_get_self_attr 460 common lsm_set_self_attr sys_lsm_set_self_attr 461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl index 095bb86339a7d..bd0fee24ad10a 100644 --- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl @@ -464,3 +464,4 @@ 459 common lsm_get_self_attr sys_lsm_get_self_attr sys_lsm_get_self_attr 460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr 461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal sys_mseal diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 7e8d46f4147f5..a396f6e6ab5bf 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -374,7 +374,7 @@ 450 common set_mempolicy_home_node sys_set_mempolicy_home_node 451 common cachestat sys_cachestat 452 common fchmodat2 sys_fchmodat2 -453 64 map_shadow_stack sys_map_shadow_stack +453 common map_shadow_stack sys_map_shadow_stack 454 common futex_wake sys_futex_wake 455 common futex_wait sys_futex_wait 456 common futex_requeue sys_futex_requeue @@ -383,6 +383,7 @@ 459 common lsm_get_self_attr sys_lsm_get_self_attr 460 common lsm_set_self_attr sys_lsm_set_self_attr 461 common lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal # # Due to a historical design error, certain syscalls are numbered differently From f13e01b89daf42330a4a722f451e48c3e2edfc8d Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 17 May 2024 12:48:25 +0100 Subject: [PATCH 069/537] btrfs: ensure fast fsync waits for ordered extents after a write failure If a write path in COW mode fails, either before submitting a bio for the new extents or an actual IO error happens, we can end up allowing a fast fsync to log file extent items that point to unwritten extents. This is because dropping the extent maps happens when completing ordered extents, at btrfs_finish_one_ordered(), and the completion of an ordered extent is executed in a work queue. This can result in a fast fsync to start logging file extent items based on existing extent maps before the ordered extents complete, therefore resulting in a log that has file extent items that point to unwritten extents, resulting in a corrupt file if a crash happens after and the log tree is replayed the next time the fs is mounted. This can happen for both direct IO writes and buffered writes. For example consider a direct IO write, in COW mode, that fails at btrfs_dio_submit_io() because btrfs_extract_ordered_extent() returned an error: 1) We call btrfs_finish_ordered_extent() with the 'uptodate' parameter set to false, meaning an error happened; 2) That results in marking the ordered extent with the BTRFS_ORDERED_IOERR flag; 3) btrfs_finish_ordered_extent() queues the completion of the ordered extent - so that btrfs_finish_one_ordered() will be executed later in a work queue. 
That function will drop extent maps in the range when it's executed, since the extent maps point to unwritten locations (signaled by the BTRFS_ORDERED_IOERR flag); 4) After calling btrfs_finish_ordered_extent() we keep going down the write path and unlock the inode; 5) After that a fast fsync starts and locks the inode; 6) Before the work queue executes btrfs_finish_one_ordered(), the fsync task sees the extent maps that point to the unwritten locations and logs file extent items based on them - it does not know they are unwritten, and the fast fsync path does not wait for ordered extents to complete, which is an intentional behaviour in order to reduce latency. For the buffered write case, here's one example: 1) A fast fsync begins, and it starts by flushing delalloc and waiting for the writeback to complete by calling filemap_fdatawait_range(); 2) Flushing the delalloc created a new extent map X; 3) During the writeback some IO error happened, and at the end io callback (end_bbio_data_write()) we call btrfs_finish_ordered_extent(), which sets the BTRFS_ORDERED_IOERR flag in the ordered extent and queues its completion; 4) After queuing the ordered extent completion, the end io callback clears the writeback flag from all pages (or folios), and from that moment the fast fsync can proceed; 5) The fast fsync proceeds and sees extent map X and logs a file extent item based on extent map X, resulting in a log that points to an unwritten data extent - because the ordered extent completion hasn't run yet, it happens only after the logging. To fix this, make btrfs_finish_ordered_extent() set the inode flag BTRFS_INODE_COW_WRITE_ERROR in case an error happened for a COW write, so that a fast fsync will wait for ordered extent completion. Note that this issue of using extent maps that point to unwritten locations cannot happen for reads, because in read paths we start by locking the extent range and wait for any ordered extents in the range to complete before looking for extent maps. Reviewed-by: Qu Wenruo Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/btrfs_inode.h | 10 ++++++++++ fs/btrfs/file.c | 16 ++++++++++++++++ fs/btrfs/ordered-data.c | 31 +++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+) diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 91c994b569f3b..6ed495ca7a311 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -89,6 +89,16 @@ enum { BTRFS_INODE_FREE_SPACE_INODE, /* Set when there are no capabilities in XATTs for the inode. */ BTRFS_INODE_NO_CAP_XATTR, + /* + * Set if an error happened when doing a COW write before submitting a + * bio or during writeback. Used for both buffered writes and direct IO + * writes. This is to signal a fast fsync that it has to wait for + * ordered extents to complete and therefore not log extent maps that + * point to unwritten extents (when an ordered extent completes and it + * has the BTRFS_ORDERED_IOERR flag set, it drops extent maps in its + * range).
+ */ + BTRFS_INODE_COW_WRITE_ERROR, }; /* in memory btrfs inode */ diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 0c7c1b42028e3..00670596bf065 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1885,6 +1885,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) */ if (full_sync || btrfs_is_zoned(fs_info)) { ret = btrfs_wait_ordered_range(inode, start, len); + clear_bit(BTRFS_INODE_COW_WRITE_ERROR, &BTRFS_I(inode)->runtime_flags); } else { /* * Get our ordered extents as soon as possible to avoid doing @@ -1894,6 +1895,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) btrfs_get_ordered_extents_for_logging(BTRFS_I(inode), &ctx.ordered_extents); ret = filemap_fdatawait_range(inode->i_mapping, start, end); + if (ret) + goto out_release_extents; + + /* + * Check and clear the BTRFS_INODE_COW_WRITE_ERROR now after + * starting and waiting for writeback, because for buffered IO + * it may have been set during the end IO callback + * (end_bbio_data_write() -> btrfs_finish_ordered_extent()) in + * case an error happened and we need to wait for ordered + * extents to complete so that any extent maps that point to + * unwritten locations are dropped and we don't log them. + */ + if (test_and_clear_bit(BTRFS_INODE_COW_WRITE_ERROR, + &BTRFS_I(inode)->runtime_flags)) + ret = btrfs_wait_ordered_range(inode, start, len); } if (ret) diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index c5bdd674f55c5..35a413ce935d9 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -388,6 +388,37 @@ bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered, ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate); spin_unlock_irqrestore(&inode->ordered_tree_lock, flags); + /* + * If this is a COW write it means we created new extent maps for the + * range and they point to unwritten locations if we got an error either + * before submitting a bio or during IO. + * + * We have marked the ordered extent with BTRFS_ORDERED_IOERR, and we + * are queuing its completion below. During completion, at + * btrfs_finish_one_ordered(), we will drop the extent maps for the + * unwritten extents. + * + * However because completion runs in a work queue we can end up having + * a fast fsync running before that. In the case of direct IO, once we + * unlock the inode the fsync might start, and we queue the completion + * before unlocking the inode. In the case of buffered IO when writeback + * finishes (end_bbio_data_write()) we queue the completion, so if the + * writeback was triggered by a fast fsync, the fsync might start + * logging before ordered extent completion runs in the work queue. + * + * The fast fsync will log file extent items based on the extent maps it + * finds, so if by the time it collects extent maps the ordered extent + * completion didn't happen yet, it will log file extent items that + * point to unwritten extents, resulting in a corruption if a crash + * happens and the log tree is replayed. Note that a fast fsync does not + * wait for completion of ordered extents in order to reduce latency. + * + * Set a flag in the inode so that the next fast fsync will wait for + * ordered extents to complete before starting to log. 
+ */ + if (!uptodate && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) + set_bit(BTRFS_INODE_COW_WRITE_ERROR, &inode->runtime_flags); + if (ret) btrfs_queue_ordered_fn(ordered); return ret; From 43cad521c6d228ea0c51e248f8e5b3a6295a2849 Mon Sep 17 00:00:00 2001 From: Dhananjay Ugwekar Date: Tue, 30 Apr 2024 14:07:06 +0530 Subject: [PATCH 070/537] tools/power/cpupower: Fix Pstate frequency reporting on AMD Family 1Ah CPUs Update cpupower's P-State frequency calculation and reporting with AMD Family 1Ah+ processors, when using the acpi-cpufreq driver. This is due to a change in the PStateDef MSR layout in AMD Family 1Ah+. Tested on 4th and 5th Gen AMD EPYC system Signed-off-by: Ananth Narayan Signed-off-by: Dhananjay Ugwekar Reviewed-by: Mario Limonciello Signed-off-by: Shuah Khan --- tools/power/cpupower/utils/helpers/amd.c | 26 +++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c index c519cc89c97f4..0a56e22240fc8 100644 --- a/tools/power/cpupower/utils/helpers/amd.c +++ b/tools/power/cpupower/utils/helpers/amd.c @@ -41,6 +41,16 @@ union core_pstate { unsigned res1:31; unsigned en:1; } pstatedef; + /* since fam 1Ah: */ + struct { + unsigned fid:12; + unsigned res1:2; + unsigned vid:8; + unsigned iddval:8; + unsigned idddiv:2; + unsigned res2:31; + unsigned en:1; + } pstatedef2; unsigned long long val; }; @@ -48,6 +58,10 @@ static int get_did(union core_pstate pstate) { int t; + /* Fam 1Ah onward do not use did */ + if (cpupower_cpu_info.family >= 0x1A) + return 0; + if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF) t = pstate.pstatedef.did; else if (cpupower_cpu_info.family == 0x12) @@ -61,12 +75,18 @@ static int get_did(union core_pstate pstate) static int get_cof(union core_pstate pstate) { int t; - int fid, did, cof; + int fid, did, cof = 0; did = get_did(pstate); if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATEDEF) { - fid = pstate.pstatedef.fid; - cof = 200 * fid / did; + if (cpupower_cpu_info.family >= 0x1A) { + fid = pstate.pstatedef2.fid; + if (fid > 0x0f) + cof = (fid * 5); + } else { + fid = pstate.pstatedef.fid; + cof = 200 * fid / did; + } } else { t = 0x10; fid = pstate.pstate.fid; From a0fc1a053b7a212244ff109e44469b8deff280c5 Mon Sep 17 00:00:00 2001 From: Jeff Johnson Date: Fri, 24 May 2024 16:58:26 -0700 Subject: [PATCH 071/537] of: of_test: add MODULE_DESCRIPTION() Fix the 'make W=1' warning: WARNING: modpost: missing MODULE_DESCRIPTION() in drivers/of/of_test.o Signed-off-by: Jeff Johnson Link: https://lore.kernel.org/r/20240524-md-of-of_test-v1-1-6ebd078d620f@quicinc.com Signed-off-by: Rob Herring (Arm) --- drivers/of/of_test.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/of/of_test.c b/drivers/of/of_test.c index a9301d293f014..c85a258bc6ae6 100644 --- a/drivers/of/of_test.c +++ b/drivers/of/of_test.c @@ -54,4 +54,5 @@ static struct kunit_suite of_dtb_suite = { kunit_test_suites( &of_dtb_suite, ); +MODULE_DESCRIPTION("KUnit tests for OF APIs"); MODULE_LICENSE("GPL"); From ac4b069035783f7a54b3ab841119f4b6bf435f98 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 28 May 2024 15:10:28 -0300 Subject: [PATCH 072/537] tools arch x86: Sync the msr-index.h copy with the kernel sources To pick up the changes from these csets: 53bc516ade85a764 ("x86/msr: Move ARCH_CAP_XAPIC_DISABLE bit definition to its rightful place") That patch just move definitions around, so this just silences this perf build warning: Warning: Kernel 
ABI header differences: diff -u tools/arch/x86/include/asm/msr-index.h arch/x86/include/asm/msr-index. Cc: Adrian Hunter Cc: Borislav Petkov (AMD) Cc: Ian Rogers Cc: Jiri Olsa Cc: Namhyung Kim Cc: Pawan Gupta Link: https://lore.kernel.org/lkml/ZlYe8jOzd1_DyA7X@x1 Signed-off-by: Arnaldo Carvalho de Melo --- tools/arch/x86/include/asm/msr-index.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index e72c2b8729579..e022e6eb766c6 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -170,6 +170,10 @@ * CPU is not affected by Branch * History Injection. */ +#define ARCH_CAP_XAPIC_DISABLE BIT(21) /* + * IA32_XAPIC_DISABLE_STATUS MSR + * supported + */ #define ARCH_CAP_PBRSB_NO BIT(24) /* * Not susceptible to Post-Barrier * Return Stack Buffer Predictions. @@ -192,11 +196,6 @@ * File. */ -#define ARCH_CAP_XAPIC_DISABLE BIT(21) /* - * IA32_XAPIC_DISABLE_STATUS MSR - * supported - */ - #define MSR_IA32_FLUSH_CMD 0x0000010b #define L1D_FLUSH BIT(0) /* * Writeback and invalidate the From 88e520512a68b4e724cb0f4281c79ae28673ccb1 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 28 May 2024 16:24:15 -0300 Subject: [PATCH 073/537] tools headers UAPI: Sync kvm headers with the kernel sources To pick the changes in: 4af663c2f64a8d25 ("KVM: SEV: Allow per-guest configuration of GHCB protocol version") 4f5defae708992dd ("KVM: SEV: introduce KVM_SEV_INIT2 operation") 26c44aa9e076ed83 ("KVM: SEV: define VM types for SEV and SEV-ES") ac5c48027bacb1b5 ("KVM: SEV: publish supported VMSA features") 651d61bc8b7d8bb6 ("KVM: PPC: Fix documentation for ppc mmu caps") That don't change functionality in tools/perf, as no new ioctl is added for the 'perf trace' scripts to harvest. This addresses these perf build warnings: Warning: Kernel ABI header differences: diff -u tools/include/uapi/linux/kvm.h include/uapi/linux/kvm.h diff -u tools/arch/x86/include/uapi/asm/kvm.h arch/x86/include/uapi/asm/kvm.h Cc: Adrian Hunter Cc: Ian Rogers Cc: Jiri Olsa Cc: Joel Stanley Cc: Michael Ellerman Cc: Michael Roth Cc: Namhyung Kim Cc: Paolo Bonzini Link: https://lore.kernel.org/lkml/ZlYxAdHjyAkvGtMW@x1 Signed-off-by: Arnaldo Carvalho de Melo --- tools/arch/x86/include/uapi/asm/kvm.h | 22 ++++++++++++++++++++-- tools/include/uapi/linux/kvm.h | 4 ++-- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index ef11aa4cab425..9fae1b73b529c 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -457,8 +457,13 @@ struct kvm_sync_regs { #define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001 -/* attributes for system fd (group 0) */ -#define KVM_X86_XCOMP_GUEST_SUPP 0 +/* vendor-independent attributes for system fd (group 0) */ +#define KVM_X86_GRP_SYSTEM 0 +# define KVM_X86_XCOMP_GUEST_SUPP 0 + +/* vendor-specific groups and attributes for system fd */ +#define KVM_X86_GRP_SEV 1 +# define KVM_X86_SEV_VMSA_FEATURES 0 struct kvm_vmx_nested_state_data { __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; @@ -689,6 +694,9 @@ enum sev_cmd_id { /* Guest Migration Extension */ KVM_SEV_SEND_CANCEL, + /* Second time is the charm; improved versions of the above ioctls. 
*/ + KVM_SEV_INIT2, + KVM_SEV_NR_MAX, }; @@ -700,6 +708,14 @@ struct kvm_sev_cmd { __u32 sev_fd; }; +struct kvm_sev_init { + __u64 vmsa_features; + __u32 flags; + __u16 ghcb_version; + __u16 pad1; + __u32 pad2[8]; +}; + struct kvm_sev_launch_start { __u32 handle; __u32 policy; @@ -856,5 +872,7 @@ struct kvm_hyperv_eventfd { #define KVM_X86_DEFAULT_VM 0 #define KVM_X86_SW_PROTECTED_VM 1 +#define KVM_X86_SEV_VM 2 +#define KVM_X86_SEV_ES_VM 3 #endif /* _ASM_X86_KVM_H */ diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index ea32b101b9999..d03842abae578 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -1221,9 +1221,9 @@ struct kvm_vfio_spapr_tce { /* Available with KVM_CAP_SPAPR_RESIZE_HPT */ #define KVM_PPC_RESIZE_HPT_PREPARE _IOR(KVMIO, 0xad, struct kvm_ppc_resize_hpt) #define KVM_PPC_RESIZE_HPT_COMMIT _IOR(KVMIO, 0xae, struct kvm_ppc_resize_hpt) -/* Available with KVM_CAP_PPC_RADIX_MMU or KVM_CAP_PPC_MMU_HASH_V3 */ +/* Available with KVM_CAP_PPC_MMU_RADIX or KVM_CAP_PPC_MMU_HASH_V3 */ #define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg) -/* Available with KVM_CAP_PPC_RADIX_MMU */ +/* Available with KVM_CAP_PPC_MMU_RADIX */ #define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info) /* Available with KVM_CAP_PPC_GET_CPU_CHAR */ #define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char) From c7a5096781732e0f9784551309484f3e103f6750 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 28 May 2024 13:25:02 +0300 Subject: [PATCH 074/537] PNP: Make dev_is_pnp() to be a function and export it for modules Since we have a dev_is_pnp() macro that utilises the address of the pnp_bus_type variable, the users, which can be compiled as modules, will fail to build. Convert the macro to be a function and export it to the modules to prevent build breakage. Reported-by: Woody Suwalski Closes: https://lore.kernel.org/r/cc8a93b2-2504-9754-e26c-5d5c3bd1265c@gmail.com Fixes: 2a49b45cd0e7 ("PNP: Add dev_is_pnp() macro") Signed-off-by: Andy Shevchenko Reviewed-by: Christoph Hellwig Signed-off-by: Rafael J. 
Wysocki --- drivers/pnp/driver.c | 6 ++++++ include/linux/pnp.h | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c index 0a5d0d8befa84..3483e52e3a81d 100644 --- a/drivers/pnp/driver.c +++ b/drivers/pnp/driver.c @@ -266,6 +266,12 @@ const struct bus_type pnp_bus_type = { .dev_groups = pnp_dev_groups, }; +bool dev_is_pnp(const struct device *dev) +{ + return dev->bus == &pnp_bus_type; +} +EXPORT_SYMBOL_GPL(dev_is_pnp); + int pnp_register_driver(struct pnp_driver *drv) { drv->driver.name = drv->name; diff --git a/include/linux/pnp.h b/include/linux/pnp.h index 82561242cda42..a8def1cea32c0 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -469,7 +469,7 @@ int compare_pnp_id(struct pnp_id *pos, const char *id); int pnp_register_driver(struct pnp_driver *drv); void pnp_unregister_driver(struct pnp_driver *drv); -#define dev_is_pnp(d) ((d)->bus == &pnp_bus_type) +bool dev_is_pnp(const struct device *dev); #else @@ -502,7 +502,7 @@ static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -E static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; } static inline void pnp_unregister_driver(struct pnp_driver *drv) { } -#define dev_is_pnp(d) false +static inline bool dev_is_pnp(const struct device *dev) { return false; } #endif /* CONFIG_PNP */ From edcde848c01eb071a91d479a6b3101d9cf48e905 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 28 May 2024 13:25:03 +0300 Subject: [PATCH 075/537] PNP: Hide pnp_bus_type from the non-PNP code The pnp_bus_type is defined only when CONFIG_PNP=y, while being not guarded by ifdeffery in the header. Moreover, it's not used outside of the PNP code. Move it to the internal header to make sure no-one will try to (ab)use it. Signed-off-by: Andy Shevchenko Reviewed-by: Christoph Hellwig Signed-off-by: Rafael J. 
Wysocki --- drivers/pnp/base.h | 1 + include/linux/pnp.h | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/pnp/base.h b/drivers/pnp/base.h index e74a0f6a31572..4e80273dfb1ec 100644 --- a/drivers/pnp/base.h +++ b/drivers/pnp/base.h @@ -6,6 +6,7 @@ extern struct mutex pnp_lock; extern const struct attribute_group *pnp_dev_groups[]; +extern const struct bus_type pnp_bus_type; int pnp_register_protocol(struct pnp_protocol *protocol); void pnp_unregister_protocol(struct pnp_protocol *protocol); diff --git a/include/linux/pnp.h b/include/linux/pnp.h index a8def1cea32c0..7f2ff95d2deb5 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -435,8 +435,6 @@ struct pnp_protocol { #define protocol_for_each_dev(protocol, dev) \ list_for_each_entry(dev, &(protocol)->devices, protocol_list) -extern const struct bus_type pnp_bus_type; - #if defined(CONFIG_PNP) /* device management */ From 2f523f29d3b19a668b8d4ce6f768d8faff976b3a Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 28 May 2024 16:54:31 -0300 Subject: [PATCH 076/537] tools headers UAPI: Update i915_drm.h with the kernel sources Cc: Adrian Hunter Cc: Ian Rogers Cc: Jiri Olsa Cc: Kan Liang Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/ Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/drm/i915_drm.h | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h index 2ee338860b7e0..d4d86e566e077 100644 --- a/tools/include/uapi/drm/i915_drm.h +++ b/tools/include/uapi/drm/i915_drm.h @@ -806,6 +806,12 @@ typedef struct drm_i915_irq_wait { */ #define I915_PARAM_PXP_STATUS 58 +/* + * Query if kernel allows marking a context to send a Freq hint to SLPC. This + * will enable use of the strategies allowed by the SLPC algorithm. + */ +#define I915_PARAM_HAS_CONTEXT_FREQ_HINT 59 + /* Must be kept compact -- no holes and well documented */ /** @@ -2148,6 +2154,15 @@ struct drm_i915_gem_context_param { * -EIO: The firmware did not succeed in creating the protected context. */ #define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd + +/* + * I915_CONTEXT_PARAM_LOW_LATENCY: + * + * Mark this context as a low latency workload which requires aggressive GT + * frequency scaling. Use I915_PARAM_HAS_CONTEXT_FREQ_HINT to check if the kernel + * supports this per context flag. + */ +#define I915_CONTEXT_PARAM_LOW_LATENCY 0xe /* Must be kept compact -- no holes and well documented */ /** @value: Context parameter value to be set or queried */ @@ -2623,19 +2638,29 @@ struct drm_i915_reg_read { * */ +/* + * struct drm_i915_reset_stats - Return global reset and other context stats + * + * Driver keeps few stats for each contexts and also global reset count. + * This struct can be used to query those stats. 
+ */ struct drm_i915_reset_stats { + /** @ctx_id: ID of the requested context */ __u32 ctx_id; + + /** @flags: MBZ */ __u32 flags; - /* All resets since boot/module reload, for all contexts */ + /** @reset_count: All resets since boot/module reload, for all contexts */ __u32 reset_count; - /* Number of batches lost when active in GPU, for this context */ + /** @batch_active: Number of batches lost when active in GPU, for this context */ __u32 batch_active; - /* Number of batches lost pending for execution, for this context */ + /** @batch_pending: Number of batches lost pending for execution, for this context */ __u32 batch_pending; + /** @pad: MBZ */ __u32 pad; }; From ac62f52138f752d6c74adc6321e4996d84caf5bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= Date: Sun, 26 May 2024 23:40:01 +0200 Subject: [PATCH 077/537] ACPI: AC: Properly notify powermanagement core about changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The powermanagement core does various actions when a powersupply changes. It calls into notifiers, LED triggers, other power supplies and emits an uevent. To make sure that all these actions happen properly call power_supply_changed(). Reported-by: Rajas Paranjpe Closes: https://github.com/MrChromebox/firmware/issues/420#issuecomment-2132251318 Signed-off-by: Thomas Weißschuh Reviewed-by: Sebastian Reichel Signed-off-by: Rafael J. Wysocki --- drivers/acpi/ac.c | 4 ++-- drivers/acpi/sbs.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 2d4a35e6dd18d..09a87fa222c78 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -145,7 +145,7 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data) dev_name(&adev->dev), event, (u32) ac->state); acpi_notifier_call_chain(adev, event, (u32) ac->state); - kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE); + power_supply_changed(ac->charger); } } @@ -268,7 +268,7 @@ static int acpi_ac_resume(struct device *dev) if (acpi_ac_get_state(ac)) return 0; if (old_state != ac->state) - kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE); + power_supply_changed(ac->charger); return 0; } diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index 94e3c000df2e1..dc8164b182dcc 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -610,7 +610,7 @@ static void acpi_sbs_callback(void *context) if (sbs->charger_exists) { acpi_ac_get_present(sbs); if (sbs->charger_present != saved_charger_state) - kobject_uevent(&sbs->charger->dev.kobj, KOBJ_CHANGE); + power_supply_changed(sbs->charger); } if (sbs->manager_present) { @@ -622,7 +622,7 @@ static void acpi_sbs_callback(void *context) acpi_battery_read(bat); if (saved_battery_state == bat->present) continue; - kobject_uevent(&bat->bat->dev.kobj, KOBJ_CHANGE); + power_supply_changed(bat->bat); } } } From 779b8a14afde110dd3502566be907289eba72447 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 28 May 2024 14:09:23 +0200 Subject: [PATCH 078/537] cpufreq: amd-pstate: remove global header file When extra warnings are enabled, gcc points out a global variable definition in a header: In file included from drivers/cpufreq/amd-pstate-ut.c:29: include/linux/amd-pstate.h:123:27: error: 'amd_pstate_mode_string' defined but not used [-Werror=unused-const-variable=] 123 | static const char * const amd_pstate_mode_string[] = { | ^~~~~~~~~~~~~~~~~~~~~~ This header is only included from two files in the same directory, and one of them uses only a single definition 
from it, so clean it up by moving most of the contents into the driver that uses them, and making shared bits a local header file. Fixes: 36c5014e5460 ("cpufreq: amd-pstate: optimize driver working mode selection in amd_pstate_param()") Signed-off-by: Arnd Bergmann Acked-by: Mario Limonciello Signed-off-by: Rafael J. Wysocki --- MAINTAINERS | 1 - drivers/cpufreq/amd-pstate-ut.c | 3 +- drivers/cpufreq/amd-pstate.c | 34 ++++++++++++++++++- .../linux => drivers/cpufreq}/amd-pstate.h | 33 ------------------ 4 files changed, 35 insertions(+), 36 deletions(-) rename {include/linux => drivers/cpufreq}/amd-pstate.h (82%) diff --git a/MAINTAINERS b/MAINTAINERS index d6c90161c7bfe..fc31870379f84 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1107,7 +1107,6 @@ L: linux-pm@vger.kernel.org S: Supported F: Documentation/admin-guide/pm/amd-pstate.rst F: drivers/cpufreq/amd-pstate* -F: include/linux/amd-pstate.h F: tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py AMD PTDMA DRIVER diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c index f04ae67dda372..fc275d41d51e9 100644 --- a/drivers/cpufreq/amd-pstate-ut.c +++ b/drivers/cpufreq/amd-pstate-ut.c @@ -26,10 +26,11 @@ #include #include #include -#include #include +#include "amd-pstate.h" + /* * Abbreviations: * amd_pstate_ut: used as a shortform for AMD P-State unit test. diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 1b7e82a0ad2e6..91993647e09ef 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -36,7 +36,6 @@ #include #include #include -#include #include #include @@ -46,6 +45,8 @@ #include #include #include + +#include "amd-pstate.h" #include "amd-pstate-trace.h" #define AMD_PSTATE_TRANSITION_LATENCY 20000 @@ -53,6 +54,37 @@ #define CPPC_HIGHEST_PERF_PERFORMANCE 196 #define CPPC_HIGHEST_PERF_DEFAULT 166 +#define AMD_CPPC_EPP_PERFORMANCE 0x00 +#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80 +#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF +#define AMD_CPPC_EPP_POWERSAVE 0xFF + +/* + * enum amd_pstate_mode - driver working mode of amd pstate + */ +enum amd_pstate_mode { + AMD_PSTATE_UNDEFINED = 0, + AMD_PSTATE_DISABLE, + AMD_PSTATE_PASSIVE, + AMD_PSTATE_ACTIVE, + AMD_PSTATE_GUIDED, + AMD_PSTATE_MAX, +}; + +static const char * const amd_pstate_mode_string[] = { + [AMD_PSTATE_UNDEFINED] = "undefined", + [AMD_PSTATE_DISABLE] = "disable", + [AMD_PSTATE_PASSIVE] = "passive", + [AMD_PSTATE_ACTIVE] = "active", + [AMD_PSTATE_GUIDED] = "guided", + NULL, +}; + +struct quirk_entry { + u32 nominal_freq; + u32 lowest_freq; +}; + /* * TODO: We need more time to fine tune processors with shared memory solution * with community together. diff --git a/include/linux/amd-pstate.h b/drivers/cpufreq/amd-pstate.h similarity index 82% rename from include/linux/amd-pstate.h rename to drivers/cpufreq/amd-pstate.h index d58fc022ec466..e6a28e7f4dbf1 100644 --- a/include/linux/amd-pstate.h +++ b/drivers/cpufreq/amd-pstate.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * linux/include/linux/amd-pstate.h - * * Copyright (C) 2022 Advanced Micro Devices, Inc. 
* * Author: Meng Li @@ -12,11 +10,6 @@ #include -#define AMD_CPPC_EPP_PERFORMANCE 0x00 -#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80 -#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF -#define AMD_CPPC_EPP_POWERSAVE 0xFF - /********************************************************************* * AMD P-state INTERFACE * *********************************************************************/ @@ -108,30 +101,4 @@ struct amd_cpudata { bool suspended; }; -/* - * enum amd_pstate_mode - driver working mode of amd pstate - */ -enum amd_pstate_mode { - AMD_PSTATE_UNDEFINED = 0, - AMD_PSTATE_DISABLE, - AMD_PSTATE_PASSIVE, - AMD_PSTATE_ACTIVE, - AMD_PSTATE_GUIDED, - AMD_PSTATE_MAX, -}; - -static const char * const amd_pstate_mode_string[] = { - [AMD_PSTATE_UNDEFINED] = "undefined", - [AMD_PSTATE_DISABLE] = "disable", - [AMD_PSTATE_PASSIVE] = "passive", - [AMD_PSTATE_ACTIVE] = "active", - [AMD_PSTATE_GUIDED] = "guided", - NULL, -}; - -struct quirk_entry { - u32 nominal_freq; - u32 lowest_freq; -}; - #endif /* _LINUX_AMD_PSTATE_H */ From e4731baaf29438508197d3a8a6d4f5a8c51663f8 Mon Sep 17 00:00:00 2001 From: Dhananjay Ugwekar Date: Mon, 27 May 2024 10:41:28 +0530 Subject: [PATCH 079/537] cpufreq: amd-pstate: Fix the inconsistency in max frequency units The nominal frequency in cpudata is maintained in MHz whereas all other frequencies are in KHz. This means we have to convert nominal frequency value to KHz before we do any interaction with other frequency values. In amd_pstate_set_boost(), this conversion from MHz to KHz is missed, fix that. Tested on a AMD Zen4 EPYC server Before: $ cat /sys/devices/system/cpu/cpufreq/policy*/scaling_max_freq | uniq 2151 $ cat /sys/devices/system/cpu/cpufreq/policy*/cpuinfo_min_freq | uniq 400000 $ cat /sys/devices/system/cpu/cpufreq/policy*/scaling_cur_freq | uniq 2151 409422 After: $ cat /sys/devices/system/cpu/cpufreq/policy*/scaling_max_freq | uniq 2151000 $ cat /sys/devices/system/cpu/cpufreq/policy*/cpuinfo_min_freq | uniq 400000 $ cat /sys/devices/system/cpu/cpufreq/policy*/scaling_cur_freq | uniq 2151000 1799527 Fixes: ec437d71db77 ("cpufreq: amd-pstate: Introduce a new AMD P-State driver to support future processors") Signed-off-by: Dhananjay Ugwekar Acked-by: Mario Limonciello Acked-by: Gautham R. Shenoy Tested-by: Peter Jung Cc: 5.17+ # 5.17+ Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/amd-pstate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 91993647e09ef..9ad62dbe8bfbf 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -701,7 +701,7 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state) if (state) policy->cpuinfo.max_freq = cpudata->max_freq; else - policy->cpuinfo.max_freq = cpudata->nominal_freq; + policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000; policy->max = policy->cpuinfo.max_freq; From d55510527153d17a3af8cc2df69c04f95ae1350d Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 28 May 2024 15:55:51 -0700 Subject: [PATCH 080/537] cxl/test: Add missing vmalloc.h for tools/testing/cxl/test/mem.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit tools/testing/cxl/test/mem.c uses vmalloc() and vfree() but does not include linux/vmalloc.h. Kernel v6.10 made changes that causes the currently included headers not depend on vmalloc.h and therefore mem.c can no longer compile. Add linux/vmalloc.h to fix compile issue. 
CC [M] tools/testing/cxl/test/mem.o tools/testing/cxl/test/mem.c: In function ‘label_area_release’: tools/testing/cxl/test/mem.c:1428:9: error: implicit declaration of function ‘vfree’; did you mean ‘kvfree’? [-Werror=implicit-function-declaration] 1428 | vfree(lsa); | ^~~~~ | kvfree tools/testing/cxl/test/mem.c: In function ‘cxl_mock_mem_probe’: tools/testing/cxl/test/mem.c:1466:22: error: implicit declaration of function ‘vmalloc’; did you mean ‘kmalloc’? [-Werror=implicit-function-declaration] 1466 | mdata->lsa = vmalloc(LSA_SIZE); | ^~~~~~~ | kmalloc Fixes: 7d3eb23c4ccf ("tools/testing/cxl: Introduce a mock memory device + driver") Reviewed-by: Dan Williams Reviewed-by: Alison Schofield Link: https://lore.kernel.org/r/20240528225551.1025977-1-dave.jiang@intel.com Signed-off-by: Dave Jiang --- tools/testing/cxl/test/mem.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index 6584443144de8..eaf091a3d3313 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -3,6 +3,7 @@ #include #include +#include #include #include #include From 49ba7b515c4c0719b866d16f068e62d16a8a3dd1 Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Tue, 7 May 2024 13:34:21 +0800 Subject: [PATCH 081/537] cxl/region: Fix memregion leaks in devm_cxl_add_region() Move the mode verification to __create_region() before allocating the memregion to avoid the memregion leaks. Fixes: 6e099264185d ("cxl/region: Add volatile region creation support") Signed-off-by: Li Zhijian Reviewed-by: Dan Williams Link: https://lore.kernel.org/r/20240507053421.456439-1-lizhijian@fujitsu.com Signed-off-by: Dave Jiang --- drivers/cxl/core/region.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 00a9f0eef8dd2..3c2b6144be23c 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -2352,15 +2352,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd, struct device *dev; int rc; - switch (mode) { - case CXL_DECODER_RAM: - case CXL_DECODER_PMEM: - break; - default: - dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode); - return ERR_PTR(-EINVAL); - } - cxlr = cxl_region_alloc(cxlrd, id); if (IS_ERR(cxlr)) return cxlr; @@ -2415,6 +2406,15 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd, { int rc; + switch (mode) { + case CXL_DECODER_RAM: + case CXL_DECODER_PMEM: + break; + default: + dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode); + return ERR_PTR(-EINVAL); + } + rc = memregion_alloc(GFP_KERNEL); if (rc < 0) return ERR_PTR(rc); From ed7ee6a69f9289337af4835a908aa782263d4852 Mon Sep 17 00:00:00 2001 From: John Garry Date: Wed, 29 May 2024 10:39:01 +0200 Subject: [PATCH 082/537] statx: Update offset commentary for struct statx In commit 2a82bb02941f ("statx: stx_subvol"), a new member was added to struct statx, but the offset comment was not correct. Update it. 
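For reference, a simplified sketch of the affected region with the byte offsets spelled out (assuming the layout the header documents, with no implicit padding in this stretch):

	__u64 stx_mnt_id;		/* 0x90 .. 0x97 */
	__u32 stx_dio_mem_align;	/* 0x98 .. 0x9b */
	__u32 stx_dio_offset_align;	/* 0x9c .. 0x9f */
	/* 0xa0 */
	__u64 stx_subvol;		/* 0xa0 .. 0xa7, so the marker belongs right before it */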
Signed-off-by: John Garry Link: https://lore.kernel.org/r/20240529081725.3769290-1-john.g.garry@oracle.com Signed-off-by: Christian Brauner --- include/uapi/linux/stat.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 67626d5353166..95770941ee2c6 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -126,8 +126,8 @@ struct statx { __u64 stx_mnt_id; __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */ __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */ - __u64 stx_subvol; /* Subvolume identifier */ /* 0xa0 */ + __u64 stx_subvol; /* Subvolume identifier */ __u64 __spare3[11]; /* Spare space for future expansion */ /* 0x100 */ }; From cc5ac966f26193ab185cc43d64d9f1ae998ccb6e Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Wed, 22 May 2024 19:42:57 +0800 Subject: [PATCH 083/537] cachefiles: add output string to cachefiles_obj_[get|put]_ondemand_fd This lets us see the correct trace output. Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie") Signed-off-by: Baokun Li Link: https://lore.kernel.org/r/20240522114308.2402121-2-libaokun@huaweicloud.com Acked-by: Jeff Layton Reviewed-by: Jingbo Xu Signed-off-by: Christian Brauner --- include/trace/events/cachefiles.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h index cf4b98b9a9edc..e3213af847cdf 100644 --- a/include/trace/events/cachefiles.h +++ b/include/trace/events/cachefiles.h @@ -127,7 +127,9 @@ enum cachefiles_error_trace { EM(cachefiles_obj_see_lookup_cookie, "SEE lookup_cookie") \ EM(cachefiles_obj_see_lookup_failed, "SEE lookup_failed") \ EM(cachefiles_obj_see_withdraw_cookie, "SEE withdraw_cookie") \ - E_(cachefiles_obj_see_withdrawal, "SEE withdrawal") + EM(cachefiles_obj_see_withdrawal, "SEE withdrawal") \ + EM(cachefiles_obj_get_ondemand_fd, "GET ondemand_fd") \ + E_(cachefiles_obj_put_ondemand_fd, "PUT ondemand_fd") #define cachefiles_coherency_traces \ EM(cachefiles_coherency_check_aux, "BAD aux ") \ From 0fc75c5940fa634d84e64c93bfc388e1274ed013 Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Wed, 22 May 2024 19:42:58 +0800 Subject: [PATCH 084/537] cachefiles: remove requests from xarray during flushing requests Even with CACHEFILES_DEAD set, we can still read the requests, so in the following concurrency the request may be used after it has been freed: mount | daemon_thread1 | daemon_thread2 ------------------------------------------------------------ cachefiles_ondemand_init_object cachefiles_ondemand_send_req REQ_A = kzalloc(sizeof(*req) + data_len) wait_for_completion(&REQ_A->done) cachefiles_daemon_read cachefiles_ondemand_daemon_read // close dev fd cachefiles_flush_reqs complete(&REQ_A->done) kfree(REQ_A) xa_lock(&cache->reqs); cachefiles_ondemand_select_req req->msg.opcode != CACHEFILES_OP_READ // req use-after-free !!! xa_unlock(&cache->reqs); xa_destroy(&cache->reqs) Hence remove requests from cache->reqs when flushing them to avoid accessing freed requests. 
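Since the flush path already runs under xa_lock(), the lock-held __xa_erase() variant is the right helper here (xa_erase() would try to take the lock again); a sketch of the resulting loop, simplified from the change below:

	xa_lock(xa);
	xa_for_each(xa, index, req) {
		req->error = -EIO;
		complete(&req->done);
		__xa_erase(xa, index);	/* drop the request while still holding the lock */
	}
	xa_unlock(xa);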
Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie") Signed-off-by: Baokun Li Link: https://lore.kernel.org/r/20240522114308.2402121-3-libaokun@huaweicloud.com Acked-by: Jeff Layton Reviewed-by: Jia Zhu Reviewed-by: Gao Xiang Reviewed-by: Jingbo Xu Signed-off-by: Christian Brauner --- fs/cachefiles/daemon.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index 6465e25742309..ccb7b707ea4b7 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -159,6 +159,7 @@ static void cachefiles_flush_reqs(struct cachefiles_cache *cache) xa_for_each(xa, index, req) { req->error = -EIO; complete(&req->done); + __xa_erase(xa, index); } xa_unlock(xa); From de3e26f9e5b76fc628077578c001c4a51bf54d06 Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Wed, 22 May 2024 19:42:59 +0800 Subject: [PATCH 085/537] cachefiles: fix slab-use-after-free in cachefiles_ondemand_get_fd() We got the following issue in a fuzz test of randomly issuing the restore command: ================================================================== BUG: KASAN: slab-use-after-free in cachefiles_ondemand_daemon_read+0x609/0xab0 Write of size 4 at addr ffff888109164a80 by task ondemand-04-dae/4962 CPU: 11 PID: 4962 Comm: ondemand-04-dae Not tainted 6.8.0-rc7-dirty #542 Call Trace: kasan_report+0x94/0xc0 cachefiles_ondemand_daemon_read+0x609/0xab0 vfs_read+0x169/0xb50 ksys_read+0xf5/0x1e0 Allocated by task 626: __kmalloc+0x1df/0x4b0 cachefiles_ondemand_send_req+0x24d/0x690 cachefiles_create_tmpfile+0x249/0xb30 cachefiles_create_file+0x6f/0x140 cachefiles_look_up_object+0x29c/0xa60 cachefiles_lookup_cookie+0x37d/0xca0 fscache_cookie_state_machine+0x43c/0x1230 [...] Freed by task 626: kfree+0xf1/0x2c0 cachefiles_ondemand_send_req+0x568/0x690 cachefiles_create_tmpfile+0x249/0xb30 cachefiles_create_file+0x6f/0x140 cachefiles_look_up_object+0x29c/0xa60 cachefiles_lookup_cookie+0x37d/0xca0 fscache_cookie_state_machine+0x43c/0x1230 [...] ================================================================== Following is the process that triggers the issue: mount | daemon_thread1 | daemon_thread2 ------------------------------------------------------------ cachefiles_ondemand_init_object cachefiles_ondemand_send_req REQ_A = kzalloc(sizeof(*req) + data_len) wait_for_completion(&REQ_A->done) cachefiles_daemon_read cachefiles_ondemand_daemon_read REQ_A = cachefiles_ondemand_select_req cachefiles_ondemand_get_fd copy_to_user(_buffer, msg, n) process_open_req(REQ_A) ------ restore ------ cachefiles_ondemand_restore xas_for_each(&xas, req, ULONG_MAX) xas_set_mark(&xas, CACHEFILES_REQ_NEW); cachefiles_daemon_read cachefiles_ondemand_daemon_read REQ_A = cachefiles_ondemand_select_req write(devfd, ("copen %u,%llu", msg->msg_id, size)); cachefiles_ondemand_copen xa_erase(&cache->reqs, id) complete(&REQ_A->done) kfree(REQ_A) cachefiles_ondemand_get_fd(REQ_A) fd = get_unused_fd_flags file = anon_inode_getfile fd_install(fd, file) load = (void *)REQ_A->msg.data; load->fd = fd; // load UAF !!! This issue is caused by issuing a restore command when the daemon is still alive, which results in a request being processed multiple times thus triggering a UAF. So to avoid this problem, add an additional reference count to cachefiles_req, which is held while waiting and reading, and then released when the waiting and reading is over. 
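The intended lifetime rule might be sketched as follows (helper and field names as introduced by this patch; error handling elided):

  static inline void cachefiles_req_put(struct cachefiles_req *req)
  {
          if (refcount_dec_and_test(&req->ref))
                  kfree(req);
  }

  /* submitter: owns the initial reference until the wait is over */
  refcount_set(&req->ref, 1);
  ...
  wait_for_completion(&req->done);
  cachefiles_req_put(req);                /* may free req */

  /* daemon read side: pin the request before dropping the xarray lock */
  xa_lock(&cache->reqs);
  refcount_inc(&req->ref);
  xa_unlock(&cache->reqs);
  ...                                     /* copy msg to userspace, get fd */
  cachefiles_req_put(req);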
Note that since there is only one reference count for waiting, we need to avoid the same request being completed multiple times, so we can only complete the request if it is successfully removed from the xarray. Fixes: e73fa11a356c ("cachefiles: add restore command to recover inflight ondemand read requests") Suggested-by: Hou Tao Signed-off-by: Baokun Li Link: https://lore.kernel.org/r/20240522114308.2402121-4-libaokun@huaweicloud.com Acked-by: Jeff Layton Reviewed-by: Jia Zhu Reviewed-by: Jingbo Xu Signed-off-by: Christian Brauner --- fs/cachefiles/internal.h | 1 + fs/cachefiles/ondemand.c | 23 +++++++++++++++++++---- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index d33169f0018b1..7745b8abc3aa3 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -138,6 +138,7 @@ static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache) struct cachefiles_req { struct cachefiles_object *object; struct completion done; + refcount_t ref; int error; struct cachefiles_msg msg; }; diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 4ba42f1fa3b40..c011fb24d2382 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -4,6 +4,12 @@ #include #include "internal.h" +static inline void cachefiles_req_put(struct cachefiles_req *req) +{ + if (refcount_dec_and_test(&req->ref)) + kfree(req); +} + static int cachefiles_ondemand_fd_release(struct inode *inode, struct file *file) { @@ -330,6 +336,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, xas_clear_mark(&xas, CACHEFILES_REQ_NEW); cache->req_id_next = xas.xa_index + 1; + refcount_inc(&req->ref); xa_unlock(&cache->reqs); id = xas.xa_index; @@ -356,15 +363,22 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, complete(&req->done); } + cachefiles_req_put(req); return n; err_put_fd: if (msg->opcode == CACHEFILES_OP_OPEN) close_fd(((struct cachefiles_open *)msg->data)->fd); error: - xa_erase(&cache->reqs, id); - req->error = ret; - complete(&req->done); + xas_reset(&xas); + xas_lock(&xas); + if (xas_load(&xas) == req) { + req->error = ret; + complete(&req->done); + xas_store(&xas, NULL); + } + xas_unlock(&xas); + cachefiles_req_put(req); return ret; } @@ -395,6 +409,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, goto out; } + refcount_set(&req->ref, 1); req->object = object; init_completion(&req->done); req->msg.opcode = opcode; @@ -456,7 +471,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, wake_up_all(&cache->daemon_pollwq); wait_for_completion(&req->done); ret = req->error; - kfree(req); + cachefiles_req_put(req); return ret; out: /* Reset the object to close state in error handling path. 
From da4a827416066191aafeeccee50a8836a826ba10 Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Wed, 22 May 2024 19:43:00 +0800 Subject: [PATCH 086/537] cachefiles: fix slab-use-after-free in cachefiles_ondemand_daemon_read() We got the following issue in a fuzz test of randomly issuing the restore command: ================================================================== BUG: KASAN: slab-use-after-free in cachefiles_ondemand_daemon_read+0xb41/0xb60 Read of size 8 at addr ffff888122e84088 by task ondemand-04-dae/963 CPU: 13 PID: 963 Comm: ondemand-04-dae Not tainted 6.8.0-dirty #564 Call Trace: kasan_report+0x93/0xc0 cachefiles_ondemand_daemon_read+0xb41/0xb60 vfs_read+0x169/0xb50 ksys_read+0xf5/0x1e0 Allocated by task 116: kmem_cache_alloc+0x140/0x3a0 cachefiles_lookup_cookie+0x140/0xcd0 fscache_cookie_state_machine+0x43c/0x1230 [...] Freed by task 792: kmem_cache_free+0xfe/0x390 cachefiles_put_object+0x241/0x480 fscache_cookie_state_machine+0x5c8/0x1230 [...] ================================================================== Following is the process that triggers the issue: mount | daemon_thread1 | daemon_thread2 ------------------------------------------------------------ cachefiles_withdraw_cookie cachefiles_ondemand_clean_object(object) cachefiles_ondemand_send_req REQ_A = kzalloc(sizeof(*req) + data_len) wait_for_completion(&REQ_A->done) cachefiles_daemon_read cachefiles_ondemand_daemon_read REQ_A = cachefiles_ondemand_select_req msg->object_id = req->object->ondemand->ondemand_id ------ restore ------ cachefiles_ondemand_restore xas_for_each(&xas, req, ULONG_MAX) xas_set_mark(&xas, CACHEFILES_REQ_NEW) cachefiles_daemon_read cachefiles_ondemand_daemon_read REQ_A = cachefiles_ondemand_select_req copy_to_user(_buffer, msg, n) xa_erase(&cache->reqs, id) complete(&REQ_A->done) ------ close(fd) ------ cachefiles_ondemand_fd_release cachefiles_put_object cachefiles_put_object kmem_cache_free(cachefiles_object_jar, object) REQ_A->object->ondemand->ondemand_id // object UAF !!! When we see the request within xa_lock, req->object must not have been freed yet, so grab the reference count of object before xa_unlock to avoid the above issue. 
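In other words, the read side now pins both the request and its object while still holding the xarray lock, roughly (names as in the hunks below):

  xa_lock(&cache->reqs);
  req = cachefiles_ondemand_select_req(...);      /* as in the code above */
  refcount_inc(&req->ref);
  cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
  xa_unlock(&cache->reqs);

  /* req->object (and its ->ondemand data) cannot go away here */
  msg->object_id = req->object->ondemand->ondemand_id;
  ...
  cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
  cachefiles_req_put(req);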
Fixes: 0a7e54c1959c ("cachefiles: resend an open request if the read request's object is closed") Signed-off-by: Baokun Li Link: https://lore.kernel.org/r/20240522114308.2402121-5-libaokun@huaweicloud.com Acked-by: Jeff Layton Reviewed-by: Jia Zhu Reviewed-by: Jingbo Xu Signed-off-by: Christian Brauner --- fs/cachefiles/ondemand.c | 3 +++ include/trace/events/cachefiles.h | 6 +++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index c011fb24d2382..3dd002108a872 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -337,6 +337,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, xas_clear_mark(&xas, CACHEFILES_REQ_NEW); cache->req_id_next = xas.xa_index + 1; refcount_inc(&req->ref); + cachefiles_grab_object(req->object, cachefiles_obj_get_read_req); xa_unlock(&cache->reqs); id = xas.xa_index; @@ -357,6 +358,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, goto err_put_fd; } + cachefiles_put_object(req->object, cachefiles_obj_put_read_req); /* CLOSE request has no reply */ if (msg->opcode == CACHEFILES_OP_CLOSE) { xa_erase(&cache->reqs, id); @@ -370,6 +372,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, if (msg->opcode == CACHEFILES_OP_OPEN) close_fd(((struct cachefiles_open *)msg->data)->fd); error: + cachefiles_put_object(req->object, cachefiles_obj_put_read_req); xas_reset(&xas); xas_lock(&xas); if (xas_load(&xas) == req) { diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h index e3213af847cdf..7d931db02b934 100644 --- a/include/trace/events/cachefiles.h +++ b/include/trace/events/cachefiles.h @@ -33,6 +33,8 @@ enum cachefiles_obj_ref_trace { cachefiles_obj_see_withdrawal, cachefiles_obj_get_ondemand_fd, cachefiles_obj_put_ondemand_fd, + cachefiles_obj_get_read_req, + cachefiles_obj_put_read_req, }; enum fscache_why_object_killed { @@ -129,7 +131,9 @@ enum cachefiles_error_trace { EM(cachefiles_obj_see_withdraw_cookie, "SEE withdraw_cookie") \ EM(cachefiles_obj_see_withdrawal, "SEE withdrawal") \ EM(cachefiles_obj_get_ondemand_fd, "GET ondemand_fd") \ - E_(cachefiles_obj_put_ondemand_fd, "PUT ondemand_fd") + EM(cachefiles_obj_put_ondemand_fd, "PUT ondemand_fd") \ + EM(cachefiles_obj_get_read_req, "GET read_req") \ + E_(cachefiles_obj_put_read_req, "PUT read_req") #define cachefiles_coherency_traces \ EM(cachefiles_coherency_check_aux, "BAD aux ") \ From 3e6d704f02aa4c50c7bc5fe91a4401df249a137b Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Wed, 22 May 2024 19:43:01 +0800 Subject: [PATCH 087/537] cachefiles: remove err_put_fd label in cachefiles_ondemand_daemon_read() The err_put_fd label is only used once, so remove it to make the code more readable. In addition, the logic for deleting error request and CLOSE request is merged to simplify the code. 
Signed-off-by: Baokun Li Link: https://lore.kernel.org/r/20240522114308.2402121-6-libaokun@huaweicloud.com Acked-by: Jeff Layton Reviewed-by: Jia Zhu Reviewed-by: Gao Xiang Reviewed-by: Jingbo Xu Signed-off-by: Christian Brauner --- fs/cachefiles/ondemand.c | 45 ++++++++++++++-------------------------- 1 file changed, 16 insertions(+), 29 deletions(-) diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 3dd002108a872..bb94ef6a6f61b 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -305,7 +305,6 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, { struct cachefiles_req *req; struct cachefiles_msg *msg; - unsigned long id = 0; size_t n; int ret = 0; XA_STATE(xas, &cache->reqs, cache->req_id_next); @@ -340,49 +339,37 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, cachefiles_grab_object(req->object, cachefiles_obj_get_read_req); xa_unlock(&cache->reqs); - id = xas.xa_index; - if (msg->opcode == CACHEFILES_OP_OPEN) { ret = cachefiles_ondemand_get_fd(req); if (ret) { cachefiles_ondemand_set_object_close(req->object); - goto error; + goto out; } } - msg->msg_id = id; + msg->msg_id = xas.xa_index; msg->object_id = req->object->ondemand->ondemand_id; if (copy_to_user(_buffer, msg, n) != 0) { ret = -EFAULT; - goto err_put_fd; - } - - cachefiles_put_object(req->object, cachefiles_obj_put_read_req); - /* CLOSE request has no reply */ - if (msg->opcode == CACHEFILES_OP_CLOSE) { - xa_erase(&cache->reqs, id); - complete(&req->done); + if (msg->opcode == CACHEFILES_OP_OPEN) + close_fd(((struct cachefiles_open *)msg->data)->fd); } - - cachefiles_req_put(req); - return n; - -err_put_fd: - if (msg->opcode == CACHEFILES_OP_OPEN) - close_fd(((struct cachefiles_open *)msg->data)->fd); -error: +out: cachefiles_put_object(req->object, cachefiles_obj_put_read_req); - xas_reset(&xas); - xas_lock(&xas); - if (xas_load(&xas) == req) { - req->error = ret; - complete(&req->done); - xas_store(&xas, NULL); + /* Remove error request and CLOSE request has no reply */ + if (ret || msg->opcode == CACHEFILES_OP_CLOSE) { + xas_reset(&xas); + xas_lock(&xas); + if (xas_load(&xas) == req) { + req->error = ret; + complete(&req->done); + xas_store(&xas, NULL); + } + xas_unlock(&xas); } - xas_unlock(&xas); cachefiles_req_put(req); - return ret; + return ret ? ret : n; } typedef int (*init_req_fn)(struct cachefiles_req *req, void *private); From a26dc49df37e996876f50a0210039b2d211fdd6f Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Wed, 22 May 2024 19:43:02 +0800 Subject: [PATCH 088/537] cachefiles: add consistency check for copen/cread This prevents malicious processes from completing random copen/cread requests and crashing the system. Added checks are listed below: * Generic, copen can only complete open requests, and cread can only complete read requests. * For copen, ondemand_id must not be 0, because this indicates that the request has not been read by the daemon. * For cread, the object corresponding to fd and req should be the same. 
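A minimal sketch of the cread-side check (the copen side is analogous, testing CACHEFILES_OP_OPEN and a non-zero ondemand_id) looks like this; it follows the hunks below:

  XA_STATE(xas, &cache->reqs, id);

  xa_lock(&cache->reqs);
  req = xas_load(&xas);
  if (!req || req->msg.opcode != CACHEFILES_OP_READ ||
      req->object != object) {
          xa_unlock(&cache->reqs);
          return -EINVAL;
  }
  xas_store(&xas, NULL);          /* only a fully matching request is removed */
  xa_unlock(&cache->reqs);

  complete(&req->done);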
Signed-off-by: Baokun Li Link: https://lore.kernel.org/r/20240522114308.2402121-7-libaokun@huaweicloud.com Acked-by: Jeff Layton Reviewed-by: Jingbo Xu Signed-off-by: Christian Brauner --- fs/cachefiles/ondemand.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index bb94ef6a6f61b..898fab68332bc 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -82,12 +82,12 @@ static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos, } static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl, - unsigned long arg) + unsigned long id) { struct cachefiles_object *object = filp->private_data; struct cachefiles_cache *cache = object->volume->cache; struct cachefiles_req *req; - unsigned long id; + XA_STATE(xas, &cache->reqs, id); if (ioctl != CACHEFILES_IOC_READ_COMPLETE) return -EINVAL; @@ -95,10 +95,15 @@ static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl, if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) return -EOPNOTSUPP; - id = arg; - req = xa_erase(&cache->reqs, id); - if (!req) + xa_lock(&cache->reqs); + req = xas_load(&xas); + if (!req || req->msg.opcode != CACHEFILES_OP_READ || + req->object != object) { + xa_unlock(&cache->reqs); return -EINVAL; + } + xas_store(&xas, NULL); + xa_unlock(&cache->reqs); trace_cachefiles_ondemand_cread(object, id); complete(&req->done); @@ -126,6 +131,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) unsigned long id; long size; int ret; + XA_STATE(xas, &cache->reqs, 0); if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) return -EOPNOTSUPP; @@ -149,9 +155,16 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) if (ret) return ret; - req = xa_erase(&cache->reqs, id); - if (!req) + xa_lock(&cache->reqs); + xas.xa_index = id; + req = xas_load(&xas); + if (!req || req->msg.opcode != CACHEFILES_OP_OPEN || + !req->object->ondemand->ondemand_id) { + xa_unlock(&cache->reqs); return -EINVAL; + } + xas_store(&xas, NULL); + xa_unlock(&cache->reqs); /* fail OPEN request if copen format is invalid */ ret = kstrtol(psize, 0, &size); From 0a790040838c736495d5afd6b2d636f159f817f1 Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Wed, 22 May 2024 19:43:03 +0800 Subject: [PATCH 089/537] cachefiles: add spin_lock for cachefiles_ondemand_info The following concurrency may cause a read request to fail to be completed and result in a hung: t1 | t2 --------------------------------------------------------- cachefiles_ondemand_copen req = xa_erase(&cache->reqs, id) // Anon fd is maliciously closed. cachefiles_ondemand_fd_release xa_lock(&cache->reqs) cachefiles_ondemand_set_object_close(object) xa_unlock(&cache->reqs) cachefiles_ondemand_set_object_open // No one will ever close it again. cachefiles_ondemand_daemon_read cachefiles_ondemand_select_req // Get a read req but its fd is already closed. // The daemon can't issue a cread ioctl with an closed fd, then hung. So add spin_lock for cachefiles_ondemand_info to protect ondemand_id and state, thus we can avoid the above problem in cachefiles_ondemand_copen() by using ondemand_id to determine if fd has been closed. 
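With the new lock, the copen path can be sketched as below (mirroring the hunk in this patch); the -EBADFD return is what tells the daemon the anonymous fd is already gone:

  spin_lock(&info->lock);
  if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
          /* anonymous fd already released: fail instead of reopening */
          spin_unlock(&info->lock);
          req->error = -EBADFD;
          goto out;
  }
  ...
  cachefiles_ondemand_set_object_open(req->object);
  spin_unlock(&info->lock);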
Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie") Signed-off-by: Baokun Li Link: https://lore.kernel.org/r/20240522114308.2402121-8-libaokun@huaweicloud.com Acked-by: Jeff Layton Signed-off-by: Christian Brauner --- fs/cachefiles/internal.h | 1 + fs/cachefiles/ondemand.c | 35 ++++++++++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 7745b8abc3aa3..45c8bed605383 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -55,6 +55,7 @@ struct cachefiles_ondemand_info { int ondemand_id; enum cachefiles_object_state state; struct cachefiles_object *object; + spinlock_t lock; }; /* diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 898fab68332bc..d04ddc6576e3b 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -16,13 +16,16 @@ static int cachefiles_ondemand_fd_release(struct inode *inode, struct cachefiles_object *object = file->private_data; struct cachefiles_cache *cache = object->volume->cache; struct cachefiles_ondemand_info *info = object->ondemand; - int object_id = info->ondemand_id; + int object_id; struct cachefiles_req *req; XA_STATE(xas, &cache->reqs, 0); xa_lock(&cache->reqs); + spin_lock(&info->lock); + object_id = info->ondemand_id; info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; cachefiles_ondemand_set_object_close(object); + spin_unlock(&info->lock); /* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */ xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) { @@ -127,6 +130,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) { struct cachefiles_req *req; struct fscache_cookie *cookie; + struct cachefiles_ondemand_info *info; char *pid, *psize; unsigned long id; long size; @@ -185,6 +189,33 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) goto out; } + info = req->object->ondemand; + spin_lock(&info->lock); + /* + * The anonymous fd was closed before copen ? Fail the request. + * + * t1 | t2 + * --------------------------------------------------------- + * cachefiles_ondemand_copen + * req = xa_erase(&cache->reqs, id) + * // Anon fd is maliciously closed. + * cachefiles_ondemand_fd_release + * xa_lock(&cache->reqs) + * cachefiles_ondemand_set_object_close(object) + * xa_unlock(&cache->reqs) + * cachefiles_ondemand_set_object_open + * // No one will ever close it again. + * cachefiles_ondemand_daemon_read + * cachefiles_ondemand_select_req + * + * Get a read req but its fd is already closed. The daemon can't + * issue a cread ioctl with an closed fd, then hung. 
+ */ + if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) { + spin_unlock(&info->lock); + req->error = -EBADFD; + goto out; + } cookie = req->object->cookie; cookie->object_size = size; if (size) @@ -194,6 +225,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) trace_cachefiles_ondemand_copen(req->object, id, size); cachefiles_ondemand_set_object_open(req->object); + spin_unlock(&info->lock); wake_up_all(&cache->daemon_pollwq); out: @@ -596,6 +628,7 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object, return -ENOMEM; object->ondemand->object = object; + spin_lock_init(&object->ondemand->lock); INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker); return 0; } From 4988e35e95fc938bdde0e15880fe72042fc86acf Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Wed, 22 May 2024 19:43:04 +0800 Subject: [PATCH 090/537] cachefiles: never get a new anonymous fd if ondemand_id is valid Now every time the daemon reads an open request, it gets a new anonymous fd and ondemand_id. With the introduction of "restore", it is possible to read the same open request more than once, and therefore an object can have more than one anonymous fd. If the anonymous fd is not unique, the following concurrencies will result in an fd leak: t1 | t2 | t3 ------------------------------------------------------------ cachefiles_ondemand_init_object cachefiles_ondemand_send_req REQ_A = kzalloc(sizeof(*req) + data_len) wait_for_completion(&REQ_A->done) cachefiles_daemon_read cachefiles_ondemand_daemon_read REQ_A = cachefiles_ondemand_select_req cachefiles_ondemand_get_fd load->fd = fd0 ondemand_id = object_id0 ------ restore ------ cachefiles_ondemand_restore // restore REQ_A cachefiles_daemon_read cachefiles_ondemand_daemon_read REQ_A = cachefiles_ondemand_select_req cachefiles_ondemand_get_fd load->fd = fd1 ondemand_id = object_id1 process_open_req(REQ_A) write(devfd, ("copen %u,%llu", msg->msg_id, size)) cachefiles_ondemand_copen xa_erase(&cache->reqs, id) complete(&REQ_A->done) kfree(REQ_A) process_open_req(REQ_A) // copen fails due to no req // daemon close(fd1) cachefiles_ondemand_fd_release // set object closed -- umount -- cachefiles_withdraw_cookie cachefiles_ondemand_clean_object cachefiles_ondemand_init_close_req if (!cachefiles_ondemand_object_is_open(object)) return -ENOENT; // The fd0 is not closed until the daemon exits. However, the anonymous fd holds the reference count of the object and the object holds the reference count of the cookie. So even though the cookie has been relinquished, it will not be unhashed and freed until the daemon exits. In fscache_hash_cookie(), when the same cookie is found in the hash list, if the cookie is set with the FSCACHE_COOKIE_RELINQUISHED bit, then the new cookie waits for the old cookie to be unhashed, while the old cookie is waiting for the leaked fd to be closed, if the daemon does not exit in time it will trigger a hung task. To avoid this, allocate a new anonymous fd only if no anonymous fd has been allocated (ondemand_id == 0) or if the previously allocated anonymous fd has been closed (ondemand_id == -1). Moreover, returns an error if ondemand_id is valid, letting the daemon know that the current userland restore logic is abnormal and needs to be checked. 
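The guard added in cachefiles_ondemand_get_fd() can be summarised as follows (simplified from the hunk below):

  spin_lock(&object->ondemand->lock);
  if (object->ondemand->ondemand_id > 0) {
          /* an fd is already installed for this object: refuse a second one */
          spin_unlock(&object->ondemand->lock);
          file->private_data = NULL;      /* pairs with the release-side check */
          ret = -EEXIST;
          goto err_put_file;
  }
  load->fd = fd;
  object->ondemand->ondemand_id = object_id;
  spin_unlock(&object->ondemand->lock);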
Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie") Signed-off-by: Baokun Li Link: https://lore.kernel.org/r/20240522114308.2402121-9-libaokun@huaweicloud.com Acked-by: Jeff Layton Signed-off-by: Christian Brauner --- fs/cachefiles/ondemand.c | 34 ++++++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index d04ddc6576e3b..d2d4e27fca6f5 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -14,11 +14,18 @@ static int cachefiles_ondemand_fd_release(struct inode *inode, struct file *file) { struct cachefiles_object *object = file->private_data; - struct cachefiles_cache *cache = object->volume->cache; - struct cachefiles_ondemand_info *info = object->ondemand; + struct cachefiles_cache *cache; + struct cachefiles_ondemand_info *info; int object_id; struct cachefiles_req *req; - XA_STATE(xas, &cache->reqs, 0); + XA_STATE(xas, NULL, 0); + + if (!object) + return 0; + + info = object->ondemand; + cache = object->volume->cache; + xas.xa = &cache->reqs; xa_lock(&cache->reqs); spin_lock(&info->lock); @@ -288,22 +295,39 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) goto err_put_fd; } + spin_lock(&object->ondemand->lock); + if (object->ondemand->ondemand_id > 0) { + spin_unlock(&object->ondemand->lock); + /* Pair with check in cachefiles_ondemand_fd_release(). */ + file->private_data = NULL; + ret = -EEXIST; + goto err_put_file; + } + file->f_mode |= FMODE_PWRITE | FMODE_LSEEK; fd_install(fd, file); load = (void *)req->msg.data; load->fd = fd; object->ondemand->ondemand_id = object_id; + spin_unlock(&object->ondemand->lock); cachefiles_get_unbind_pincount(cache); trace_cachefiles_ondemand_open(object, &req->msg, load); return 0; +err_put_file: + fput(file); err_put_fd: put_unused_fd(fd); err_free_id: xa_erase(&cache->ondemand_ids, object_id); err: + spin_lock(&object->ondemand->lock); + /* Avoid marking an opened object as closed. */ + if (object->ondemand->ondemand_id <= 0) + cachefiles_ondemand_set_object_close(object); + spin_unlock(&object->ondemand->lock); cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd); return ret; } @@ -386,10 +410,8 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, if (msg->opcode == CACHEFILES_OP_OPEN) { ret = cachefiles_ondemand_get_fd(req); - if (ret) { - cachefiles_ondemand_set_object_close(req->object); + if (ret) goto out; - } } msg->msg_id = xas.xa_index; From 4b4391e77a6bf24cba2ef1590e113d9b73b11039 Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Wed, 22 May 2024 19:43:05 +0800 Subject: [PATCH 091/537] cachefiles: defer exposing anon_fd until after copy_to_user() succeeds After installing the anonymous fd, we can now see it in userland and close it. However, at this point we may not have gotten the reference count of the cache, but we will put it during colse fd, so this may cause a cache UAF. So grab the cache reference count before fd_install(). In addition, by kernel convention, fd is taken over by the user land after fd_install(), and the kernel should not call close_fd() after that, i.e., it should call fd_install() after everything is ready, thus fd_install() is called after copy_to_user() succeeds. 
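The resulting ordering in the read path is roughly the following (condensed, locking not shown); the key point is that fd_install() is the very last step:

  anon_file.fd = get_unused_fd_flags(O_WRONLY);
  anon_file.file = anon_inode_getfile("[cachefiles]",
                          &cachefiles_ondemand_fd_fops, object, O_WRONLY);
  ...
  if (copy_to_user(_buffer, msg, n) != 0)
          ret = -EFAULT;

  if (msg->opcode == CACHEFILES_OP_OPEN) {
          if (ret < 0) {
                  fput(anon_file.file);           /* never seen by userspace */
                  put_unused_fd(anon_file.fd);
          } else {
                  fd_install(anon_file.fd, anon_file.file);
          }
  }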
Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie") Suggested-by: Hou Tao Signed-off-by: Baokun Li Link: https://lore.kernel.org/r/20240522114308.2402121-10-libaokun@huaweicloud.com Acked-by: Jeff Layton Signed-off-by: Christian Brauner --- fs/cachefiles/ondemand.c | 53 +++++++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index d2d4e27fca6f5..6f815e7c50867 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -4,6 +4,11 @@ #include #include "internal.h" +struct ondemand_anon_file { + struct file *file; + int fd; +}; + static inline void cachefiles_req_put(struct cachefiles_req *req) { if (refcount_dec_and_test(&req->ref)) @@ -263,14 +268,14 @@ int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args) return 0; } -static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) +static int cachefiles_ondemand_get_fd(struct cachefiles_req *req, + struct ondemand_anon_file *anon_file) { struct cachefiles_object *object; struct cachefiles_cache *cache; struct cachefiles_open *load; - struct file *file; u32 object_id; - int ret, fd; + int ret; object = cachefiles_grab_object(req->object, cachefiles_obj_get_ondemand_fd); @@ -282,16 +287,16 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) if (ret < 0) goto err; - fd = get_unused_fd_flags(O_WRONLY); - if (fd < 0) { - ret = fd; + anon_file->fd = get_unused_fd_flags(O_WRONLY); + if (anon_file->fd < 0) { + ret = anon_file->fd; goto err_free_id; } - file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops, - object, O_WRONLY); - if (IS_ERR(file)) { - ret = PTR_ERR(file); + anon_file->file = anon_inode_getfile("[cachefiles]", + &cachefiles_ondemand_fd_fops, object, O_WRONLY); + if (IS_ERR(anon_file->file)) { + ret = PTR_ERR(anon_file->file); goto err_put_fd; } @@ -299,16 +304,15 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) if (object->ondemand->ondemand_id > 0) { spin_unlock(&object->ondemand->lock); /* Pair with check in cachefiles_ondemand_fd_release(). 
*/ - file->private_data = NULL; + anon_file->file->private_data = NULL; ret = -EEXIST; goto err_put_file; } - file->f_mode |= FMODE_PWRITE | FMODE_LSEEK; - fd_install(fd, file); + anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK; load = (void *)req->msg.data; - load->fd = fd; + load->fd = anon_file->fd; object->ondemand->ondemand_id = object_id; spin_unlock(&object->ondemand->lock); @@ -317,9 +321,11 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) return 0; err_put_file: - fput(file); + fput(anon_file->file); + anon_file->file = NULL; err_put_fd: - put_unused_fd(fd); + put_unused_fd(anon_file->fd); + anon_file->fd = ret; err_free_id: xa_erase(&cache->ondemand_ids, object_id); err: @@ -376,6 +382,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, struct cachefiles_msg *msg; size_t n; int ret = 0; + struct ondemand_anon_file anon_file; XA_STATE(xas, &cache->reqs, cache->req_id_next); xa_lock(&cache->reqs); @@ -409,7 +416,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, xa_unlock(&cache->reqs); if (msg->opcode == CACHEFILES_OP_OPEN) { - ret = cachefiles_ondemand_get_fd(req); + ret = cachefiles_ondemand_get_fd(req, &anon_file); if (ret) goto out; } @@ -417,10 +424,16 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, msg->msg_id = xas.xa_index; msg->object_id = req->object->ondemand->ondemand_id; - if (copy_to_user(_buffer, msg, n) != 0) { + if (copy_to_user(_buffer, msg, n) != 0) ret = -EFAULT; - if (msg->opcode == CACHEFILES_OP_OPEN) - close_fd(((struct cachefiles_open *)msg->data)->fd); + + if (msg->opcode == CACHEFILES_OP_OPEN) { + if (ret < 0) { + fput(anon_file.file); + put_unused_fd(anon_file.fd); + goto out; + } + fd_install(anon_file.fd, anon_file.file); } out: cachefiles_put_object(req->object, cachefiles_obj_put_read_req); From 4f8703fb3482f92edcfd31661857b16fec89c2c0 Mon Sep 17 00:00:00 2001 From: Zizhi Wo Date: Wed, 22 May 2024 19:43:06 +0800 Subject: [PATCH 092/537] cachefiles: Set object to close if ondemand_id < 0 in copen If copen is maliciously called in the user mode, it may delete the request corresponding to the random id. And the request may have not been read yet. Note that when the object is set to reopen, the open request will be done with the still reopen state in above case. As a result, the request corresponding to this object is always skipped in select_req function, so the read request is never completed and blocks other process. Fix this issue by simply set object to close if its id < 0 in copen. Signed-off-by: Zizhi Wo Signed-off-by: Baokun Li Link: https://lore.kernel.org/r/20240522114308.2402121-11-libaokun@huaweicloud.com Acked-by: Jeff Layton Reviewed-by: Jia Zhu Signed-off-by: Christian Brauner --- fs/cachefiles/ondemand.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 6f815e7c50867..922cab1a314b2 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -182,6 +182,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) xas_store(&xas, NULL); xa_unlock(&cache->reqs); + info = req->object->ondemand; /* fail OPEN request if copen format is invalid */ ret = kstrtol(psize, 0, &size); if (ret) { @@ -201,7 +202,6 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) goto out; } - info = req->object->ondemand; spin_lock(&info->lock); /* * The anonymous fd was closed before copen ? Fail the request. 
@@ -241,6 +241,11 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) wake_up_all(&cache->daemon_pollwq); out: + spin_lock(&info->lock); + /* Need to set object close to avoid reopen status continuing */ + if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) + cachefiles_ondemand_set_object_close(req->object); + spin_unlock(&info->lock); complete(&req->done); return ret; } From 85e833cd7243bda7285492b0653c3abb1e2e757b Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Wed, 22 May 2024 19:43:07 +0800 Subject: [PATCH 093/537] cachefiles: flush all requests after setting CACHEFILES_DEAD In ondemand mode, when the daemon is processing an open request, if the kernel flags the cache as CACHEFILES_DEAD, the cachefiles_daemon_write() will always return -EIO, so the daemon can't pass the copen to the kernel. Then the kernel process that is waiting for the copen triggers a hung_task. Since the DEAD state is irreversible, it can only be exited by closing /dev/cachefiles. Therefore, after calling cachefiles_io_error() to mark the cache as CACHEFILES_DEAD, if in ondemand mode, flush all requests to avoid the above hungtask. We may still be able to read some of the cached data before closing the fd of /dev/cachefiles. Note that this relies on the patch that adds reference counting to the req, otherwise it may UAF. Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie") Signed-off-by: Baokun Li Link: https://lore.kernel.org/r/20240522114308.2402121-12-libaokun@huaweicloud.com Acked-by: Jeff Layton Signed-off-by: Christian Brauner --- fs/cachefiles/daemon.c | 2 +- fs/cachefiles/internal.h | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index ccb7b707ea4b7..06cdf1a8a16f6 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -133,7 +133,7 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file) return 0; } -static void cachefiles_flush_reqs(struct cachefiles_cache *cache) +void cachefiles_flush_reqs(struct cachefiles_cache *cache) { struct xarray *xa = &cache->reqs; struct cachefiles_req *req; diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 45c8bed605383..6845a90cdfcce 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -188,6 +188,7 @@ extern int cachefiles_has_space(struct cachefiles_cache *cache, * daemon.c */ extern const struct file_operations cachefiles_daemon_fops; +extern void cachefiles_flush_reqs(struct cachefiles_cache *cache); extern void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache); extern void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache); @@ -426,6 +427,8 @@ do { \ pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \ fscache_io_error((___cache)->cache); \ set_bit(CACHEFILES_DEAD, &(___cache)->flags); \ + if (cachefiles_in_ondemand_mode(___cache)) \ + cachefiles_flush_reqs(___cache); \ } while (0) #define cachefiles_io_error_obj(object, FMT, ...) \ From bc9dde6155464e906e630a0a5c17a4cab241ffbb Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Wed, 22 May 2024 19:43:08 +0800 Subject: [PATCH 094/537] cachefiles: make on-demand read killable Replacing wait_for_completion() with wait_for_completion_killable() in cachefiles_ondemand_send_req() allows us to kill processes that might trigger a hunk_task if the daemon is abnormal. But now only CACHEFILES_OP_READ is killable, because OP_CLOSE and OP_OPEN is initiated from kworker context and the signal is prohibited in these kworker. 
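Concretely, the wait in cachefiles_ondemand_send_req() becomes something like the following (condensed from the hunk below):

  wake_up_all(&cache->daemon_pollwq);
  wait:
          ret = wait_for_completion_killable(&req->done);
          if (!ret) {
                  ret = req->error;
          } else {
                  ret = -EINTR;
                  if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
                          /* another context owns the request and will
                           * complete it shortly; wait for that instead */
                          cpu_relax();
                          goto wait;
                  }
          }
          cachefiles_req_put(req);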
Note that when the req in xas changes, i.e. xas_load(&xas) != req, it means that a process will complete the current request soon, so wait again for the request to be completed. In addition, add the cachefiles_ondemand_finish_req() helper function to simplify the code. Suggested-by: Hou Tao Signed-off-by: Baokun Li Link: https://lore.kernel.org/r/20240522114308.2402121-13-libaokun@huaweicloud.com Acked-by: Jeff Layton Reviewed-by: Jia Zhu Signed-off-by: Christian Brauner --- fs/cachefiles/ondemand.c | 40 ++++++++++++++++++++++++++++------------ 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 922cab1a314b2..58bd80956c5a6 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -380,6 +380,20 @@ static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xa return NULL; } +static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req, + struct xa_state *xas, int err) +{ + if (unlikely(!xas || !req)) + return false; + + if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req) + return false; + + req->error = err; + complete(&req->done); + return true; +} + ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen) { @@ -443,16 +457,8 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, out: cachefiles_put_object(req->object, cachefiles_obj_put_read_req); /* Remove error request and CLOSE request has no reply */ - if (ret || msg->opcode == CACHEFILES_OP_CLOSE) { - xas_reset(&xas); - xas_lock(&xas); - if (xas_load(&xas) == req) { - req->error = ret; - complete(&req->done); - xas_store(&xas, NULL); - } - xas_unlock(&xas); - } + if (ret || msg->opcode == CACHEFILES_OP_CLOSE) + cachefiles_ondemand_finish_req(req, &xas, ret); cachefiles_req_put(req); return ret ? ret : n; } @@ -544,8 +550,18 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, goto out; wake_up_all(&cache->daemon_pollwq); - wait_for_completion(&req->done); - ret = req->error; +wait: + ret = wait_for_completion_killable(&req->done); + if (!ret) { + ret = req->error; + } else { + ret = -EINTR; + if (!cachefiles_ondemand_finish_req(req, &xas, ret)) { + /* Someone will complete it soon. */ + cpu_relax(); + goto wait; + } + } cachefiles_req_put(req); return ret; out: From b7d7f11a291830fdf69d3301075dd0fb347ced84 Mon Sep 17 00:00:00 2001 From: Nicolas Escande Date: Tue, 28 May 2024 16:26:05 +0200 Subject: [PATCH 095/537] wifi: mac80211: mesh: Fix leak of mesh_preq_queue objects The hwmp code use objects of type mesh_preq_queue, added to a list in ieee80211_if_mesh, to keep track of mpath we need to resolve. If the mpath gets deleted, ex mesh interface is removed, the entries in that list will never get cleaned. Fix this by flushing all corresponding items of the preq_queue in mesh_path_flush_pending(). This should take care of KASAN reports like this: unreferenced object 0xffff00000668d800 (size 128): comm "kworker/u8:4", pid 67, jiffies 4295419552 (age 1836.444s) hex dump (first 32 bytes): 00 1f 05 09 00 00 ff ff 00 d5 68 06 00 00 ff ff ..........h..... 8e 97 ea eb 3e b8 01 00 00 00 00 00 00 00 00 00 ....>........... 
backtrace: [<000000007302a0b6>] __kmem_cache_alloc_node+0x1e0/0x35c [<00000000049bd418>] kmalloc_trace+0x34/0x80 [<0000000000d792bb>] mesh_queue_preq+0x44/0x2a8 [<00000000c99c3696>] mesh_nexthop_resolve+0x198/0x19c [<00000000926bf598>] ieee80211_xmit+0x1d0/0x1f4 [<00000000fc8c2284>] __ieee80211_subif_start_xmit+0x30c/0x764 [<000000005926ee38>] ieee80211_subif_start_xmit+0x9c/0x7a4 [<000000004c86e916>] dev_hard_start_xmit+0x174/0x440 [<0000000023495647>] __dev_queue_xmit+0xe24/0x111c [<00000000cfe9ca78>] batadv_send_skb_packet+0x180/0x1e4 [<000000007bacc5d5>] batadv_v_elp_periodic_work+0x2f4/0x508 [<00000000adc3cd94>] process_one_work+0x4b8/0xa1c [<00000000b36425d1>] worker_thread+0x9c/0x634 [<0000000005852dd5>] kthread+0x1bc/0x1c4 [<000000005fccd770>] ret_from_fork+0x10/0x20 unreferenced object 0xffff000009051f00 (size 128): comm "kworker/u8:4", pid 67, jiffies 4295419553 (age 1836.440s) hex dump (first 32 bytes): 90 d6 92 0d 00 00 ff ff 00 d8 68 06 00 00 ff ff ..........h..... 36 27 92 e4 02 e0 01 00 00 58 79 06 00 00 ff ff 6'.......Xy..... backtrace: [<000000007302a0b6>] __kmem_cache_alloc_node+0x1e0/0x35c [<00000000049bd418>] kmalloc_trace+0x34/0x80 [<0000000000d792bb>] mesh_queue_preq+0x44/0x2a8 [<00000000c99c3696>] mesh_nexthop_resolve+0x198/0x19c [<00000000926bf598>] ieee80211_xmit+0x1d0/0x1f4 [<00000000fc8c2284>] __ieee80211_subif_start_xmit+0x30c/0x764 [<000000005926ee38>] ieee80211_subif_start_xmit+0x9c/0x7a4 [<000000004c86e916>] dev_hard_start_xmit+0x174/0x440 [<0000000023495647>] __dev_queue_xmit+0xe24/0x111c [<00000000cfe9ca78>] batadv_send_skb_packet+0x180/0x1e4 [<000000007bacc5d5>] batadv_v_elp_periodic_work+0x2f4/0x508 [<00000000adc3cd94>] process_one_work+0x4b8/0xa1c [<00000000b36425d1>] worker_thread+0x9c/0x634 [<0000000005852dd5>] kthread+0x1bc/0x1c4 [<000000005fccd770>] ret_from_fork+0x10/0x20 Fixes: 050ac52cbe1f ("mac80211: code for on-demand Hybrid Wireless Mesh Protocol") Signed-off-by: Nicolas Escande Link: https://msgid.link/20240528142605.1060566-1-nico.escande@gmail.com Signed-off-by: Johannes Berg --- net/mac80211/mesh_pathtbl.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index a6b62169f0848..c0a5c75cddcb9 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -1017,10 +1017,23 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata, */ void mesh_path_flush_pending(struct mesh_path *mpath) { + struct ieee80211_sub_if_data *sdata = mpath->sdata; + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct mesh_preq_queue *preq, *tmp; struct sk_buff *skb; while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL) mesh_path_discard_frame(mpath->sdata, skb); + + spin_lock_bh(&ifmsh->mesh_preq_queue_lock); + list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) { + if (ether_addr_equal(mpath->dst, preq->dst)) { + list_del(&preq->list); + kfree(preq); + --ifmsh->preq_queue_len; + } + } + spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); } /** From 6f6291f09a322c1c1578badac8072d049363f4e6 Mon Sep 17 00:00:00 2001 From: Nicolas Escande Date: Mon, 27 May 2024 16:17:59 +0200 Subject: [PATCH 096/537] wifi: mac80211: mesh: init nonpeer_pm to active by default in mesh sdata With a ath9k device I can see that: iw phy phy0 interface add mesh0 type mp ip link set mesh0 up iw dev mesh0 scan Will start a scan with the Power Management bit set in the Frame Control Field. 
This is because we set this bit depending on the nonpeer_pm variable of the mesh iface sdata and when there are no active links on the interface it remains to NL80211_MESH_POWER_UNKNOWN. As soon as links starts to be established, it wil switch to NL80211_MESH_POWER_ACTIVE as it is the value set by befault on the per sta nonpeer_pm field. As we want no power save by default, (as expressed with the per sta ini values), lets init it to the expected default value of NL80211_MESH_POWER_ACTIVE. Also please note that we cannot change the default value from userspace prior to establishing a link as using NL80211_CMD_SET_MESH_CONFIG will not work before NL80211_CMD_JOIN_MESH has been issued. So too late for our initial scan. Signed-off-by: Nicolas Escande Link: https://msgid.link/20240527141759.299411-1-nico.escande@gmail.com Signed-off-by: Johannes Berg --- net/mac80211/mesh.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index cbc9b5e40cb35..6d4510221c98e 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1776,6 +1776,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) ifmsh->last_preq = jiffies; ifmsh->next_perr = jiffies; ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE; + ifmsh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE; /* Allocate all mesh structures when creating the first mesh interface. */ if (!mesh_allocated) ieee80211s_init(); From 44c06bbde6443de206b30f513100b5670b23fc5e Mon Sep 17 00:00:00 2001 From: Remi Pommarel Date: Wed, 29 May 2024 08:57:53 +0200 Subject: [PATCH 097/537] wifi: mac80211: Fix deadlock in ieee80211_sta_ps_deliver_wakeup() The ieee80211_sta_ps_deliver_wakeup() function takes sta->ps_lock to synchronizes with ieee80211_tx_h_unicast_ps_buf() which is called from softirq context. However using only spin_lock() to get sta->ps_lock in ieee80211_sta_ps_deliver_wakeup() does not prevent softirq to execute on this same CPU, to run ieee80211_tx_h_unicast_ps_buf() and try to take this same lock ending in deadlock. Below is an example of rcu stall that arises in such situation. 
rcu: INFO: rcu_sched self-detected stall on CPU rcu: 2-....: (42413413 ticks this GP) idle=b154/1/0x4000000000000000 softirq=1763/1765 fqs=21206996 rcu: (t=42586894 jiffies g=2057 q=362405 ncpus=4) CPU: 2 PID: 719 Comm: wpa_supplicant Tainted: G W 6.4.0-02158-g1b062f552873 #742 Hardware name: RPT (r1) (DT) pstate: 00000005 (nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) pc : queued_spin_lock_slowpath+0x58/0x2d0 lr : invoke_tx_handlers_early+0x5b4/0x5c0 sp : ffff00001ef64660 x29: ffff00001ef64660 x28: ffff000009bc1070 x27: ffff000009bc0ad8 x26: ffff000009bc0900 x25: ffff00001ef647a8 x24: 0000000000000000 x23: ffff000009bc0900 x22: ffff000009bc0900 x21: ffff00000ac0e000 x20: ffff00000a279e00 x19: ffff00001ef646e8 x18: 0000000000000000 x17: ffff800016468000 x16: ffff00001ef608c0 x15: 0010533c93f64f80 x14: 0010395c9faa3946 x13: 0000000000000000 x12: 00000000fa83b2da x11: 000000012edeceea x10: ffff0000010fbe00 x9 : 0000000000895440 x8 : 000000000010533c x7 : ffff00000ad8b740 x6 : ffff00000c350880 x5 : 0000000000000007 x4 : 0000000000000001 x3 : 0000000000000000 x2 : 0000000000000000 x1 : 0000000000000001 x0 : ffff00000ac0e0e8 Call trace: queued_spin_lock_slowpath+0x58/0x2d0 ieee80211_tx+0x80/0x12c ieee80211_tx_pending+0x110/0x278 tasklet_action_common.constprop.0+0x10c/0x144 tasklet_action+0x20/0x28 _stext+0x11c/0x284 ____do_softirq+0xc/0x14 call_on_irq_stack+0x24/0x34 do_softirq_own_stack+0x18/0x20 do_softirq+0x74/0x7c __local_bh_enable_ip+0xa0/0xa4 _ieee80211_wake_txqs+0x3b0/0x4b8 __ieee80211_wake_queue+0x12c/0x168 ieee80211_add_pending_skbs+0xec/0x138 ieee80211_sta_ps_deliver_wakeup+0x2a4/0x480 ieee80211_mps_sta_status_update.part.0+0xd8/0x11c ieee80211_mps_sta_status_update+0x18/0x24 sta_apply_parameters+0x3bc/0x4c0 ieee80211_change_station+0x1b8/0x2dc nl80211_set_station+0x444/0x49c genl_family_rcv_msg_doit.isra.0+0xa4/0xfc genl_rcv_msg+0x1b0/0x244 netlink_rcv_skb+0x38/0x10c genl_rcv+0x34/0x48 netlink_unicast+0x254/0x2bc netlink_sendmsg+0x190/0x3b4 ____sys_sendmsg+0x1e8/0x218 ___sys_sendmsg+0x68/0x8c __sys_sendmsg+0x44/0x84 __arm64_sys_sendmsg+0x20/0x28 do_el0_svc+0x6c/0xe8 el0_svc+0x14/0x48 el0t_64_sync_handler+0xb0/0xb4 el0t_64_sync+0x14c/0x150 Using spin_lock_bh()/spin_unlock_bh() instead prevents softirq to raise on the same CPU that is holding the lock. Fixes: 1d147bfa6429 ("mac80211: fix AP powersave TX vs. 
wakeup race") Signed-off-by: Remi Pommarel Link: https://msgid.link/8e36fe07d0fbc146f89196cd47a53c8a0afe84aa.1716910344.git.repk@triplefau.lt Signed-off-by: Johannes Berg --- net/mac80211/sta_info.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index da5fdd6f5c852..aa22f09e6d145 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1724,7 +1724,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) skb_queue_head_init(&pending); /* sync with ieee80211_tx_h_unicast_ps_buf */ - spin_lock(&sta->ps_lock); + spin_lock_bh(&sta->ps_lock); /* Send all buffered frames to the station */ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { int count = skb_queue_len(&pending), tmp; @@ -1753,7 +1753,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) */ clear_sta_flag(sta, WLAN_STA_PSPOLL); clear_sta_flag(sta, WLAN_STA_UAPSD); - spin_unlock(&sta->ps_lock); + spin_unlock_bh(&sta->ps_lock); atomic_dec(&ps->num_sta_ps); From 4dc3a3893dae5a7f73e5809273aca0f1f3548d55 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 23 May 2024 12:05:33 +0200 Subject: [PATCH 098/537] wifi: cfg80211: validate HE operation element parsing Validate that the HE operation element has the correct length before parsing it. Cc: stable@vger.kernel.org Fixes: 645f3d85129d ("wifi: cfg80211: handle UHB AP and STA power type") Reviewed-by: Miriam Rachel Korenblit Link: https://msgid.link/20240523120533.677025eb4a92.I44c091029ef113c294e8fe8b9bf871bf5dbeeb27@changeid Signed-off-by: Johannes Berg --- net/wireless/scan.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 127853877a0ad..8daed8232b054 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -2128,7 +2128,8 @@ static bool cfg80211_6ghz_power_type_valid(const u8 *ie, size_t ielen, struct ieee80211_he_operation *he_oper; tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen); - if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) { + if (tmp && tmp->datalen >= sizeof(*he_oper) + 1 && + tmp->datalen >= ieee80211_he_oper_size(tmp->data + 1)) { const struct ieee80211_he_6ghz_oper *he_6ghz_oper; he_oper = (void *)&tmp->data[1]; From e296c95eac655008d5a709b8cf54d0018da1c916 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 22 May 2024 12:41:25 +0200 Subject: [PATCH 099/537] wifi: cfg80211: fully move wiphy work to unbound workqueue Previously I had moved the wiphy work to the unbound system workqueue, but missed that when it restarts and during resume it was still using the normal system workqueue. Fix that. 
Fixes: 91d20ab9d9ca ("wifi: cfg80211: use system_unbound_wq for wiphy work") Reviewed-by: Miriam Rachel Korenblit Link: https://msgid.link/20240522124126.7ca959f2cbd3.I3e2a71ef445d167b84000ccf934ea245aef8d395@changeid Signed-off-by: Johannes Berg --- net/wireless/core.c | 2 +- net/wireless/sysfs.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/net/wireless/core.c b/net/wireless/core.c index 3fb1b637352a9..4b1f45e3070e0 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -431,7 +431,7 @@ static void cfg80211_wiphy_work(struct work_struct *work) if (wk) { list_del_init(&wk->entry); if (!list_empty(&rdev->wiphy_work_list)) - schedule_work(work); + queue_work(system_unbound_wq, work); spin_unlock_irq(&rdev->wiphy_work_lock); wk->func(&rdev->wiphy, wk); diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 565511a3f461e..62f26618f6747 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -5,7 +5,7 @@ * * Copyright 2005-2006 Jiri Benc * Copyright 2006 Johannes Berg - * Copyright (C) 2020-2021, 2023 Intel Corporation + * Copyright (C) 2020-2021, 2023-2024 Intel Corporation */ #include @@ -137,7 +137,7 @@ static int wiphy_resume(struct device *dev) if (rdev->wiphy.registered && rdev->ops->resume) ret = rdev_resume(rdev); rdev->suspended = false; - schedule_work(&rdev->wiphy_work); + queue_work(system_unbound_wq, &rdev->wiphy_work); wiphy_unlock(&rdev->wiphy); if (ret) From 642f89daa34567d02f312d03e41523a894906dae Mon Sep 17 00:00:00 2001 From: Remi Pommarel Date: Tue, 21 May 2024 21:47:26 +0200 Subject: [PATCH 100/537] wifi: cfg80211: Lock wiphy in cfg80211_get_station Wiphy should be locked before calling rdev_get_station() (see lockdep assert in ieee80211_get_station()). This fixes the following kernel NULL dereference: Unable to handle kernel NULL pointer dereference at virtual address 0000000000000050 Mem abort info: ESR = 0x0000000096000006 EC = 0x25: DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 FSC = 0x06: level 2 translation fault Data abort info: ISV = 0, ISS = 0x00000006 CM = 0, WnR = 0 user pgtable: 4k pages, 48-bit VAs, pgdp=0000000003001000 [0000000000000050] pgd=0800000002dca003, p4d=0800000002dca003, pud=08000000028e9003, pmd=0000000000000000 Internal error: Oops: 0000000096000006 [#1] SMP Modules linked in: netconsole dwc3_meson_g12a dwc3_of_simple dwc3 ip_gre gre ath10k_pci ath10k_core ath9k ath9k_common ath9k_hw ath CPU: 0 PID: 1091 Comm: kworker/u8:0 Not tainted 6.4.0-02144-g565f9a3a7911-dirty #705 Hardware name: RPT (r1) (DT) Workqueue: bat_events batadv_v_elp_throughput_metric_update pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) pc : ath10k_sta_statistics+0x10/0x2dc [ath10k_core] lr : sta_set_sinfo+0xcc/0xbd4 sp : ffff000007b43ad0 x29: ffff000007b43ad0 x28: ffff0000071fa900 x27: ffff00000294ca98 x26: ffff000006830880 x25: ffff000006830880 x24: ffff00000294c000 x23: 0000000000000001 x22: ffff000007b43c90 x21: ffff800008898acc x20: ffff00000294c6e8 x19: ffff000007b43c90 x18: 0000000000000000 x17: 445946354d552d78 x16: 62661f7200000000 x15: 57464f445946354d x14: 0000000000000000 x13: 00000000000000e3 x12: d5f0acbcebea978e x11: 00000000000000e3 x10: 000000010048fe41 x9 : 0000000000000000 x8 : ffff000007b43d90 x7 : 000000007a1e2125 x6 : 0000000000000000 x5 : ffff0000024e0900 x4 : ffff800000a0250c x3 : ffff000007b43c90 x2 : ffff00000294ca98 x1 : ffff000006831920 x0 : 0000000000000000 Call trace: ath10k_sta_statistics+0x10/0x2dc [ath10k_core] sta_set_sinfo+0xcc/0xbd4 
ieee80211_get_station+0x2c/0x44 cfg80211_get_station+0x80/0x154 batadv_v_elp_get_throughput+0x138/0x1fc batadv_v_elp_throughput_metric_update+0x1c/0xa4 process_one_work+0x1ec/0x414 worker_thread+0x70/0x46c kthread+0xdc/0xe0 ret_from_fork+0x10/0x20 Code: a9bb7bfd 910003fd a90153f3 f9411c40 (f9402814) This happens because STA has time to disconnect and reconnect before batadv_v_elp_throughput_metric_update() delayed work gets scheduled. In this situation, ath10k_sta_state() can be in the middle of resetting arsta data when the work queue get chance to be scheduled and ends up accessing it. Locking wiphy prevents that. Fixes: 7406353d43c8 ("cfg80211: implement cfg80211_get_station cfg80211 API") Signed-off-by: Remi Pommarel Reviewed-by: Nicolas Escande Acked-by: Antonio Quartulli Link: https://msgid.link/983b24a6a176e0800c01aedcd74480d9b551cb13.1716046653.git.repk@triplefau.lt Signed-off-by: Johannes Berg --- net/wireless/util.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/wireless/util.c b/net/wireless/util.c index 2bde8a3546313..082c6f9c5416e 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -2549,6 +2549,7 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr, { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; + int ret; wdev = dev->ieee80211_ptr; if (!wdev) @@ -2560,7 +2561,11 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr, memset(sinfo, 0, sizeof(*sinfo)); - return rdev_get_station(rdev, dev, mac_addr, sinfo); + wiphy_lock(&rdev->wiphy); + ret = rdev_get_station(rdev, dev, mac_addr, sinfo); + wiphy_unlock(&rdev->wiphy); + + return ret; } EXPORT_SYMBOL(cfg80211_get_station); From ab904521f4de52fef4f179d2dfc1877645ef5f5c Mon Sep 17 00:00:00 2001 From: Lin Ma Date: Tue, 21 May 2024 15:50:59 +0800 Subject: [PATCH 101/537] wifi: cfg80211: pmsr: use correct nla_get_uX functions The commit 9bb7e0f24e7e ("cfg80211: add peer measurement with FTM initiator API") defines four attributes NL80211_PMSR_FTM_REQ_ATTR_ {NUM_BURSTS_EXP}/{BURST_PERIOD}/{BURST_DURATION}/{FTMS_PER_BURST} in following ways. static const struct nla_policy nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = { ... [NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP] = NLA_POLICY_MAX(NLA_U8, 15), [NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD] = { .type = NLA_U16 }, [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] = NLA_POLICY_MAX(NLA_U8, 15), [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = NLA_POLICY_MAX(NLA_U8, 31), ... }; That is, those attributes are expected to be NLA_U8 and NLA_U16 types. However, the consumers of these attributes in `pmsr_parse_ftm` blindly all use `nla_get_u32`, which is incorrect and causes functionality issues on little-endian platforms. Hence, fix them with the correct `nla_get_u8` and `nla_get_u16` functions. 
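The rule being applied is simply that the accessor width must match the width declared in nl80211_pmsr_ftm_req_attr_policy; for example, for the burst period (declared NLA_U16 above):

  if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
          out->ftm.burst_period =
                  nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);

  /* likewise NUM_BURSTS_EXP, BURST_DURATION and NUM_FTMR_RETRIES, all
   * declared NLA_U8, switch to nla_get_u8() */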
Fixes: 9bb7e0f24e7e ("cfg80211: add peer measurement with FTM initiator API") Signed-off-by: Lin Ma Link: https://msgid.link/20240521075059.47999-1-linma@zju.edu.cn Signed-off-by: Johannes Berg --- net/wireless/pmsr.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c index e106dcea39778..c569c37da3175 100644 --- a/net/wireless/pmsr.c +++ b/net/wireless/pmsr.c @@ -56,7 +56,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev, out->ftm.burst_period = 0; if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]) out->ftm.burst_period = - nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]); + nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]); out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP]; if (out->ftm.asap && !capa->ftm.asap) { @@ -75,7 +75,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev, out->ftm.num_bursts_exp = 0; if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]) out->ftm.num_bursts_exp = - nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]); + nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]); if (capa->ftm.max_bursts_exponent >= 0 && out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) { @@ -88,7 +88,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev, out->ftm.burst_duration = 15; if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]) out->ftm.burst_duration = - nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]); + nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]); out->ftm.ftms_per_burst = 0; if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST]) @@ -107,7 +107,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev, out->ftm.ftmr_retries = 3; if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]) out->ftm.ftmr_retries = - nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]); + nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]); out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI]; if (out->ftm.request_lci && !capa->ftm.request_lci) { From 02c665f048a439c0d58cc45334c94634bd7c18e6 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 15 May 2024 13:34:10 +0200 Subject: [PATCH 102/537] wifi: mac80211: apply mcast rate only if interface is up If the interface isn't enabled, don't apply multicast rate changes immediately. 
Reported-by: syzbot+de87c09cc7b964ea2e23@syzkaller.appspotmail.com Link: https://msgid.link/20240515133410.d6cffe5756cc.I47b624a317e62bdb4609ff7fa79403c0c444d32d@changeid Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index b08e5d7687e3f..99abbb9e8477b 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2958,8 +2958,9 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev, memcpy(sdata->vif.bss_conf.mcast_rate, rate, sizeof(int) * NUM_NL80211_BANDS); - ieee80211_link_info_change_notify(sdata, &sdata->deflink, - BSS_CHANGED_MCAST_RATE); + if (ieee80211_sdata_running(sdata)) + ieee80211_link_info_change_notify(sdata, &sdata->deflink, + BSS_CHANGED_MCAST_RATE); return 0; } From 177c6ae9725d783f9e96f02593ce8fb2639be22f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 15 May 2024 13:53:19 +0200 Subject: [PATCH 103/537] wifi: mac80211: handle tasklet frames before stopping The code itself doesn't want to handle frames from the driver if it's already stopped, but if the tasklet was queued before and runs after the stop, then all bets are off. Flush queues before actually stopping, RX should be off at this point since all the interfaces are removed already, etc. Reported-by: syzbot+8830db5d3593b5546d2e@syzkaller.appspotmail.com Link: https://msgid.link/20240515135318.b05f11385c9a.I41c1b33a2e1814c3a7ef352cd7f2951b91785617@changeid Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 2 ++ net/mac80211/main.c | 10 ++++++++-- net/mac80211/util.c | 2 ++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index eb62b7d4b4f7e..3cedfdc9099b7 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1845,6 +1845,8 @@ void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata, void ieee80211_configure_filter(struct ieee80211_local *local); u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); +void ieee80211_handle_queued_frames(struct ieee80211_local *local); + u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local); int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb, u64 *cookie, gfp_t gfp); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 4eaea0a9975b4..1132dea0e290e 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -423,9 +423,8 @@ u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) BSS_CHANGED_ERP_SLOT; } -static void ieee80211_tasklet_handler(struct tasklet_struct *t) +void ieee80211_handle_queued_frames(struct ieee80211_local *local) { - struct ieee80211_local *local = from_tasklet(local, t, tasklet); struct sk_buff *skb; while ((skb = skb_dequeue(&local->skb_queue)) || @@ -450,6 +449,13 @@ static void ieee80211_tasklet_handler(struct tasklet_struct *t) } } +static void ieee80211_tasklet_handler(struct tasklet_struct *t) +{ + struct ieee80211_local *local = from_tasklet(local, t, tasklet); + + ieee80211_handle_queued_frames(local); +} + static void ieee80211_restart_work(struct work_struct *work) { struct ieee80211_local *local = diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 0b893e9589594..283bfc99417e5 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1567,6 +1567,8 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, void ieee80211_stop_device(struct ieee80211_local *local) { + 
ieee80211_handle_queued_frames(local); + ieee80211_led_radio(local, false); ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO); From f7a8b10bfd614d7a9a16fbe80d28ead4f063cb00 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 10 May 2024 11:37:38 +0200 Subject: [PATCH 104/537] wifi: cfg80211: fix 6 GHz scan request building The 6 GHz scan request struct allocated by cfg80211_scan_6ghz() is meant to be formed this way: [base struct][channels][ssids][6ghz_params] It is allocated with [channels] as the maximum number of channels supported by the driver in the 6 GHz band, since allocation is before knowing how many there will be. However, the inner pointers are set incorrectly: initially, the 6 GHz scan parameters pointer is set: [base struct][channels] ^ scan_6ghz_params and later the SSID pointer is set to the end of the actually _used_ channels. [base struct][channels] ^ ssids If many APs were to be discovered, and many channels used, and there were many SSIDs, then the SSIDs could overlap the 6 GHz parameters. Additionally, the request->ssids for most of the function points to the original request still (given the struct copy) but is used normally, which is confusing. Clear this up, by actually using the allocated space for 6 GHz parameters _after_ the SSIDs, and set up the SSIDs initially so they are used more clearly. Just like in nl80211.c, set them only if there actually are SSIDs though. Finally, also copy the elements (ie/ie_len) so they're part of the same request, not pointing to the old request. Co-developed-by: Miri Korenblit Signed-off-by: Miri Korenblit Reviewed-by: Ilan Peer Signed-off-by: Johannes Berg Link: https://msgid.link/20240510113738.4190692ef4ee.I0cb19188be17a8abd029805e3373c0a7777c214c@changeid Signed-off-by: Johannes Berg --- net/wireless/rdev-ops.h | 6 +++++- net/wireless/scan.c | 47 +++++++++++++++++++++++++++-------------- 2 files changed, 36 insertions(+), 17 deletions(-) diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 43897a5269b6a..755af47b88b91 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -2,7 +2,7 @@ /* * Portions of this file * Copyright(c) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018, 2021-2023 Intel Corporation + * Copyright (C) 2018, 2021-2024 Intel Corporation */ #ifndef __CFG80211_RDEV_OPS #define __CFG80211_RDEV_OPS @@ -458,6 +458,10 @@ static inline int rdev_scan(struct cfg80211_registered_device *rdev, struct cfg80211_scan_request *request) { int ret; + + if (WARN_ON_ONCE(!request->n_ssids && request->ssids)) + return -EINVAL; + trace_rdev_scan(&rdev->wiphy, request); ret = rdev->ops->scan(&rdev->wiphy, request); trace_rdev_return_int(&rdev->wiphy, ret); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 8daed8232b054..2f2a3163968a7 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -812,6 +812,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev) LIST_HEAD(coloc_ap_list); bool need_scan_psc = true; const struct ieee80211_sband_iftype_data *iftd; + size_t size, offs_ssids, offs_6ghz_params, offs_ies; rdev_req->scan_6ghz = true; @@ -877,10 +878,15 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev) spin_unlock_bh(&rdev->bss_lock); } - request = kzalloc(struct_size(request, channels, n_channels) + - sizeof(*request->scan_6ghz_params) * count + - sizeof(*request->ssids) * rdev_req->n_ssids, - GFP_KERNEL); + size = struct_size(request, channels, n_channels); + offs_ssids = size; + size += 
sizeof(*request->ssids) * rdev_req->n_ssids; + offs_6ghz_params = size; + size += sizeof(*request->scan_6ghz_params) * count; + offs_ies = size; + size += rdev_req->ie_len; + + request = kzalloc(size, GFP_KERNEL); if (!request) { cfg80211_free_coloc_ap_list(&coloc_ap_list); return -ENOMEM; @@ -888,8 +894,26 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev) *request = *rdev_req; request->n_channels = 0; - request->scan_6ghz_params = - (void *)&request->channels[n_channels]; + request->n_6ghz_params = 0; + if (rdev_req->n_ssids) { + /* + * Add the ssids from the parent scan request to the new + * scan request, so the driver would be able to use them + * in its probe requests to discover hidden APs on PSC + * channels. + */ + request->ssids = (void *)request + offs_ssids; + memcpy(request->ssids, rdev_req->ssids, + sizeof(*request->ssids) * request->n_ssids); + } + request->scan_6ghz_params = (void *)request + offs_6ghz_params; + + if (rdev_req->ie_len) { + void *ie = (void *)request + offs_ies; + + memcpy(ie, rdev_req->ie, rdev_req->ie_len); + request->ie = ie; + } /* * PSC channels should not be scanned in case of direct scan with 1 SSID @@ -978,17 +1002,8 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev) if (request->n_channels) { struct cfg80211_scan_request *old = rdev->int_scan_req; - rdev->int_scan_req = request; - /* - * Add the ssids from the parent scan request to the new scan - * request, so the driver would be able to use them in its - * probe requests to discover hidden APs on PSC channels. - */ - request->ssids = (void *)&request->channels[request->n_channels]; - request->n_ssids = rdev_req->n_ssids; - memcpy(request->ssids, rdev_req->ssids, sizeof(*request->ssids) * - request->n_ssids); + rdev->int_scan_req = request; /* * If this scan follows a previous scan, save the scan start From 8ecc4d7a7cd3e9704b63b8e4f6cd8b6b7314210f Mon Sep 17 00:00:00 2001 From: Aditya Kumar Singh Date: Thu, 9 May 2024 08:55:55 +0530 Subject: [PATCH 105/537] wifi: mac80211: pass proper link id for channel switch started notification Original changes[1] posted is having proper changes. However, at the same time, there was chandef puncturing changes which had a conflict with this. While applying, two errors crept in - a) Whitespace error. b) Link ID being passed to channel switch started notifier function is 0. However proper link ID is present in the function. Fix these now. 
[1] https://lore.kernel.org/all/20240130140918.1172387-5-quic_adisi@quicinc.com/ Fixes: 1a96bb4e8a79 ("wifi: mac80211: start and finalize channel switch on link basis") Signed-off-by: Aditya Kumar Singh Link: https://msgid.link/20240509032555.263933-1-quic_adisi@quicinc.com Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 99abbb9e8477b..83ad6c9709fe6 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -4017,7 +4017,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, goto out; } - link_data->csa_chanreq = chanreq; + link_data->csa_chanreq = chanreq; link_conf->csa_active = true; if (params->block_tx && @@ -4028,7 +4028,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, } cfg80211_ch_switch_started_notify(sdata->dev, - &link_data->csa_chanreq.oper, 0, + &link_data->csa_chanreq.oper, link_id, params->count, params->block_tx); if (changed) { From 92158790ce4391ce4c35d8dfbce759195e4724cb Mon Sep 17 00:00:00 2001 From: Miri Korenblit Date: Sun, 12 May 2024 15:25:00 +0300 Subject: [PATCH 106/537] wifi: iwlwifi: mvm: don't initialize csa_work twice The initialization of this worker moved to iwl_mvm_mac_init_mvmvif but we removed only from the pre-MLD version of the add_interface callback. Remove it also from the MLD version. Fixes: 0bcc2155983e ("wifi: iwlwifi: mvm: init vif works only once") Signed-off-by: Miri Korenblit Reviewed-by: Johannes Berg Link: https://msgid.link/20240512152312.4f15b41604f0.Iec912158e5a706175531d3736d77d25adf02fba4@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 0a3b7284eeddf..fcfd2dd7568e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -75,8 +75,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, goto out_free_bf; iwl_mvm_tcm_add_vif(mvm, vif); - INIT_DELAYED_WORK(&mvmvif->csa_work, - iwl_mvm_channel_switch_disconnect_wk); if (vif->type == NL80211_IFTYPE_MONITOR) { mvm->monitor_on = true; From 98b7017ddb914a32152ed9aec7468386fff85f8f Mon Sep 17 00:00:00 2001 From: Shaul Triebitz Date: Sun, 12 May 2024 15:25:04 +0300 Subject: [PATCH 107/537] wifi: iwlwifi: mvm: always set the TWT IE offset In beacon template version 14, make sure to always set the TWT IE offset before sending the beacon template command, also in the debugfs inject_beacon_ie path. If the TWT IE does not exist, the offset will be set to zero. 
Fixes: bf0212fd8faa ("wifi: iwlwifi: mvm: add beacon template version 14") Signed-off-by: Shaul Triebitz Signed-off-by: Miri Korenblit Link: https://msgid.link/20240512152312.eb27175c345a.If30ef24aba10fe47fd42a7a9703eb8903035e294@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 9 +++++++++ drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1 + 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 79f4ac8cbc729..8101ecbb478b6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1617,6 +1617,15 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) &beacon_cmd.tim_size, beacon->data, beacon->len); + if (iwl_fw_lookup_cmd_ver(mvm->fw, + BEACON_TEMPLATE_CMD, 0) >= 14) { + u32 offset = iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_S1G_TWT, + beacon->len); + + beacon_cmd.btwt_offset = cpu_to_le32(offset); + } + iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, sizeof(beacon_cmd)); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 5a06f887769a6..5144fa0f96b0e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -873,7 +873,7 @@ void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, } } -static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) +u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) { struct ieee80211_mgmt *mgmt = (void *)beacon; const u8 *ie; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 1f58c727fa632..0a1959bd40799 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1758,6 +1758,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2, u64 *boottime, ktime_t *realtime); u32 iwl_mvm_get_systime(struct iwl_mvm *mvm); +u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size); /* Tx / Host Commands */ int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, From 788e4c75f831d06fcfbbec1d455fac429521e607 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 12 May 2024 07:31:06 +0300 Subject: [PATCH 108/537] wifi: iwlwifi: mvm: fix a crash on 7265 Since IWL_FW_CMD_VER_UNKNOWN = 99, then my change to consider cmd_ver >= 7 instead of cmd_ver = 7 included also firmwares that don't advertise the command version at all. This made us send a command with a bad size and because of that, the firmware hit a BAD_COMMAND immediately after handling the REDUCE_TX_POWER_CMD command. 
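To make the failure mode concrete, here is a small annotated before/after sketch based only on the description above (no new API is introduced): iwl_fw_lookup_cmd_ver() returns its third argument when the firmware does not advertise a version, so a fallback of IWL_FW_CMD_VER_UNKNOWN (99) itself satisfies the ">= 7" test:

  /* before: firmware without version info gets cmd_ver == 99 */
  u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, IWL_FW_CMD_VER_UNKNOWN);
  if (cmd_ver >= 7)               /* also true for 99, i.e. "unknown" */
          len = sizeof(cmd.v7);   /* wrong layout for old firmware */

  /* after: fall back to a known-old version so firmware that does not
   * advertise the command version stays on the legacy layout */
  u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);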
Fixes: 8f892e225f41 ("wifi: iwlwifi: mvm: support iwl_dev_tx_power_cmd_v8") Signed-off-by: Emmanuel Grumbach Reviewed-by: Johannes Berg Signed-off-by: Miri Korenblit Link: https://msgid.link/20240512072733.eb20ff5050d3.Ie4fc6f5496cd296fd6ff20d15e98676f28a3cccd@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index e7f5978ef2d71..ca7fcae6995ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -895,8 +895,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) int ret; u16 len = 0; u32 n_subbands; - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, - IWL_FW_CMD_VER_UNKNOWN); + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3); + if (cmd_ver >= 7) { len = sizeof(cmd.v7); n_subbands = IWL_NUM_SUB_BANDS_V2; From b7ffca99313d856f7d1cc89038d9061b128e8e97 Mon Sep 17 00:00:00 2001 From: Yedidya Benshimol Date: Fri, 10 May 2024 17:06:29 +0300 Subject: [PATCH 109/537] wifi: iwlwifi: mvm: d3: fix WoWLAN command version lookup After moving from commands to notificaitons in the d3 resume flow, removing the WOWLAN_GET_STATUSES and REPLY_OFFLOADS_QUERY_CMD causes the return of the default value when looking up their version. Returning zero here results in the driver sending the not supported NON_QOS_TX_COUNTER_CMD. Signed-off-by: Yedidya Benshimol Reviewed-by: Gregory Greenman Signed-off-by: Miri Korenblit Link: https://msgid.link/20240510170500.8cabfd580614.If3a0db9851f56041f8f5360959354abd5379224a@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 71e6b06481a93..e37ae34acba60 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -2341,7 +2341,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm, out: if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, - WOWLAN_GET_STATUSES, 0) < 10) { + WOWLAN_GET_STATUSES, + IWL_FW_CMD_VER_UNKNOWN) < 10) { mvmvif->seqno_valid = true; /* +0x10 because the set API expects next-to-use, not last-used */ mvmvif->seqno = status->non_qos_seq_ctr + 0x10; From 4a7aace2899711592327463c1a29ffee44fcc66e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 10 May 2024 17:06:33 +0300 Subject: [PATCH 110/537] wifi: iwlwifi: mvm: revert gen2 TX A-MPDU size to 64 We don't actually support >64 even for HE devices, so revert back to 64. This fixes an issue where the session is refused because the queue is configured differently from the actual session later. 
Fixes: 514c30696fbc ("iwlwifi: add support for IEEE802.11ax") Signed-off-by: Johannes Berg Reviewed-by: Liad Kaufman Reviewed-by: Luciano Coelho Signed-off-by: Miri Korenblit Link: https://msgid.link/20240510170500.52f7b4cf83aa.If47e43adddf7fe250ed7f5571fbb35d8221c7c47@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index 376b23b409dca..6cd4ec4d8f344 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -122,13 +122,8 @@ enum { #define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) #define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) -/* - * FIXME - various places in firmware API still use u8, - * e.g. LQ command and SCD config command. - * This should be 256 instead. - */ -#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255) -#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255) +#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64) +#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64) #define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) #define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ From 0f2e9f6f21d1ff292363cdfb5bc4d492eeaff76e Mon Sep 17 00:00:00 2001 From: Mordechay Goodstein Date: Fri, 10 May 2024 17:06:35 +0300 Subject: [PATCH 111/537] wifi: iwlwifi: mvm: set properly mac header In the driver we only use skb_put* for adding data to the skb, hence data never moves and skb_reset_mac_haeder would set mac_header to the first time data was added and not to mac80211 header, fix this my using the actual len of bytes added for setting the mac header. Fixes: 3f7a9d577d47 ("wifi: iwlwifi: mvm: simplify by using SKB MAC header pointer") Signed-off-by: Mordechay Goodstein Reviewed-by: Johannes Berg Signed-off-by: Miri Korenblit Link: https://msgid.link/20240510170500.12f2de2909c3.I72a819b96f2fe55bde192a8fd31a4b96c301aa73@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index d78af29281522..489cfb0a4ab1e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -2450,8 +2450,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, * * We mark it as mac header, for upper layers to know where * all radio tap header ends. + * + * Since data doesn't move data while putting data on skb and that is + * the only way we use, data + len is the next place that hdr would be put */ - skb_reset_mac_header(skb); + skb_set_mac_header(skb, skb->len); /* * Override the nss from the rx_vec since the rate_n_flags has From 87821b67dea87addbc4ab093ba752753b002176a Mon Sep 17 00:00:00 2001 From: Shahar S Matityahu Date: Fri, 10 May 2024 17:06:39 +0300 Subject: [PATCH 112/537] wifi: iwlwifi: dbg_ini: move iwl_dbg_tlv_free outside of debugfs ifdef The driver should call iwl_dbg_tlv_free even if debugfs is not defined since ini mode does not depend on debugfs ifdef. 
Fixes: 68f6f492c4fa ("iwlwifi: trans: support loading ini TLVs from external file") Signed-off-by: Shahar S Matityahu Reviewed-by: Luciano Coelho Signed-off-by: Miri Korenblit Link: https://msgid.link/20240510170500.c8e3723f55b0.I5e805732b0be31ee6b83c642ec652a34e974ff10@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 33654f228ee87..d156a9c641940 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1815,8 +1815,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) err_fw: #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove_recursive(drv->dbgfs_drv); - iwl_dbg_tlv_free(drv->trans); #endif + iwl_dbg_tlv_free(drv->trans); kfree(drv); err: return ERR_PTR(ret); From cc3ba78f202de9752aceb16342ab62bdfbffac7e Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Mon, 13 May 2024 13:27:08 +0300 Subject: [PATCH 113/537] wifi: iwlwifi: mvm: remove stale STA link data during restart If pre-recovery mac80211 tried to disable a link but this disablement failed, then there might be a mismatch between mac80211 assuming the link has been disabled and the driver still having the data around. During recover itself, that is not a problem, but should the link be activated again at a later point, iwlwifi will refuse the activation as it detects the inconsistent state. Solve this corner-case by iterating the station in the restart cleanup handler. Signed-off-by: Benjamin Berg Signed-off-by: Miri Korenblit Link: https://msgid.link/20240513132416.d2fd60338055.I840d4fdce5fd49fe69896d928b071067e3730259@changeid Signed-off-by: Johannes Berg --- .../net/wireless/intel/iwlwifi/mvm/mac80211.c | 37 +++++++++++++++++++ .../net/wireless/intel/iwlwifi/mvm/mld-sta.c | 10 ++--- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 5 +++ 3 files changed, 47 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 486a6b8f3c97f..628b50ee02443 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1128,6 +1128,39 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL); } +static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta) +{ + struct iwl_mvm *mvm = data; + struct iwl_mvm_sta *mvm_sta; + struct ieee80211_vif *vif; + int link_id; + + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + vif = mvm_sta->vif; + + if (!sta->valid_links) + return; + + for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) { + struct iwl_mvm_link_sta *mvm_link_sta; + + mvm_link_sta = + rcu_dereference_check(mvm_sta->link[link_id], + lockdep_is_held(&mvm->mutex)); + if (mvm_link_sta && !(vif->active_links & BIT(link_id))) { + /* + * We have a link STA but the link is inactive in + * mac80211. This will happen if we failed to + * deactivate the link but mac80211 roll back the + * deactivation of the link. + * Delete the stale data to avoid issues later on. 
+ */ + iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta, + link_id, false); + } + } +} + static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) { iwl_mvm_stop_device(mvm); @@ -1150,6 +1183,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) */ ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); + /* cleanup stations as links may be gone after restart */ + ieee80211_iterate_stations_atomic(mvm->hw, + iwl_mvm_cleanup_sta_iterator, mvm); + mvm->p2p_device_vif = NULL; iwl_mvm_reset_phy_ctxts(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index b7a461dba41ee..ae3cde0e64f3f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -515,11 +515,11 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta, return iwl_mvm_mld_send_sta_cmd(mvm, &cmd); } -static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta, - struct iwl_mvm_link_sta *mvm_sta_link, - unsigned int link_id, - bool is_in_fw) +void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, + struct iwl_mvm_link_sta *mvm_sta_link, + unsigned int link_id, + bool is_in_fw) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id], is_in_fw ? ERR_PTR(-EINVAL) : NULL); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 264f1f9394b6d..754a05a8c189b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -662,6 +662,11 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta); +void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvm_sta, + struct iwl_mvm_link_sta *mvm_sta_link, + unsigned int link_id, + bool is_in_fw); int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id); int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif, From 08b16d1b5997dc378533318e2a9cd73c7a898284 Mon Sep 17 00:00:00 2001 From: Yedidya Benshimol Date: Mon, 13 May 2024 13:27:09 +0300 Subject: [PATCH 114/537] wifi: iwlwifi: mvm: Handle BIGTK cipher in kek_kck cmd The BIGTK cipher field was added to the kek_kck_material_cmd but wasn't assigned. Fix that by differentiating between the IGTK/BIGTK keys and assign the ciphers fields accordingly. 
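For context, the key-index tests added below follow the 802.11 convention that the IGTK uses key IDs 4 and 5 while the BIGTK uses key IDs 6 and 7; a commented sketch of that mapping (same logic as the hunk that follows):

  __le32 *cipher = NULL;

  if (key->keyidx == 4 || key->keyidx == 5)       /* IGTK key IDs */
          cipher = &data->kek_kck_cmd->igtk_cipher;
  if (key->keyidx == 6 || key->keyidx == 7)       /* BIGTK key IDs */
          cipher = &data->kek_kck_cmd->bigtk_cipher;
  /* the switch on key->cipher then fills whichever field applies */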
Signed-off-by: Yedidya Benshimol Signed-off-by: Miri Korenblit Link: https://msgid.link/20240513132416.7fd0b22b7267.Ie9b581652b74bd7806980364d59e1b2e78e682c0@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index e37ae34acba60..54f4acbbd05bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -595,6 +595,12 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw, void *_data) { struct wowlan_key_gtk_type_iter *data = _data; + __le32 *cipher = NULL; + + if (key->keyidx == 4 || key->keyidx == 5) + cipher = &data->kek_kck_cmd->igtk_cipher; + if (key->keyidx == 6 || key->keyidx == 7) + cipher = &data->kek_kck_cmd->bigtk_cipher; switch (key->cipher) { default: @@ -606,10 +612,13 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw, return; case WLAN_CIPHER_SUITE_BIP_GMAC_256: case WLAN_CIPHER_SUITE_BIP_GMAC_128: - data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP); + if (cipher) + *cipher = cpu_to_le32(STA_KEY_FLG_GCMP); return; case WLAN_CIPHER_SUITE_AES_CMAC: - data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM); + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + if (cipher) + *cipher = cpu_to_le32(STA_KEY_FLG_CCM); return; case WLAN_CIPHER_SUITE_CCMP: if (!sta) From 4d08c0b3357cba0aeffaf3abc62cae0c154f2816 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 13 May 2024 13:27:10 +0300 Subject: [PATCH 115/537] wifi: iwlwifi: mvm: handle BA session teardown in RF-kill When entering RF-kill, mac80211 tears down BA sessions, but due to RF-kill the commands aren't sent to the device. As a result, there can be frames pending on the reorder buffer or perhaps even received while doing so, leading to warnings. Avoid the warnings by doing the BA session teardown normally even in RF-kill, which also requires queue sync. Signed-off-by: Johannes Berg Signed-off-by: Miri Korenblit Link: https://msgid.link/20240513132416.0762cd80fb3d.I43c5877f3b546159b2db4f36d6d956b333c41cf0@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c | 3 ++- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 12 ++++++++---- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 628b50ee02443..de9f0b4465456 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -6385,7 +6385,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, .len[0] = sizeof(cmd), .data[1] = data, .len[1] = size, - .flags = sync ? 0 : CMD_ASYNC, + .flags = CMD_SEND_IN_RFKILL | (sync ? 
0 : CMD_ASYNC), }; int ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index ae3cde0e64f3f..9d139b56e1527 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -1014,7 +1014,8 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm, cmd.modify.tid = cpu_to_le32(data->tid); - ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL, + sizeof(cmd), &cmd); data->sta_mask = new_sta_mask; if (ret) return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 20d4968d692a3..cc79fe991c263 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2848,7 +2848,12 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) : cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE), }; - u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD), + .flags = CMD_SEND_IN_RFKILL, + .len[0] = sizeof(cmd), + .data[0] = &cmd, + }; int ret; BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); @@ -2860,7 +2865,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, cmd.alloc.ssn = cpu_to_le16(ssn); cmd.alloc.win_size = cpu_to_le16(buf_size); baid = -EIO; - } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) { + } else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) { cmd.remove_v1.baid = cpu_to_le32(baid); BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove)); } else { @@ -2869,8 +2874,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, cmd.remove.tid = cpu_to_le32(tid); } - ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd), - &cmd, &baid); + ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid); if (ret) return ret; From 989830d1cf16bd149bf0690d889a9caef95fb5b1 Mon Sep 17 00:00:00 2001 From: Ayala Beker Date: Mon, 13 May 2024 13:27:11 +0300 Subject: [PATCH 116/537] wifi: iwlwifi: mvm: properly set 6 GHz channel direct probe option Ensure that the 6 GHz channel is configured with a valid direct BSSID, avoiding any invalid or multicast BSSID addresses. 
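For reference (not part of the patch itself): is_valid_ether_addr() from <linux/etherdevice.h> returns false for the all-zeros address and for any multicast address, which are exactly the two cases named above, so the hunk below can rely on it when validating a colocated AP's BSSID. A minimal sketch, reusing the variable names from the hunk:

  #include <linux/etherdevice.h>

  /* sketch: only copy BSSIDs that are unicast and non-zero */
  if (!is_valid_ether_addr(scan_6ghz_params[j].bssid))
          continue;       /* the actual patch WARNs once and skips instead */
  memcpy(&pp->bssid_array[idex_b++], scan_6ghz_params[j].bssid, ETH_ALEN);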
Signed-off-by: Ayala Beker Reviewed-by: Ilan Peer Signed-off-by: Miri Korenblit Link: https://msgid.link/20240513132416.91a631a0fe60.I2ea2616af9b8a2eaf959b156c69cf65a2f1204d4@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index a7ec172eeade8..890d4d79b172b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1730,7 +1730,10 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm, break; } - if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) { + if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE && + !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid), + "scan: invalid BSSID at index %u, index_b=%u\n", + j, idex_b)) { memcpy(&pp->bssid_array[idex_b++], scan_6ghz_params[j].bssid, ETH_ALEN); } From 60d62757df30b74bf397a2847a6db7385c6ee281 Mon Sep 17 00:00:00 2001 From: Miri Korenblit Date: Mon, 13 May 2024 13:27:12 +0300 Subject: [PATCH 117/537] wifi: iwlwifi: mvm: check n_ssids before accessing the ssids In some versions of cfg80211, the ssids poinet might be a valid one even though n_ssids is 0. Accessing the pointer in this case will cuase an out-of-bound access. Fix this by checking n_ssids first. Fixes: c1a7515393e4 ("iwlwifi: mvm: add adaptive dwell support") Signed-off-by: Miri Korenblit Reviewed-by: Ilan Peer Reviewed-by: Johannes Berg Link: https://msgid.link/20240513132416.6e4d1762bf0d.I5a0e6cc8f02050a766db704d15594c61fe583d45@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 890d4d79b172b..9739c38ef8241 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1313,7 +1313,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, if (IWL_MVM_ADWELL_MAX_BUDGET) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); - else if (params->ssids && params->ssids[0].ssid_len) + else if (params->n_ssids && params->ssids[0].ssid_len) cmd->v7.adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); else @@ -1418,7 +1418,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm, if (IWL_MVM_ADWELL_MAX_BUDGET) general_params->adwell_max_budget = cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET); - else if (params->ssids && params->ssids[0].ssid_len) + else if (params->n_ssids && params->ssids[0].ssid_len) general_params->adwell_max_budget = cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); else From e6dd2936ce7ce94a1915b799f8af8193ec628e87 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Mon, 13 May 2024 13:27:13 +0300 Subject: [PATCH 118/537] wifi: iwlwifi: mvm: Fix scan abort handling with HW rfkill When HW rfkill is toggled to disable the RF, the flow to stop scan is called. When trying to send the command to abort the scan, since HW rfkill is toggled, the command is not sent due to rfkill being asserted, and -ERFKILL is returned from iwl_trans_send_cmd(), but this is silently ignored in iwl_mvm_send_cmd() and thus the scan abort flow continues to wait for scan complete notification and fails. Since it fails, the UID to type mapping is not cleared, and thus a warning is later fired when trying to stop the interface. 
To fix this, modify the UMAC scan abort flow to force sending the scan abort command even when in rfkill, so stop the FW from accessing the radio etc. Signed-off-by: Ilan Peer Signed-off-by: Miri Korenblit Link: https://msgid.link/20240513132416.8cbe2f8c1a97.Iffe235c12a919dafec88eef399eb1f7bae2c5bdb@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 9739c38ef8241..b5f664ae5a17d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -3322,10 +3322,11 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC), - 0, sizeof(cmd), &cmd); + CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd); if (!ret) mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; + IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret); return ret; } From 4bb95f4535489ed830cf9b34b0a891e384d1aee4 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 13 May 2024 13:27:14 +0300 Subject: [PATCH 119/537] wifi: iwlwifi: mvm: don't read past the mfuart notifcation In case the firmware sends a notification that claims it has more data than it has, we will read past that was allocated for the notification. Remove the print of the buffer, we won't see it by default. If needed, we can see the content with tracing. This was reported by KFENCE. Fixes: bdccdb854f2f ("iwlwifi: mvm: support MFUART dump in case of MFUART assert") Signed-off-by: Emmanuel Grumbach Reviewed-by: Johannes Berg Signed-off-by: Miri Korenblit Link: https://msgid.link/20240513132416.ba82a01a559e.Ia91dd20f5e1ca1ad380b95e68aebf2794f553d9b@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index ca7fcae6995ee..f4937a100cbe9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -94,20 +94,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data; - __le32 *dump_data = mfu_dump_notif->data; - int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32); - int i; if (mfu_dump_notif->index_num == 0) IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n", le32_to_cpu(mfu_dump_notif->assert_id)); - - for (i = 0; i < n_words; i++) - IWL_DEBUG_INFO(mvm, - "MFUART assert dump, dword %u: 0x%08x\n", - le16_to_cpu(mfu_dump_notif->index_num) * - n_words + i, - le32_to_cpu(dump_data[i])); } static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, From 0c2fd18f7ec552796179c14f13a0e06942f09d16 Mon Sep 17 00:00:00 2001 From: Lingbo Kong Date: Thu, 16 May 2024 10:18:53 +0800 Subject: [PATCH 120/537] wifi: mac80211: fix Spatial Reuse element size check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, the way to check the size of Spatial Reuse IE data in the ieee80211_parse_extension_element() is incorrect. 
This is because the len variable in the ieee80211_parse_extension_element() function is equal to the size of Spatial Reuse IE data minus one and the value of returned by the ieee80211_he_spr_size() function is equal to the length of Spatial Reuse IE data. So the result of the len >= ieee80211_he_spr_size(data) statement always false. To address this issue and make it consistent with the logic used elsewhere with ieee80211_he_oper_size(), change the "len >= ieee80211_he_spr_size(data)" to “len >= ieee80211_he_spr_size(data) - 1”. Fixes: 9d0480a7c05b ("wifi: mac80211: move element parsing to a new file") Signed-off-by: Lingbo Kong Link: https://msgid.link/20240516021854.5682-2-quic_lingbok@quicinc.com Signed-off-by: Johannes Berg --- net/mac80211/parse.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c index 55e5497f89781..055a60e90979b 100644 --- a/net/mac80211/parse.c +++ b/net/mac80211/parse.c @@ -111,7 +111,7 @@ ieee80211_parse_extension_element(u32 *crc, if (params->mode < IEEE80211_CONN_MODE_HE) break; if (len >= sizeof(*elems->he_spr) && - len >= ieee80211_he_spr_size(data)) + len >= ieee80211_he_spr_size(data) - 1) elems->he_spr = data; break; case WLAN_EID_EXT_HE_6GHZ_CAPA: From a26d8dc5227f449a54518a8b40733a54c6600a8b Mon Sep 17 00:00:00 2001 From: Lingbo Kong Date: Thu, 16 May 2024 10:18:54 +0800 Subject: [PATCH 121/537] wifi: mac80211: correctly parse Spatial Reuse Parameter Set element Currently, the way of parsing Spatial Reuse Parameter Set element is incorrect and some members of struct ieee80211_he_obss_pd are not assigned. To address this issue, it must be parsed in the order of the elements of Spatial Reuse Parameter Set defined in the IEEE Std 802.11ax specification. The diagram of the Spatial Reuse Parameter Set element (IEEE Std 802.11ax -2021-9.4.2.252). 
------------------------------------------------------------------------- | | | | |Non-SRG| SRG | SRG | SRG | SRG | |Element|Length| Element | SR |OBSS PD|OBSS PD|OBSS PD| BSS |Partial| | ID | | ID |Control| Max | Min | Max |Color | BSSID | | | |Extension| | Offset| Offset|Offset |Bitmap|Bitmap | ------------------------------------------------------------------------- Fixes: 1ced169cc1c2 ("mac80211: allow setting spatial reuse parameters from bss_conf") Signed-off-by: Lingbo Kong Link: https://msgid.link/20240516021854.5682-3-quic_lingbok@quicinc.com Signed-off-by: Johannes Berg --- net/mac80211/he.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/net/mac80211/he.c b/net/mac80211/he.c index 9f5ffdc9db284..ecbb042dd0433 100644 --- a/net/mac80211/he.c +++ b/net/mac80211/he.c @@ -230,15 +230,21 @@ ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif, if (!he_spr_ie_elem) return; + + he_obss_pd->sr_ctrl = he_spr_ie_elem->he_sr_control; data = he_spr_ie_elem->optional; if (he_spr_ie_elem->he_sr_control & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) - data++; + he_obss_pd->non_srg_max_offset = *data++; + if (he_spr_ie_elem->he_sr_control & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) { - he_obss_pd->max_offset = *data++; he_obss_pd->min_offset = *data++; + he_obss_pd->max_offset = *data++; + memcpy(he_obss_pd->bss_color_bitmap, data, 8); + data += 8; + memcpy(he_obss_pd->partial_bssid_bitmap, data, 8); he_obss_pd->enable = true; } } From 92ecbb3ac6f3fe8ae9edf3226c76aa17b6800699 Mon Sep 17 00:00:00 2001 From: Dmitry Antipov Date: Fri, 17 May 2024 18:33:32 +0300 Subject: [PATCH 122/537] wifi: mac80211: fix UBSAN noise in ieee80211_prep_hw_scan() When testing the previous patch with CONFIG_UBSAN_BOUNDS, I've noticed the following: UBSAN: array-index-out-of-bounds in net/mac80211/scan.c:372:4 index 0 is out of range for type 'struct ieee80211_channel *[]' CPU: 0 PID: 1435 Comm: wpa_supplicant Not tainted 6.9.0+ #1 Hardware name: LENOVO 20UN005QRT/20UN005QRT <...BIOS details...> Call Trace: dump_stack_lvl+0x2d/0x90 __ubsan_handle_out_of_bounds+0xe7/0x140 ? timerqueue_add+0x98/0xb0 ieee80211_prep_hw_scan+0x2db/0x480 [mac80211] ? __kmalloc+0xe1/0x470 __ieee80211_start_scan+0x541/0x760 [mac80211] rdev_scan+0x1f/0xe0 [cfg80211] nl80211_trigger_scan+0x9b6/0xae0 [cfg80211] ... Since '__ieee80211_start_scan()' leaves 'hw_scan_req->req.n_channels' uninitialized, actual boundaries of 'hw_scan_req->req.channels' can't be checked in 'ieee80211_prep_hw_scan()'. Although an initialization of 'hw_scan_req->req.n_channels' introduces some confusion around allocated vs. used VLA members, this shouldn't be a problem since everything is correctly adjusted soon in 'ieee80211_prep_hw_scan()'. Cleanup 'kmalloc()' math in '__ieee80211_start_scan()' by using the convenient 'struct_size()' as well. 
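A standalone sketch of the struct_size() pattern the patch switches to (illustrative only; 'example_req' is a made-up type, not the mac80211 struct): it sizes an allocation with a trailing flexible array and saturates instead of wrapping on overflow:

  #include <linux/overflow.h>
  #include <linux/slab.h>

  struct ieee80211_channel;

  struct example_req {
          unsigned int n_channels;
          struct ieee80211_channel *channels[];   /* flexible array member */
  };

  static struct example_req *example_alloc(unsigned int n, size_t ies_len)
  {
          struct example_req *req;

          /* struct_size(req, channels, n) == sizeof(*req) +
           * n * sizeof(req->channels[0]), returning SIZE_MAX on overflow */
          req = kmalloc(struct_size(req, channels, n) + ies_len, GFP_KERNEL);
          if (req)
                  req->n_channels = n;
          return req;
  }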
Signed-off-by: Dmitry Antipov Link: https://msgid.link/20240517153332.18271-2-dmantipov@yandex.ru [improve (imho) indentation a bit] Signed-off-by: Johannes Berg --- net/mac80211/scan.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 3da1c5c450358..8ecc4b710b0e6 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -744,15 +744,21 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, local->hw_scan_ies_bufsize *= n_bands; } - local->hw_scan_req = kmalloc( - sizeof(*local->hw_scan_req) + - req->n_channels * sizeof(req->channels[0]) + - local->hw_scan_ies_bufsize, GFP_KERNEL); + local->hw_scan_req = kmalloc(struct_size(local->hw_scan_req, + req.channels, + req->n_channels) + + local->hw_scan_ies_bufsize, + GFP_KERNEL); if (!local->hw_scan_req) return -ENOMEM; local->hw_scan_req->req.ssids = req->ssids; local->hw_scan_req->req.n_ssids = req->n_ssids; + /* None of the channels are actually set + * up but let UBSAN know the boundaries. + */ + local->hw_scan_req->req.n_channels = req->n_channels; + ies = (u8 *)local->hw_scan_req + sizeof(*local->hw_scan_req) + req->n_channels * sizeof(req->channels[0]); From 84081a885394fc94055c24c727c99c321df6abac Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Fri, 3 May 2024 10:44:02 -0500 Subject: [PATCH 123/537] dt-bindings: arm: sunxi: Fix incorrect '-' usage Commit 6bc6bf8a940a ("dt-bindings: arm: sunxi: document Anbernic RG35XX handheld gaming device variants") mistakenly added '-' on each line which created empty (i.e. description only) schemas matching anything. This causes validation to fail on all the root node compatibles as there are multiple oneOf clauses passing. Fixes: 6bc6bf8a940a ("dt-bindings: arm: sunxi: document Anbernic RG35XX handheld gaming device variants") Reviewed-by: Ryan Walklin Reviewed-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20240503154402.967632-1-robh@kernel.org Signed-off-by: Rob Herring (Arm) --- Documentation/devicetree/bindings/arm/sunxi.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml index c6d0d8d81ed4d..c2a158b75e497 100644 --- a/Documentation/devicetree/bindings/arm/sunxi.yaml +++ b/Documentation/devicetree/bindings/arm/sunxi.yaml @@ -57,17 +57,17 @@ properties: - const: allwinner,sun8i-v3s - description: Anbernic RG35XX (2024) - - items: + items: - const: anbernic,rg35xx-2024 - const: allwinner,sun50i-h700 - description: Anbernic RG35XX Plus - - items: + items: - const: anbernic,rg35xx-plus - const: allwinner,sun50i-h700 - description: Anbernic RG35XX H - - items: + items: - const: anbernic,rg35xx-h - const: allwinner,sun50i-h700 From 321e4fa68ce15660ec578bdec5cc9607635087cf Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Thu, 23 May 2024 10:42:07 -0500 Subject: [PATCH 124/537] dt-bindings: arm: stm32: st,mlahb: Drop spurious "reg" property from example "reg" is not documented nor used for st,mlahb, so drop it from the example to fix the warning: Documentation/devicetree/bindings/arm/stm32/st,mlahb.example.dtb: ahb@38000000: Unevaluated properties are not allowed ('reg' was unexpected) from schema $id: http://devicetree.org/schemas/arm/stm32/st,mlahb.yaml# Since "reg" is dropped, the unit-address must be as well. 
Acked-by: Conor Dooley Link: https://lore.kernel.org/r/20240523154208.2457864-1-robh@kernel.org Signed-off-by: Rob Herring (Arm) --- Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml index d2dce238ff5d6..3e996346b2644 100644 --- a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml +++ b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml @@ -54,11 +54,10 @@ unevaluatedProperties: false examples: - | - mlahb: ahb@38000000 { + ahb { compatible = "st,mlahb", "simple-bus"; #address-cells = <1>; #size-cells = <1>; - reg = <0x10000000 0x40000>; ranges; dma-ranges = <0x00000000 0x38000000 0x10000>, <0x10000000 0x10000000 0x60000>, From 2032e61e24fe9fe55d6c7a34fb5506c911b3e280 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 16 May 2024 16:27:33 +0100 Subject: [PATCH 125/537] kselftest/alsa: Ensure _GNU_SOURCE is defined The pcmtest driver tests use the kselftest harness which requires that _GNU_SOURCE is defined but nothing causes it to be defined. Since the KHDR_INCLUDES Makefile variable has had the required define added let's use that, this should provide some futureproofing. Fixes: daef47b89efd ("selftests: Compile kselftest headers with -D_GNU_SOURCE") Signed-off-by: Mark Brown Reviewed-by: Muhammad Usama Anjum Signed-off-by: Shuah Khan --- tools/testing/selftests/alsa/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/alsa/Makefile b/tools/testing/selftests/alsa/Makefile index 5af9ba8a4645b..c1ce39874e2b5 100644 --- a/tools/testing/selftests/alsa/Makefile +++ b/tools/testing/selftests/alsa/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 # -CFLAGS += $(shell pkg-config --cflags alsa) +CFLAGS += $(shell pkg-config --cflags alsa) $(KHDR_INCLUDES) LDLIBS += $(shell pkg-config --libs alsa) ifeq ($(LDLIBS),) LDLIBS += -lasound From f6c3c83db1d939ebdb8c8922748ae647d8126d91 Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Tue, 21 May 2024 09:00:22 +0900 Subject: [PATCH 126/537] selftests/ftrace: Fix to check required event file The dynevent/test_duplicates.tc test case uses `syscalls/sys_enter_openat` event for defining eprobe on it. Since this `syscalls` events depend on CONFIG_FTRACE_SYSCALLS=y, if it is not set, the test will fail. Add the event file to `required` line so that the test will return `unsupported` result. Fixes: 297e1dcdca3d ("selftests/ftrace: Add selftest for testing duplicate eprobes and kprobes") Signed-off-by: Masami Hiramatsu (Google) Signed-off-by: Shuah Khan --- .../testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc index d3a79da215c8b..5f72abe6fa79b 100644 --- a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc +++ b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc @@ -1,7 +1,7 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 # description: Generic dynamic event - check if duplicate events are caught -# requires: dynamic_events "e[:[/][]] . []":README +# requires: dynamic_events "e[:[/][]] . 
[]":README events/syscalls/sys_enter_openat echo 0 > events/enable From 7ea794604bf6a3be8bb4b0f1483eb1d3972eac93 Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Tue, 21 May 2024 09:00:32 +0900 Subject: [PATCH 127/537] selftests/ftrace: Update required config Update required config options for running all tests. This also sorts the config entries alphabetically. Signed-off-by: Masami Hiramatsu (Google) Signed-off-by: Shuah Khan --- tools/testing/selftests/ftrace/config | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/ftrace/config b/tools/testing/selftests/ftrace/config index e59d985eeff0c..048a312abf405 100644 --- a/tools/testing/selftests/ftrace/config +++ b/tools/testing/selftests/ftrace/config @@ -1,16 +1,28 @@ -CONFIG_KPROBES=y +CONFIG_BPF_SYSCALL=y +CONFIG_DEBUG_INFO_BTF=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_EPROBE_EVENTS=y +CONFIG_FPROBE=y +CONFIG_FPROBE_EVENTS=y CONFIG_FTRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_FUNCTION_GRAPH_RETVAL=y CONFIG_FUNCTION_PROFILER=y -CONFIG_TRACER_SNAPSHOT=y -CONFIG_STACK_TRACER=y CONFIG_HIST_TRIGGERS=y -CONFIG_SCHED_TRACER=y -CONFIG_PREEMPT_TRACER=y CONFIG_IRQSOFF_TRACER=y -CONFIG_PREEMPTIRQ_DELAY_TEST=m +CONFIG_KALLSYMS_ALL=y +CONFIG_KPROBES=y +CONFIG_KPROBE_EVENTS=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y +CONFIG_PREEMPTIRQ_DELAY_TEST=m +CONFIG_PREEMPT_TRACER=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y CONFIG_SAMPLES=y CONFIG_SAMPLE_FTRACE_DIRECT=m CONFIG_SAMPLE_TRACE_PRINTK=m -CONFIG_KALLSYMS_ALL=y +CONFIG_SCHED_TRACER=y +CONFIG_STACK_TRACER=y +CONFIG_TRACER_SNAPSHOT=y +CONFIG_UPROBES=y +CONFIG_UPROBE_EVENTS=y From 23a4b108accc29a6125ed14de4a044689ffeda78 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Mon, 20 May 2024 20:57:37 -0400 Subject: [PATCH 128/537] tracing/selftests: Fix kprobe event name test for .isra. functions The kprobe_eventname.tc test checks if a function with .isra. can have a kprobe attached to it. It loops through the kallsyms file for all the functions that have the .isra. name, and checks if it exists in the available_filter_functions file, and if it does, it uses it to attach a kprobe to it. The issue is that kprobes can not attach to functions that are listed more than once in available_filter_functions. With the latest kernel, the function that is found is: rapl_event_update.isra.0 # grep rapl_event_update.isra.0 /sys/kernel/tracing/available_filter_functions rapl_event_update.isra.0 rapl_event_update.isra.0 It is listed twice. This causes the attached kprobe to it to fail which in turn fails the test. Instead of just picking the function function that is found in available_filter_functions, pick the first one that is listed only once in available_filter_functions. 
Cc: stable@vger.kernel.org Fixes: 604e3548236d ("selftests/ftrace: Select an existing function in kprobe_eventname test") Signed-off-by: Steven Rostedt (Google) Acked-by: Masami Hiramatsu (Google) Signed-off-by: Shuah Khan --- .../testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc index 1f6981ef7afa0..ba19b81cef39a 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc @@ -30,7 +30,8 @@ find_dot_func() { fi grep " [tT] .*\.isra\..*" /proc/kallsyms | cut -f 3 -d " " | while read f; do - if grep -s $f available_filter_functions; then + cnt=`grep -s $f available_filter_functions | wc -l`; + if [ $cnt -eq 1 ]; then echo $f break fi From bc4d5f5d2debf8bb65fba188313481549ead8576 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 21 May 2024 13:01:11 +1000 Subject: [PATCH 129/537] selftests: cachestat: Fix build warnings on ppc64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix warnings like: test_cachestat.c: In function ‘print_cachestat’: test_cachestat.c:30:38: warning: format ‘%llu’ expects argument of type ‘long long unsigned int’, but argument 2 has type ‘__u64’ {aka ‘long unsigned int’} [-Wformat=] By switching to unsigned long long for u64 for ppc64 builds. Signed-off-by: Michael Ellerman Signed-off-by: Shuah Khan --- tools/testing/selftests/cachestat/test_cachestat.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/cachestat/test_cachestat.c b/tools/testing/selftests/cachestat/test_cachestat.c index b171fd53b004e..632ab44737ec3 100644 --- a/tools/testing/selftests/cachestat/test_cachestat.c +++ b/tools/testing/selftests/cachestat/test_cachestat.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #define _GNU_SOURCE +#define __SANE_USERSPACE_TYPES__ // Use ll64 #include #include From 84b6df4c49a1cc2854a16937acd5fd3e6315d083 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 21 May 2024 13:03:25 +1000 Subject: [PATCH 130/537] selftests/openat2: Fix build warnings on ppc64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix warnings like: openat2_test.c: In function ‘test_openat2_flags’: openat2_test.c:303:73: warning: format ‘%llX’ expects argument of type ‘long long unsigned int’, but argument 5 has type ‘__u64’ {aka ‘long unsigned int’} [-Wformat=] By switching to unsigned long long for u64 for ppc64 builds. 
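For background, a minimal userspace illustration (not one of the selftests above) of what the added define changes: on ppc64 the exported __u64 defaults to 'unsigned long', so %llu/%llX format strings warn; defining __SANE_USERSPACE_TYPES__ before the first kernel uapi include switches __u64 to 'unsigned long long' on every architecture:

  #define __SANE_USERSPACE_TYPES__        /* use the ll64 typedefs everywhere */
  #include <linux/types.h>
  #include <stdio.h>

  int main(void)
  {
          __u64 val = 42;

          /* no -Wformat warning on ppc64 once __u64 is unsigned long long */
          printf("%llu\n", val);
          return 0;
  }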
Signed-off-by: Michael Ellerman Reviewed-by: Muhammad Usama Anjum Signed-off-by: Shuah Khan --- tools/testing/selftests/openat2/openat2_test.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c index 9024754530b23..5790ab446527f 100644 --- a/tools/testing/selftests/openat2/openat2_test.c +++ b/tools/testing/selftests/openat2/openat2_test.c @@ -5,6 +5,7 @@ */ #define _GNU_SOURCE +#define __SANE_USERSPACE_TYPES__ // Use ll64 #include #include #include From e8b8c5264d4ebd248f60a5cef077fe615806e7a0 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 21 May 2024 12:26:16 +1000 Subject: [PATCH 131/537] selftests/overlayfs: Fix build error on ppc64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix build error on ppc64: dev_in_maps.c: In function ‘get_file_dev_and_inode’: dev_in_maps.c:60:59: error: format ‘%llu’ expects argument of type ‘long long unsigned int *’, but argument 7 has type ‘__u64 *’ {aka ‘long unsigned int *’} [-Werror=format=] By switching to unsigned long long for u64 for ppc64 builds. Signed-off-by: Michael Ellerman Signed-off-by: Shuah Khan --- tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c index 759f86e7d263e..2862aae58b79a 100644 --- a/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c +++ b/tools/testing/selftests/filesystems/overlayfs/dev_in_maps.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #define _GNU_SOURCE +#define __SANE_USERSPACE_TYPES__ // Use ll64 #include #include From 2607133196c35f31892ee199ce7ffa717bea4ad1 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Mon, 27 May 2024 17:14:12 -0700 Subject: [PATCH 132/537] clk: sifive: Do not register clkdevs for PRCI clocks These clkdevs were unnecessary, because systems using this driver always look up clocks using the devicetree. And as Russell King points out[1], since the provided device name was truncated, lookups via clkdev would never match. Recently, commit 8d532528ff6a ("clkdev: report over-sized strings when creating clkdev entries") caused clkdev registration to fail due to the truncation, and this now prevents the driver from probing. Fix the driver by removing the clkdev registration. 
Link: https://lore.kernel.org/linux-clk/ZkfYqj+OcAxd9O2t@shell.armlinux.org.uk/ [1] Fixes: 30b8e27e3b58 ("clk: sifive: add a driver for the SiFive FU540 PRCI IP block") Fixes: 8d532528ff6a ("clkdev: report over-sized strings when creating clkdev entries") Reported-by: Guenter Roeck Closes: https://lore.kernel.org/linux-clk/7eda7621-0dde-4153-89e4-172e4c095d01@roeck-us.net/ Suggested-by: Russell King Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20240528001432.1200403-1-samuel.holland@sifive.com Signed-off-by: Stephen Boyd --- drivers/clk/sifive/sifive-prci.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c index 25b8e1a80ddce..b32a59fe55e74 100644 --- a/drivers/clk/sifive/sifive-prci.c +++ b/drivers/clk/sifive/sifive-prci.c @@ -4,7 +4,6 @@ * Copyright (C) 2020 Zong Li */ -#include #include #include #include @@ -537,13 +536,6 @@ static int __prci_register_clocks(struct device *dev, struct __prci_data *pd, return r; } - r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev)); - if (r) { - dev_warn(dev, "Failed to register clkdev for %s: %d\n", - init.name, r); - return r; - } - pd->hw_clks.hws[i] = &pic->hw; } From ed8c7fbdfe117abbef81f65428ba263118ef298a Mon Sep 17 00:00:00 2001 From: Yuntao Wang Date: Thu, 30 May 2024 00:06:56 +0800 Subject: [PATCH 133/537] fs/file: fix the check in find_next_fd() The maximum possible return value of find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) is maxbit. This return value, multiplied by BITS_PER_LONG, gives the value of bitbit, which can never be greater than maxfd, it can only be equal to maxfd at most, so the following check 'if (bitbit > maxfd)' will never be true. Moreover, when bitbit equals maxfd, it indicates that there are no unused fds, and the function can directly return. Fix this check. Signed-off-by: Yuntao Wang Link: https://lore.kernel.org/r/20240529160656.209352-1-yuntao.wang@linux.dev Reviewed-by: Jan Kara Signed-off-by: Christian Brauner --- fs/file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/file.c b/fs/file.c index 8076aef9c2101..a3b72aa64f116 100644 --- a/fs/file.c +++ b/fs/file.c @@ -486,12 +486,12 @@ struct files_struct init_files = { static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start) { - unsigned int maxfd = fdt->max_fds; + unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */ unsigned int maxbit = maxfd / BITS_PER_LONG; unsigned int bitbit = start / BITS_PER_LONG; bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG; - if (bitbit > maxfd) + if (bitbit >= maxfd) return maxfd; if (bitbit > start) start = bitbit; From 328af04b1ac279f17c09ddd5958aa6ec47131c80 Mon Sep 17 00:00:00 2001 From: Sakari Ailus Date: Mon, 27 May 2024 23:43:38 +0300 Subject: [PATCH 134/537] media: mei: csi: Put the IPU device reference The mei csi's probe function obtains a reference to the IPU device but never puts that reference. Do that now. 
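The rule being restored, shown as a generic sketch rather than the driver code (example_bridge_init() is an invented stand-in): a lookup that hands back a device with an elevated reference count must be balanced with put_device() once the pointer is no longer needed, on the success path and the error path alike:

  #include <linux/device.h>

  /* Invented helper standing in for the bridge init call in the driver. */
  extern int example_bridge_init(struct device *dev);

  static int example_bind(struct device *ipu)
  {
          int ret;

          get_device(ipu);                /* reference held across the call */
          ret = example_bridge_init(ipu);
          put_device(ipu);                /* dropped whether init succeeded or not */

          return ret;
  }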
Fixes: 33116eb12c6b ("media: ivsc: csi: Use IPU bridge") Signed-off-by: Sakari Ailus Signed-off-by: Hans Verkuil --- drivers/media/pci/intel/ivsc/mei_csi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c index 89b582a221ab4..53151d0161882 100644 --- a/drivers/media/pci/intel/ivsc/mei_csi.c +++ b/drivers/media/pci/intel/ivsc/mei_csi.c @@ -677,6 +677,7 @@ static int mei_csi_probe(struct mei_cl_device *cldev, return -ENODEV; ret = ipu_bridge_init(&ipu->dev, ipu_bridge_parse_ssdb); + put_device(&ipu->dev); if (ret < 0) return ret; if (WARN_ON(!dev_fwnode(dev))) From cc864821c7e8b921ebbfb21b17c92f8b3ea3d7ff Mon Sep 17 00:00:00 2001 From: Sakari Ailus Date: Mon, 27 May 2024 23:13:27 +0300 Subject: [PATCH 135/537] media: mei: csi: Warn less verbosely of a missing device fwnode The check for having device fwnode was meant to be a sanity check but this also happens if the ACPI DSDT has graph port nodes on sensor device(s) but not on the IVSC device. Use a more meaningful warning message to tell about this. Fixes: 33116eb12c6b ("media: ivsc: csi: Use IPU bridge") Signed-off-by: Sakari Ailus Signed-off-by: Hans Verkuil --- drivers/media/pci/intel/ivsc/mei_csi.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c index 53151d0161882..f04a89584334b 100644 --- a/drivers/media/pci/intel/ivsc/mei_csi.c +++ b/drivers/media/pci/intel/ivsc/mei_csi.c @@ -680,8 +680,10 @@ static int mei_csi_probe(struct mei_cl_device *cldev, put_device(&ipu->dev); if (ret < 0) return ret; - if (WARN_ON(!dev_fwnode(dev))) + if (!dev_fwnode(dev)) { + dev_err(dev, "mei-csi probed without device fwnode!\n"); return -ENXIO; + } csi = devm_kzalloc(dev, sizeof(struct mei_csi), GFP_KERNEL); if (!csi) From 54880795b494e6557b47fe3cf3f21f2f54c2aa68 Mon Sep 17 00:00:00 2001 From: Bingbu Cao Date: Wed, 29 May 2024 11:44:32 +0800 Subject: [PATCH 136/537] media: intel/ipu6: update the maximum supported csi2 port number to 6 IPU6EP on Meteor Lake SoC supports maximum 6 csi2 ports instead of 4. Fixes: 25fedc021985 ("media: intel/ipu6: add Intel IPU6 PCI device driver") Signed-off-by: Bingbu Cao Signed-off-by: Sakari Ailus Signed-off-by: Hans Verkuil --- drivers/media/pci/intel/ipu6/ipu6.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c index f587f609259d7..bbd646378ab3e 100644 --- a/drivers/media/pci/intel/ipu6/ipu6.c +++ b/drivers/media/pci/intel/ipu6/ipu6.c @@ -285,7 +285,7 @@ EXPORT_SYMBOL_NS_GPL(ipu6_configure_spc, INTEL_IPU6); #define IPU6_ISYS_CSI2_NPORTS 4 #define IPU6SE_ISYS_CSI2_NPORTS 4 #define IPU6_TGL_ISYS_CSI2_NPORTS 8 -#define IPU6EP_MTL_ISYS_CSI2_NPORTS 4 +#define IPU6EP_MTL_ISYS_CSI2_NPORTS 6 static void ipu6_internal_pdata_init(struct ipu6_device *isp) { From ffb9072bce200a4d004006e8b40c366933cf517b Mon Sep 17 00:00:00 2001 From: Bingbu Cao Date: Wed, 29 May 2024 14:43:21 +0800 Subject: [PATCH 137/537] media: intel/ipu6: add csi2 port sanity check in notifier bound Invalid csi2 port will break the isys notifier bound ops as it is trying to access an invalid csi2 sub-device instance based on the port. It will trigger a mc warning, and it will cause the sensor driver to unbound an inexistent isys csi2 and crash. Adding a csi2 port sanity check, return error to avoid such case. 
Fixes: f50c4ca0a820 ("media: intel/ipu6: add the main input system driver") Signed-off-by: Bingbu Cao [Sakari Ailus: Fix spelling of "nports" field.] Signed-off-by: Sakari Ailus Signed-off-by: Hans Verkuil --- drivers/media/pci/intel/ipu6/ipu6-isys.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys.c b/drivers/media/pci/intel/ipu6/ipu6-isys.c index 1998b72ac07de..8b9b77719bb16 100644 --- a/drivers/media/pci/intel/ipu6/ipu6-isys.c +++ b/drivers/media/pci/intel/ipu6/ipu6-isys.c @@ -678,6 +678,12 @@ static int isys_notifier_bound(struct v4l2_async_notifier *notifier, container_of(asc, struct sensor_async_sd, asc); int ret; + if (s_asd->csi2.port >= isys->pdata->ipdata->csi2.nports) { + dev_err(&isys->adev->auxdev.dev, "invalid csi2 port %u\n", + s_asd->csi2.port); + return -EINVAL; + } + ret = ipu_bridge_instantiate_vcm(sd->dev); if (ret) { dev_err(&isys->adev->auxdev.dev, "instantiate vcm failed\n"); From 41011e2de3480f9adb6420b35b67628b2d903355 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 28 May 2024 11:06:31 +0100 Subject: [PATCH 138/537] KVM: arm64: nv: Fix relative priorities of exceptions generated by ERETAx ERETAx can fail in multiple ways: (1) ELR_EL2 points lalaland (2) we get a PAC failure (3) SPSR_EL2 has the wrong mode (1) is easy, as we just let the CPU do its thing and deliver an Instruction Abort. However, (2) and (3) are interesting, because the PAC failure priority is way below that of the Illegal Execution State exception. Which means that if we have detected a PAC failure (and that we have FPACCOMBINE), we must be careful to give priority to the Illegal Execution State exception, should one be pending. Solving this involves hoisting the SPSR calculation earlier and testing for the IL bit before injecting the FPAC exception. In the extreme case of a ERETAx returning to an invalid mode *and* failing its PAC check, we end up with an Instruction Abort (due to the new PC being mangled by the failed Auth) *and* PSTATE.IL being set. Which matches the requirements of the architecture. Whilst we're at it, remove a stale comment that states the obvious and only confuses the reader. Fixes: 213b3d1ea161 ("KVM: arm64: nv: Handle ERETA[AB] instructions") Reviewed-by: Joey Gouly Reviewed-by: Oliver Upton Link: https://lore.kernel.org/r/20240528100632.1831995-2-maz@kernel.org Signed-off-by: Marc Zyngier --- arch/arm64/kvm/emulate-nested.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index 72d733c74a382..54090967a3356 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -2181,16 +2181,23 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu) if (forward_traps(vcpu, HCR_NV)) return; + spsr = vcpu_read_sys_reg(vcpu, SPSR_EL2); + spsr = kvm_check_illegal_exception_return(vcpu, spsr); + /* Check for an ERETAx */ esr = kvm_vcpu_get_esr(vcpu); if (esr_iss_is_eretax(esr) && !kvm_auth_eretax(vcpu, &elr)) { /* - * Oh no, ERETAx failed to authenticate. If we have - * FPACCOMBINE, deliver an exception right away. If we - * don't, then let the mangled ELR value trickle down the + * Oh no, ERETAx failed to authenticate. + * + * If we have FPACCOMBINE and we don't have a pending + * Illegal Execution State exception (which has priority + * over FPAC), deliver an exception right away. + * + * Otherwise, let the mangled ELR value trickle down the * ERET handling, and the guest will have a little surprise. 
*/ - if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE)) { + if (kvm_has_pauth(vcpu->kvm, FPACCOMBINE) && !(spsr & PSR_IL_BIT)) { esr &= ESR_ELx_ERET_ISS_ERETA; esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_FPAC); kvm_inject_nested_sync(vcpu, esr); @@ -2201,17 +2208,11 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu) preempt_disable(); kvm_arch_vcpu_put(vcpu); - spsr = __vcpu_sys_reg(vcpu, SPSR_EL2); - spsr = kvm_check_illegal_exception_return(vcpu, spsr); if (!esr_iss_is_eretax(esr)) elr = __vcpu_sys_reg(vcpu, ELR_EL2); trace_kvm_nested_eret(vcpu, elr, spsr); - /* - * Note that the current exception level is always the virtual EL2, - * since we set HCR_EL2.NV bit only when entering the virtual EL2. - */ *vcpu_pc(vcpu) = elr; *vcpu_cpsr(vcpu) = spsr; From 47eb2d68d10208e6a9e89d10b66018e8d6ca0623 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 28 May 2024 11:06:32 +0100 Subject: [PATCH 139/537] KVM: arm64: nv: Expose BTI and CSV_frac to a guest hypervisor Now that we expose PAC to NV guests, we can also expose BTI (as the two as joined at the hip, due to some of the PAC instructions being landing pads). While we're at it, also propagate CSV_frac, which requires no particular emulation. Fixes: f4f6a95bac49 ("KVM: arm64: nv: Advertise support for PAuth") Reviewed-by: Oliver Upton Link: https://lore.kernel.org/r/20240528100632.1831995-3-maz@kernel.org Signed-off-by: Marc Zyngier --- arch/arm64/kvm/nested.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index 6813c7c7f00ab..bae8536cbf003 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -58,8 +58,10 @@ static u64 limit_nv_id_reg(u32 id, u64 val) break; case SYS_ID_AA64PFR1_EL1: - /* Only support SSBS */ - val &= NV_FTR(PFR1, SSBS); + /* Only support BTI, SSBS, CSV2_frac */ + val &= (NV_FTR(PFR1, BT) | + NV_FTR(PFR1, SSBS) | + NV_FTR(PFR1, CSV2_frac)); break; case SYS_ID_AA64MMFR0_EL1: From cb708ab9f584f159798b60853edcf0c8b67ce295 Mon Sep 17 00:00:00 2001 From: John Hubbard Date: Tue, 28 May 2024 19:29:38 -0700 Subject: [PATCH 140/537] selftests/futex: pass _GNU_SOURCE without a value to the compiler It's slightly better to set _GNU_SOURCE in the source code, but if one must do it via the compiler invocation, then the best way to do so is this: $(CC) -D_GNU_SOURCE= ...because otherwise, if this form is used: $(CC) -D_GNU_SOURCE ...then that leads the compiler to set a value, as if you had passed in: $(CC) -D_GNU_SOURCE=1 That, in turn, leads to warnings under both gcc and clang, like this: futex_requeue_pi.c:20: warning: "_GNU_SOURCE" redefined Fix this by using the "-D_GNU_SOURCE=" form. 
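A minimal reproduction of the warning, assuming a throwaway demo.c that is not part of the selftests:

  /*
   * cc -D_GNU_SOURCE  demo.c   -> warning: "_GNU_SOURCE" redefined
   * cc -D_GNU_SOURCE= demo.c   -> no warning: both definitions are empty
   *
   * A bare -D_GNU_SOURCE is treated as -D_GNU_SOURCE=1, which conflicts
   * with the in-source definition below (defined to nothing).
   */
  #define _GNU_SOURCE
  #include <stdio.h>

  int main(void)
  {
          puts("hello");
          return 0;
  }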
Reviewed-by: Edward Liaw Reviewed-by: Davidlohr Bueso Signed-off-by: John Hubbard Signed-off-by: Shuah Khan --- tools/testing/selftests/futex/functional/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile index a392d0917b4e5..994fa3468f170 100644 --- a/tools/testing/selftests/futex/functional/Makefile +++ b/tools/testing/selftests/futex/functional/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 INCLUDES := -I../include -I../../ $(KHDR_INCLUDES) -CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) $(KHDR_INCLUDES) +CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE= -pthread $(INCLUDES) $(KHDR_INCLUDES) LDLIBS := -lpthread -lrt LOCAL_HDRS := \ From 296f4ce81d08e73c22408c49f4938a85bd075e5c Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 29 May 2024 09:11:36 +1000 Subject: [PATCH 141/537] NFS: abort nfs_atomic_open_v23 if name is too long. An attempt to open a file with a name longer than NFS3_MAXNAMLEN will trigger a WARN_ON_ONCE in encode_filename3() because nfs_atomic_open_v23() doesn't have the test on ->d_name.len that nfs_atomic_open() has. So add that test. Reported-by: James Clark Closes: https://lore.kernel.org/all/20240528105249.69200-1-james.clark@arm.com/ Fixes: 7c6c5249f061 ("NFS: add atomic_open for NFSv3 to handle O_TRUNC correctly.") Signed-off-by: NeilBrown Reviewed-by: Christoph Hellwig Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 788077a4feb9a..2b68a14982c8f 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2254,6 +2254,9 @@ int nfs_atomic_open_v23(struct inode *dir, struct dentry *dentry, */ int error = 0; + if (dentry->d_name.len > NFS_SERVER(dir)->namelen) + return -ENAMETOOLONG; + if (open_flags & O_CREAT) { file->f_mode |= FMODE_CREATED; error = nfs_do_create(dir, dentry, mode, open_flags); From 28568c906c1bb5f7560e18082ed7d6295860f1c2 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Wed, 29 May 2024 15:44:35 -0400 Subject: [PATCH 142/537] NFSv4.1 enforce rootpath check in fs_location query In commit 4ca9f31a2be66 ("NFSv4.1 test and add 4.1 trunking transport"), we introduce the ability to query the NFS server for possible trunking locations of the existing filesystem. However, we never checked the returned file system path for these alternative locations. According to the RFC, the server can say that the filesystem currently known under "fs_root" of fs_location also resides under these server locations under the following "rootpath" pathname. The client cannot handle trunking a filesystem that reside under different location under different paths other than what the main path is. This patch enforces the check that fs_root path and rootpath path in fs_location reply is the same. 
Fixes: 4ca9f31a2be6 ("NFSv4.1 test and add 4.1 trunking transport") Signed-off-by: Olga Kornievskaia Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 94c07875aa3f4..a691fa10b3e95 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -4023,6 +4023,23 @@ static void test_fs_location_for_trunking(struct nfs4_fs_location *location, } } +static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1, + struct nfs4_pathname *path2) +{ + int i; + + if (path1->ncomponents != path2->ncomponents) + return false; + for (i = 0; i < path1->ncomponents; i++) { + if (path1->components[i].len != path2->components[i].len) + return false; + if (memcmp(path1->components[i].data, path2->components[i].data, + path1->components[i].len)) + return false; + } + return true; +} + static int _nfs4_discover_trunking(struct nfs_server *server, struct nfs_fh *fhandle) { @@ -4056,9 +4073,13 @@ static int _nfs4_discover_trunking(struct nfs_server *server, if (status) goto out_free_3; - for (i = 0; i < locations->nlocations; i++) + for (i = 0; i < locations->nlocations; i++) { + if (!_is_same_nfs4_pathname(&locations->fs_path, + &locations->locations[i].rootpath)) + continue; test_fs_location_for_trunking(&locations->locations[i], clp, server); + } out_free_3: kfree(locations->fattr); out_free_2: From 33c94d7e3cb84f6d130678d6d59ba475a6c489cf Mon Sep 17 00:00:00 2001 From: Chen Hanxiao Date: Thu, 23 May 2024 16:47:16 +0800 Subject: [PATCH 143/537] SUNRPC: return proper error from gss_wrap_req_priv don't return 0 if snd_buf->len really greater than snd_buf->buflen Signed-off-by: Chen Hanxiao Fixes: 0c77668ddb4e ("SUNRPC: Introduce trace points in rpc_auth_gss.ko") Reviewed-by: Benjamin Coddington Reviewed-by: Chuck Lever Signed-off-by: Trond Myklebust --- net/sunrpc/auth_gss/auth_gss.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index c7af0220f82f4..369310909fc98 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -1875,8 +1875,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages); /* slack space should prevent this ever happening: */ - if (unlikely(snd_buf->len > snd_buf->buflen)) + if (unlikely(snd_buf->len > snd_buf->buflen)) { + status = -EIO; goto wrap_failed; + } /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was * done anyway, so it's safe to put the request on the wire: */ if (maj_stat == GSS_S_CONTEXT_EXPIRED) From 99bc9f2eb3f79a2b4296d9bf43153e1d10ca50d3 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 28 May 2024 13:27:17 +1000 Subject: [PATCH 144/537] NFS: add barriers when testing for NFS_FSDATA_BLOCKED dentry->d_fsdata is set to NFS_FSDATA_BLOCKED while unlinking or renaming-over a file to ensure that no open succeeds while the NFS operation progressed on the server. Setting dentry->d_fsdata to NFS_FSDATA_BLOCKED is done under ->d_lock after checking the refcount is not elevated. Any attempt to open the file (through that name) will go through lookp_open() which will take ->d_lock while incrementing the refcount, we can be sure that once the new value is set, __nfs_lookup_revalidate() *will* see the new value and will block. 
We don't have any locking guarantee that when we set ->d_fsdata to NULL, the wait_var_event() in __nfs_lookup_revalidate() will notice. wait/wake primitives do NOT provide barriers to guarantee order. We must use smp_load_acquire() in wait_var_event() to ensure we look at an up-to-date value, and must use smp_store_release() before wake_up_var(). This patch adds those barrier functions and factors out block_revalidate() and unblock_revalidate() far clarity. There is also a hypothetical bug in that if memory allocation fails (which never happens in practice) we might leave ->d_fsdata locked. This patch adds the missing call to unblock_revalidate(). Reported-and-tested-by: Richard Kojedzinszky Closes: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1071501 Fixes: 3c59366c207e ("NFS: don't unhash dentry during unlink/rename") Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 47 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 2b68a14982c8f..07a7be27182e5 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1803,9 +1803,10 @@ __nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags, if (parent != READ_ONCE(dentry->d_parent)) return -ECHILD; } else { - /* Wait for unlink to complete */ + /* Wait for unlink to complete - see unblock_revalidate() */ wait_var_event(&dentry->d_fsdata, - dentry->d_fsdata != NFS_FSDATA_BLOCKED); + smp_load_acquire(&dentry->d_fsdata) + != NFS_FSDATA_BLOCKED); parent = dget_parent(dentry); ret = reval(d_inode(parent), dentry, flags); dput(parent); @@ -1818,6 +1819,29 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate); } +static void block_revalidate(struct dentry *dentry) +{ + /* old devname - just in case */ + kfree(dentry->d_fsdata); + + /* Any new reference that could lead to an open + * will take ->d_lock in lookup_open() -> d_lookup(). + * Holding this lock ensures we cannot race with + * __nfs_lookup_revalidate() and removes and need + * for further barriers. + */ + lockdep_assert_held(&dentry->d_lock); + + dentry->d_fsdata = NFS_FSDATA_BLOCKED; +} + +static void unblock_revalidate(struct dentry *dentry) +{ + /* store_release ensures wait_var_event() sees the update */ + smp_store_release(&dentry->d_fsdata, NULL); + wake_up_var(&dentry->d_fsdata); +} + /* * A weaker form of d_revalidate for revalidating just the d_inode(dentry) * when we don't really care about the dentry name. 
This is called when a @@ -2551,15 +2575,12 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry) spin_unlock(&dentry->d_lock); goto out; } - /* old devname */ - kfree(dentry->d_fsdata); - dentry->d_fsdata = NFS_FSDATA_BLOCKED; + block_revalidate(dentry); spin_unlock(&dentry->d_lock); error = nfs_safe_remove(dentry); nfs_dentry_remove_handle_error(dir, dentry, error); - dentry->d_fsdata = NULL; - wake_up_var(&dentry->d_fsdata); + unblock_revalidate(dentry); out: trace_nfs_unlink_exit(dir, dentry, error); return error; @@ -2666,8 +2687,7 @@ nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data) { struct dentry *new_dentry = data->new_dentry; - new_dentry->d_fsdata = NULL; - wake_up_var(&new_dentry->d_fsdata); + unblock_revalidate(new_dentry); } /* @@ -2729,11 +2749,6 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, if (WARN_ON(new_dentry->d_flags & DCACHE_NFSFS_RENAMED) || WARN_ON(new_dentry->d_fsdata == NFS_FSDATA_BLOCKED)) goto out; - if (new_dentry->d_fsdata) { - /* old devname */ - kfree(new_dentry->d_fsdata); - new_dentry->d_fsdata = NULL; - } spin_lock(&new_dentry->d_lock); if (d_count(new_dentry) > 2) { @@ -2755,7 +2770,7 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, new_dentry = dentry; new_inode = NULL; } else { - new_dentry->d_fsdata = NFS_FSDATA_BLOCKED; + block_revalidate(new_dentry); must_unblock = true; spin_unlock(&new_dentry->d_lock); } @@ -2767,6 +2782,8 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, must_unblock ? nfs_unblock_rename : NULL); if (IS_ERR(task)) { + if (must_unblock) + unblock_revalidate(new_dentry); error = PTR_ERR(task); goto out; } From d53b681ce9ca7db5ef4ecb8d2cf465ae4a031264 Mon Sep 17 00:00:00 2001 From: Chanwoo Lee Date: Fri, 24 May 2024 10:59:04 +0900 Subject: [PATCH 145/537] scsi: ufs: mcq: Fix error output and clean up ufshcd_mcq_abort() An error unrelated to ufshcd_try_to_abort_task is being logged and can cause confusion. Modify ufshcd_mcq_abort() to print the result of the abort failure. For readability, return immediately instead of 'goto'. Fixes: f1304d442077 ("scsi: ufs: mcq: Added ufshcd_mcq_abort()") Signed-off-by: Chanwoo Lee Link: https://lore.kernel.org/r/20240524015904.1116005-1-cw9316.lee@samsung.com Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/ufs/core/ufs-mcq.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 005d63ab1f441..8944548c30fa1 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -634,20 +634,20 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) struct ufshcd_lrb *lrbp = &hba->lrb[tag]; struct ufs_hw_queue *hwq; unsigned long flags; - int err = FAILED; + int err; if (!ufshcd_cmd_inflight(lrbp->cmd)) { dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n", __func__, tag); - goto out; + return FAILED; } /* Skip task abort in case previous aborts failed and report failure */ if (lrbp->req_abort_skip) { dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n", __func__, tag); - goto out; + return FAILED; } hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); @@ -659,7 +659,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) */ dev_err(hba->dev, "%s: cmd found in sq. 
hwq=%d, tag=%d\n", __func__, hwq->id, tag); - goto out; + return FAILED; } /* @@ -667,18 +667,17 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd) * in the completion queue either. Query the device to see if * the command is being processed in the device. */ - if (ufshcd_try_to_abort_task(hba, tag)) { + err = ufshcd_try_to_abort_task(hba, tag); + if (err) { dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err); lrbp->req_abort_skip = true; - goto out; + return FAILED; } - err = SUCCESS; spin_lock_irqsave(&hwq->cq_lock, flags); if (ufshcd_cmd_inflight(lrbp->cmd)) ufshcd_release_scsi_cmd(hba, lrbp); spin_unlock_irqrestore(&hwq->cq_lock, flags); -out: - return err; + return SUCCESS; } From 935df1bd40d43c4ee91838c42a20e9af751885cc Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Wed, 29 May 2024 14:59:20 -0500 Subject: [PATCH 146/537] of/irq: Factor out parsing of interrupt-map parent phandle+args from of_irq_parse_raw() Factor out the parsing of interrupt-map interrupt parent phandle and its arg cells to a separate function, of_irq_parse_imap_parent(), so that it can be used in other parsing scenarios (e.g. fw_devlink). There was a refcount leak on non-matching entries when iterating thru "interrupt-map" which is fixed. Tested-by: Marc Zyngier Tested-by: Anup Patel Link: https://lore.kernel.org/r/20240529-dt-interrupt-map-fix-v2-1-ef86dc5bcd2a@kernel.org Signed-off-by: Rob Herring (Arm) --- drivers/of/irq.c | 125 ++++++++++++++++++++++++---------------- drivers/of/of_private.h | 3 + 2 files changed, 77 insertions(+), 51 deletions(-) diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 174900072c18c..462375b293e47 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -25,6 +25,8 @@ #include #include +#include "of_private.h" + /** * irq_of_parse_and_map - Parse and map an interrupt into linux virq space * @dev: Device node of the device whose interrupt is to be mapped @@ -96,6 +98,57 @@ static const char * const of_irq_imap_abusers[] = { NULL, }; +const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_phandle_args *out_irq) +{ + u32 intsize, addrsize; + struct device_node *np; + + /* Get the interrupt parent */ + if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) + np = of_node_get(of_irq_dflt_pic); + else + np = of_find_node_by_phandle(be32_to_cpup(imap)); + imap++; + + /* Check if not found */ + if (!np) { + pr_debug(" -> imap parent not found !\n"); + return NULL; + } + + /* Get #interrupt-cells and #address-cells of new parent */ + if (of_property_read_u32(np, "#interrupt-cells", + &intsize)) { + pr_debug(" -> parent lacks #interrupt-cells!\n"); + of_node_put(np); + return NULL; + } + if (of_property_read_u32(np, "#address-cells", + &addrsize)) + addrsize = 0; + + pr_debug(" -> intsize=%d, addrsize=%d\n", + intsize, addrsize); + + /* Check for malformed properties */ + if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS) + || (len < (addrsize + intsize))) { + of_node_put(np); + return NULL; + } + + pr_debug(" -> imaplen=%d\n", len); + + imap += addrsize + intsize; + + out_irq->np = np; + for (int i = 0; i < intsize; i++) + out_irq->args[i] = be32_to_cpup(imap - intsize + i); + out_irq->args_count = intsize; + + return imap; +} + /** * of_irq_parse_raw - Low level interrupt tree parsing * @addr: address specifier (start of "reg" property of the device) in be32 format @@ -112,12 +165,12 @@ static const char * const of_irq_imap_abusers[] = { */ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) { - struct device_node *ipar, *tnode, *old 
= NULL, *newpar = NULL; + struct device_node *ipar, *tnode, *old = NULL; __be32 initial_match_array[MAX_PHANDLE_ARGS]; const __be32 *match_array = initial_match_array; - const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) }; - u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0; - int imaplen, match, i, rc = -EINVAL; + const __be32 *tmp, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) }; + u32 intsize = 1, addrsize; + int i, rc = -EINVAL; #ifdef DEBUG of_print_phandle_args("of_irq_parse_raw: ", out_irq); @@ -176,6 +229,9 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) /* Now start the actual "proper" walk of the interrupt tree */ while (ipar != NULL) { + int imaplen, match; + const __be32 *imap, *oldimap, *imask; + struct device_node *newpar; /* * Now check if cursor is an interrupt-controller and * if it is then we are done, unless there is an @@ -216,7 +272,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) /* Parse interrupt-map */ match = 0; - while (imaplen > (addrsize + intsize + 1) && !match) { + while (imaplen > (addrsize + intsize + 1)) { /* Compare specifiers */ match = 1; for (i = 0; i < (addrsize + intsize); i++, imaplen--) @@ -224,48 +280,17 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen); - /* Get the interrupt parent */ - if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) - newpar = of_node_get(of_irq_dflt_pic); - else - newpar = of_find_node_by_phandle(be32_to_cpup(imap)); - imap++; - --imaplen; - - /* Check if not found */ - if (newpar == NULL) { - pr_debug(" -> imap parent not found !\n"); - goto fail; - } - - if (!of_device_is_available(newpar)) - match = 0; - - /* Get #interrupt-cells and #address-cells of new - * parent - */ - if (of_property_read_u32(newpar, "#interrupt-cells", - &newintsize)) { - pr_debug(" -> parent lacks #interrupt-cells!\n"); - goto fail; - } - if (of_property_read_u32(newpar, "#address-cells", - &newaddrsize)) - newaddrsize = 0; - - pr_debug(" -> newintsize=%d, newaddrsize=%d\n", - newintsize, newaddrsize); - - /* Check for malformed properties */ - if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS) - || (imaplen < (newaddrsize + newintsize))) { - rc = -EFAULT; + oldimap = imap; + imap = of_irq_parse_imap_parent(oldimap, imaplen, out_irq); + if (!imap) goto fail; - } - imap += newaddrsize + newintsize; - imaplen -= newaddrsize + newintsize; + match &= of_device_is_available(out_irq->np); + if (match) + break; + of_node_put(out_irq->np); + imaplen -= imap - oldimap; pr_debug(" -> imaplen=%d\n", imaplen); } if (!match) { @@ -287,11 +312,11 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) * Successfully parsed an interrupt-map translation; copy new * interrupt specifier into the out_irq structure */ - match_array = imap - newaddrsize - newintsize; - for (i = 0; i < newintsize; i++) - out_irq->args[i] = be32_to_cpup(imap - newintsize + i); - out_irq->args_count = intsize = newintsize; - addrsize = newaddrsize; + match_array = oldimap + 1; + + newpar = out_irq->np; + intsize = out_irq->args_count; + addrsize = (imap - match_array) - intsize; if (ipar == newpar) { pr_debug("%pOF interrupt-map entry to self\n", ipar); @@ -300,7 +325,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) skiplevel: /* Iterate again with new parent */ - out_irq->np = newpar; pr_debug(" -> new parent: %pOF\n", newpar); 
of_node_put(ipar); ipar = newpar; @@ -310,7 +334,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) fail: of_node_put(ipar); - of_node_put(newpar); return rc; } diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h index 94fc0aa07af9e..04aa2a91f851a 100644 --- a/drivers/of/of_private.h +++ b/drivers/of/of_private.h @@ -159,6 +159,9 @@ extern void __of_sysfs_remove_bin_file(struct device_node *np, extern int of_bus_n_addr_cells(struct device_node *np); extern int of_bus_n_size_cells(struct device_node *np); +const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, + struct of_phandle_args *out_irq); + struct bus_dma_region; #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA) int of_dma_get_range(struct device_node *np, From e7985f43609c782132f8f5794ee6cc4cdb66ca75 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 29 May 2024 14:59:21 -0500 Subject: [PATCH 147/537] of: property: Fix fw_devlink handling of interrupt-map Commit d976c6f4b32c ("of: property: Add fw_devlink support for interrupt-map property") tried to do what it says on the tin, but failed on a couple of points: - it confuses bytes and cells. Not a huge deal, except when it comes to pointer arithmetic - it doesn't really handle anything but interrupt-maps that have their parent #address-cells set to 0 The combinations of the two leads to some serious fun on my M1 box, with plenty of WARN-ON() firing all over the shop, and amusing values being generated for interrupt specifiers. Having 2 versions of parsing code for "interrupt-map" was a bad idea. Now that the common parsing parts have been refactored into of_irq_parse_imap_parent(), rework the code here to use it instead and fix the pointer arithmetic. Note that the dependency will be a bit different than the original code when the interrupt-map points to another interrupt-map. In this case, the original code would resolve to the final interrupt controller. Now the dependency is the parent interrupt-map (which itself should have a dependency to the parent). It is possible that a node with an interrupt-map has no driver. 
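A small sketch of the bytes-versus-cells point above (the function and variable names are only illustrative):

  #include <linux/of.h>

  static const __be32 *example_get_imap(struct device_node *np, int *ncells)
  {
          int len;        /* of_get_property() reports the size in bytes */
          const __be32 *imap = of_get_property(np, "interrupt-map", &len);

          if (!imap)
                  return NULL;

          /*
           * Pointer arithmetic on a __be32 * advances one 4-byte cell at a
           * time, so scale the byte count into a cell count before using
           * it to bound the walk.
           */
          *ncells = len / sizeof(*imap);
          return imap;
  }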
Fixes: d976c6f4b32c ("of: property: Add fw_devlink support for interrupt-map property") Signed-off-by: Marc Zyngier Co-developed-by: Rob Herring (Arm) Cc: Saravana Kannan Tested-by: Marc Zyngier Tested-by: Anup Patel Link: https://lore.kernel.org/r/20240529-dt-interrupt-map-fix-v2-2-ef86dc5bcd2a@kernel.org Signed-off-by: Rob Herring (Arm) --- drivers/of/property.c | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/drivers/of/property.c b/drivers/of/property.c index 1c83e68f805ba..164d77cb94458 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -1306,10 +1306,10 @@ static struct device_node *parse_interrupts(struct device_node *np, static struct device_node *parse_interrupt_map(struct device_node *np, const char *prop_name, int index) { - const __be32 *imap, *imap_end, *addr; + const __be32 *imap, *imap_end; struct of_phandle_args sup_args; u32 addrcells, intcells; - int i, imaplen; + int imaplen; if (!IS_ENABLED(CONFIG_OF_IRQ)) return NULL; @@ -1322,33 +1322,23 @@ static struct device_node *parse_interrupt_map(struct device_node *np, addrcells = of_bus_n_addr_cells(np); imap = of_get_property(np, "interrupt-map", &imaplen); - if (!imap || imaplen <= (addrcells + intcells)) + imaplen /= sizeof(*imap); + if (!imap) return NULL; - imap_end = imap + imaplen; - while (imap < imap_end) { - addr = imap; - imap += addrcells; + imap_end = imap + imaplen; - sup_args.np = np; - sup_args.args_count = intcells; - for (i = 0; i < intcells; i++) - sup_args.args[i] = be32_to_cpu(imap[i]); - imap += intcells; + for (int i = 0; imap + addrcells + intcells + 1 < imap_end; i++) { + imap += addrcells + intcells; - /* - * Upon success, the function of_irq_parse_raw() returns - * interrupt controller DT node pointer in sup_args.np. - */ - if (of_irq_parse_raw(addr, &sup_args)) + imap = of_irq_parse_imap_parent(imap, imap_end - imap, &sup_args); + if (!imap) return NULL; - if (!index) + if (i == index) return sup_args.np; of_node_put(sup_args.np); - imap += sup_args.args_count + 1; - index--; } return NULL; From 2d707b4e37f9b0c37b8b2392f91b04c5b63ea538 Mon Sep 17 00:00:00 2001 From: Yong-Xuan Wang Date: Mon, 15 Apr 2024 14:49:04 +0800 Subject: [PATCH 148/537] RISC-V: KVM: No need to use mask when hart-index-bit is 0 When the maximum hart number within groups is 1, hart-index-bit is set to 0. Consequently, there is no need to restore the hart ID from IMSIC addresses and hart-index-bit settings. Currently, QEMU and kvmtool do not pass correct hart-index-bit values when the maximum hart number is a power of 2, thereby avoiding this issue. Corresponding patches for QEMU and kvmtool will also be dispatched. 
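Why the extra test matters, as a generic sketch (extract_field() is not a kernel function): GENMASK_ULL(h, l) is only meaningful for h >= l, so asking for "bits - 1" with bits equal to 0 requests GENMASK_ULL(-1, 0) and produces an undefined, bogus mask instead of 0:

  #include <linux/bits.h>
  #include <linux/types.h>

  static u32 extract_field(u64 addr, unsigned int shift, unsigned int bits)
  {
          if (!bits)      /* nothing is encoded in the address */
                  return 0;

          return (addr >> shift) & GENMASK_ULL(bits - 1, 0);
  }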
Fixes: 89d01306e34d ("RISC-V: KVM: Implement device interface for AIA irqchip") Signed-off-by: Yong-Xuan Wang Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20240415064905.25184-1-yongxuan.wang@sifive.com Signed-off-by: Anup Patel --- arch/riscv/kvm/aia_device.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c index 0eb689351b7d0..5cd407c6a8e4f 100644 --- a/arch/riscv/kvm/aia_device.c +++ b/arch/riscv/kvm/aia_device.c @@ -237,10 +237,11 @@ static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr) static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr) { - u32 hart, group = 0; + u32 hart = 0, group = 0; - hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) & - GENMASK_ULL(aia->nr_hart_bits - 1, 0); + if (aia->nr_hart_bits) + hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) & + GENMASK_ULL(aia->nr_hart_bits - 1, 0); if (aia->nr_group_bits) group = (addr >> aia->nr_group_shift) & GENMASK_ULL(aia->nr_group_bits - 1, 0); From c66f3b40b17d3dfc4b6abb5efde8e71c46971821 Mon Sep 17 00:00:00 2001 From: Quan Zhou Date: Thu, 23 May 2024 10:13:34 +0800 Subject: [PATCH 149/537] RISC-V: KVM: Fix incorrect reg_subtype labels in kvm_riscv_vcpu_set_reg_isa_ext function In the function kvm_riscv_vcpu_set_reg_isa_ext, the original code used incorrect reg_subtype labels KVM_REG_RISCV_SBI_MULTI_EN/DIS. These have been corrected to KVM_REG_RISCV_ISA_MULTI_EN/DIS respectively. Although they are numerically equivalent, the actual processing will not result in errors, but it may lead to ambiguous code semantics. Fixes: 613029442a4b ("RISC-V: KVM: Extend ONE_REG to enable/disable multiple ISA extensions") Signed-off-by: Quan Zhou Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/ff1c6771a67d660db94372ac9aaa40f51e5e0090.1716429371.git.zhouquan@iscas.ac.cn Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu_onereg.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index c676275ea0a01..62874fbca29ff 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -724,9 +724,9 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu, switch (reg_subtype) { case KVM_REG_RISCV_ISA_SINGLE: return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val); - case KVM_REG_RISCV_SBI_MULTI_EN: + case KVM_REG_RISCV_ISA_MULTI_EN: return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true); - case KVM_REG_RISCV_SBI_MULTI_DIS: + case KVM_REG_RISCV_ISA_MULTI_DIS: return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false); default: return -ENOENT; From e0eec24e2e199873f43df99ec39773ad3af2bff7 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 29 May 2024 09:39:10 +0200 Subject: [PATCH 150/537] memblock: make memblock_set_node() also warn about use of MAX_NUMNODES On an (old) x86 system with SRAT just covering space above 4Gb: ACPI: SRAT: Node 0 PXM 0 [mem 0x100000000-0xfffffffff] hotplug the commit referenced below leads to this NUMA configuration no longer being refused by a CONFIG_NUMA=y kernel (previously NUMA: nodes only cover 6144MB of your 8185MB e820 RAM. Not used. No NUMA configuration found Faking a node at [mem 0x0000000000000000-0x000000027fffffff] was seen in the log directly after the message quoted above), because of memblock_validate_numa_coverage() checking for NUMA_NO_NODE (only). 
This in turn led to memblock_alloc_range_nid()'s warning about MAX_NUMNODES triggering, followed by a NULL deref in memmap_init() when trying to access node 64's (NODE_SHIFT=6) node data. To compensate said change, make memblock_set_node() warn on and adjust a passed in value of MAX_NUMNODES, just like various other functions already do. Fixes: ff6c3d81f2e8 ("NUMA: optimize detection of memory with no node id assigned by firmware") Signed-off-by: Jan Beulich Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/1c8a058c-5365-4f27-a9f1-3aeb7fb3e7b2@suse.com Signed-off-by: Mike Rapoport (IBM) --- mm/memblock.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/memblock.c b/mm/memblock.c index d09136e040d3c..08e9806b1cf91 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1339,6 +1339,10 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, int start_rgn, end_rgn; int i, ret; + if (WARN_ONCE(nid == MAX_NUMNODES, + "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) + nid = NUMA_NO_NODE; + ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); if (ret) return ret; From 985cfe501b74f214905ab4817acee0df24627268 Mon Sep 17 00:00:00 2001 From: Aapo Vienamo Date: Fri, 24 May 2024 18:53:17 +0300 Subject: [PATCH 151/537] thunderbolt: debugfs: Fix margin debugfs node creation condition The margin debugfs node controls the "Enable Margin Test" field of the lane margining operations. This field selects between either low or high voltage margin values for voltage margin test or left or right timing margin values for timing margin test. According to the USB4 specification, whether or not the "Enable Margin Test" control applies, depends on the values of the "Independent High/Low Voltage Margin" or "Independent Left/Right Timing Margin" capability fields for voltage and timing margin tests respectively. The pre-existing condition enabled the debugfs node also in the case where both low/high or left/right margins are returned, which is incorrect. This change only enables the debugfs node in question, if the specific required capability values are met. Signed-off-by: Aapo Vienamo Fixes: d0f1e0c2a699 ("thunderbolt: Add support for receiver lane margining") Cc: stable@vger.kernel.org Signed-off-by: Mika Westerberg --- drivers/thunderbolt/debugfs.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c index 193e9dfc983b6..70b52aac3d97d 100644 --- a/drivers/thunderbolt/debugfs.c +++ b/drivers/thunderbolt/debugfs.c @@ -943,8 +943,9 @@ static void margining_port_init(struct tb_port *port) debugfs_create_file("run", 0600, dir, port, &margining_run_fops); debugfs_create_file("results", 0600, dir, port, &margining_results_fops); debugfs_create_file("test", 0600, dir, port, &margining_test_fops); - if (independent_voltage_margins(usb4) || - (supports_time(usb4) && independent_time_margins(usb4))) + if (independent_voltage_margins(usb4) == USB4_MARGIN_CAP_0_VOLTAGE_HL || + (supports_time(usb4) && + independent_time_margins(usb4) == USB4_MARGIN_CAP_1_TIME_LR)) debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops); } From 539d33b5783804f22a62bd62ff463dfd1cef4265 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Thu, 16 May 2024 14:37:24 +0100 Subject: [PATCH 152/537] drm/komeda: remove unused struct 'gamma_curve_segment' 'gamma_curve_segment' looks like it has never been used. Remove it. Signed-off-by: Dr. 
David Alan Gilbert Reviewed-by: Dmitry Baryshkov Acked-by: Liviu Dudau Link: https://patchwork.freedesktop.org/patch/msgid/20240516133724.251750-1-linux@treblig.org Signed-off-by: Liviu Dudau --- drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c index d8e449e6ebda2..50cb8f7ee6b2c 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_color_mgmt.c @@ -72,11 +72,6 @@ struct gamma_curve_sector { u32 segment_width; }; -struct gamma_curve_segment { - u32 start; - u32 end; -}; - static struct gamma_curve_sector sector_tbl[] = { { 0, 4, 4 }, { 16, 4, 4 }, From 0f42bdf59b4e428485aa922bef871bfa6cc505e0 Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Fri, 31 May 2024 18:43:37 +0900 Subject: [PATCH 153/537] selftests/tracing: Fix event filter test to retry up to 10 times Commit eb50d0f250e9 ("selftests/ftrace: Choose target function for filter test from samples") choose the target function from samples, but sometimes this test failes randomly because the target function does not hit at the next time. So retry getting samples up to 10 times. Fixes: eb50d0f250e9 ("selftests/ftrace: Choose target function for filter test from samples") Signed-off-by: Masami Hiramatsu (Google) Signed-off-by: Shuah Khan --- .../test.d/filter/event-filter-function.tc | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc index 3f74c09c56b62..118247b8dd84d 100644 --- a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc +++ b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc @@ -10,7 +10,6 @@ fail() { #msg } sample_events() { - echo > trace echo 1 > events/kmem/kmem_cache_free/enable echo 1 > tracing_on ls > /dev/null @@ -22,6 +21,7 @@ echo 0 > tracing_on echo 0 > events/enable echo "Get the most frequently calling function" +echo > trace sample_events target_func=`cat trace | grep -o 'call_site=\([^+]*\)' | sed 's/call_site=//' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'` @@ -32,7 +32,16 @@ echo > trace echo "Test event filter function name" echo "call_site.function == $target_func" > events/kmem/kmem_cache_free/filter + +sample_events +max_retry=10 +while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do sample_events +max_retry=$((max_retry - 1)) +if [ $max_retry -eq 0 ]; then + exit_fail +fi +done hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l` misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l` @@ -49,7 +58,16 @@ address=`grep " ${target_func}\$" /proc/kallsyms | cut -d' ' -f1` echo "Test event filter function address" echo "call_site.function == 0x$address" > events/kmem/kmem_cache_free/filter +echo > trace +sample_events +max_retry=10 +while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do sample_events +max_retry=$((max_retry - 1)) +if [ $max_retry -eq 0 ]; then + exit_fail +fi +done hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l` misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l` From 7dc3bfcb4c9cc58970fff6aaa48172cb224d85aa Mon Sep 17 00:00:00 2001 From: Chunguang Xu Date: Fri, 31 May 2024 17:24:21 +0800 Subject: [PATCH 154/537] nvme-fabrics: use reserved tag for reg read/write command In 
some scenarios, if too many commands are issued by nvme command in the same time by user tasks, this may exhaust all tags of admin_q. If a reset (nvme reset or IO timeout) occurs before these commands finish, reconnect routine may fail to update nvme regs due to insufficient tags, which will cause kernel hang forever. In order to workaround this issue, maybe we can let reg_read32()/reg_read64()/reg_write32() use reserved tags. This maybe safe for nvmf: 1. For the disable ctrl path, we will not issue connect command 2. For the enable ctrl / fw activate path, since connect and reg_xx() are called serially. So the reserved tags may still be enough while reg_xx() use reserved tags. Signed-off-by: Chunguang Xu Reviewed-by: Sagi Grimberg Reviewed-by: Chaitanya Kulkarni Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/fabrics.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index c6ad2148c2e04..ceb9c0ed3120b 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -180,7 +180,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) cmd.prop_get.offset = cpu_to_le32(off); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, - NVME_QID_ANY, 0); + NVME_QID_ANY, NVME_SUBMIT_RESERVED); if (ret >= 0) *val = le64_to_cpu(res.u64); @@ -226,7 +226,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) cmd.prop_get.offset = cpu_to_le32(off); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, - NVME_QID_ANY, 0); + NVME_QID_ANY, NVME_SUBMIT_RESERVED); if (ret >= 0) *val = le64_to_cpu(res.u64); @@ -271,7 +271,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) cmd.prop_set.value = cpu_to_le64(val); ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, - NVME_QID_ANY, 0); + NVME_QID_ANY, NVME_SUBMIT_RESERVED); if (unlikely(ret)) dev_err(ctrl->device, "Property Set error: %d, offset %#x\n", From 32c75ad4a79259609ee19f749832bc2d99bbdd13 Mon Sep 17 00:00:00 2001 From: John Hubbard Date: Fri, 31 May 2024 13:07:53 -0700 Subject: [PATCH 155/537] selftests/futex: don't redefine .PHONY targets (all, clean) The .PHONY targets "all" and "clean" are both already defined in the file that is included in the very next line: ../lib.mk. Remove this duplicate code. Reviewed-by: Davidlohr Bueso Signed-off-by: John Hubbard Signed-off-by: Shuah Khan --- tools/testing/selftests/futex/Makefile | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile index 11e157d7533b8..78ab2cd111f60 100644 --- a/tools/testing/selftests/futex/Makefile +++ b/tools/testing/selftests/futex/Makefile @@ -3,8 +3,6 @@ SUBDIRS := functional TEST_PROGS := run.sh -.PHONY: all clean - include ../lib.mk all: From 4bf15b1c657d22d1d70173e43264e4606dfe75ff Mon Sep 17 00:00:00 2001 From: John Hubbard Date: Fri, 31 May 2024 13:07:54 -0700 Subject: [PATCH 156/537] selftests/futex: don't pass a const char* to asprintf(3) When building with clang, via: make LLVM=1 -C tools/testing/selftests ...clang issues this warning: futex_requeue_pi.c:403:17: warning: passing 'const char **' to parameter of type 'char **' discards qualifiers in nested pointer types [-Wincompatible-pointer-types-discards-qualifiers] This warning fires because test_name is passed into asprintf(3), which then changes it. Fix this by simply removing the const qualifier. 
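A minimal illustration of the prototype mismatch, independent of the selftest:

  #define _GNU_SOURCE             /* asprintf() is a GNU extension */
  #include <stdio.h>
  #include <stdlib.h>

  int main(void)
  {
          /*
           * int asprintf(char **strp, const char *fmt, ...);
           * asprintf() writes through its first argument, so passing the
           * address of a "const char *" hands a const char ** to a char **
           * parameter, and clang warns about the discarded qualifier.
           */
          char *test_name;

          if (asprintf(&test_name, "futex_requeue_pi run #%d", 1) < 0)
                  return 1;

          puts(test_name);
          free(test_name);
          return 0;
  }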
This is a local automatic variable in a very short function, so there is not much need to use the compiler to enforce const-ness at this scope. [1] https://lore.kernel.org/all/20240329-selftests-libmk-llvm-rfc-v1-1-2f9ed7d1c49f@valentinobst.de/ Fixes: f17d8a87ecb5 ("selftests: fuxex: Report a unique test name per run of futex_requeue_pi") Reviewed-by: Davidlohr Bueso Signed-off-by: John Hubbard Signed-off-by: Shuah Khan --- tools/testing/selftests/futex/functional/futex_requeue_pi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi.c b/tools/testing/selftests/futex/functional/futex_requeue_pi.c index 7f3ca5c78df12..215c6cb539b4a 100644 --- a/tools/testing/selftests/futex/functional/futex_requeue_pi.c +++ b/tools/testing/selftests/futex/functional/futex_requeue_pi.c @@ -360,7 +360,7 @@ int unit_test(int broadcast, long lock, int third_party_owner, long timeout_ns) int main(int argc, char *argv[]) { - const char *test_name; + char *test_name; int c, ret; while ((c = getopt(argc, argv, "bchlot:v:")) != -1) { From b1a1fdd7096dd2d67911b07f8118ff113d815db4 Mon Sep 17 00:00:00 2001 From: Weiwen Hu Date: Thu, 30 May 2024 14:16:46 +0800 Subject: [PATCH 157/537] nvme: fix nvme_pr_* status code parsing Fix the parsing if extra status bits (e.g. MORE) is present. Fixes: 7fb42780d06c ("nvme: Convert NVMe errors to PR errors") Signed-off-by: Weiwen Hu Signed-off-by: Keith Busch --- drivers/nvme/host/pr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c index e05571b2a1b0c..8fa1ffcdaed48 100644 --- a/drivers/nvme/host/pr.c +++ b/drivers/nvme/host/pr.c @@ -77,7 +77,7 @@ static int nvme_sc_to_pr_err(int nvme_sc) if (nvme_is_path_error(nvme_sc)) return PR_STS_PATH_FAILED; - switch (nvme_sc) { + switch (nvme_sc & 0x7ff) { case NVME_SC_SUCCESS: return PR_STS_SUCCESS; case NVME_SC_RESERVATION_CONFLICT: From b7c5e64fecfa88764791679cca4786ac65de739e Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Wed, 29 May 2024 22:52:30 -0600 Subject: [PATCH 158/537] vfio: Create vfio_fs_type with inode per device By linking all the device fds we provide to userspace to an address space through a new pseudo fs, we can use tools like unmap_mapping_range() to zap all vmas associated with a device. Suggested-by: Jason Gunthorpe Reviewed-by: Jason Gunthorpe Reviewed-by: Kevin Tian Link: https://lore.kernel.org/r/20240530045236.1005864-2-alex.williamson@redhat.com Signed-off-by: Alex Williamson --- drivers/vfio/device_cdev.c | 7 ++++++ drivers/vfio/group.c | 7 ++++++ drivers/vfio/vfio_main.c | 44 ++++++++++++++++++++++++++++++++++++++ include/linux/vfio.h | 1 + 4 files changed, 59 insertions(+) diff --git a/drivers/vfio/device_cdev.c b/drivers/vfio/device_cdev.c index e75da0a70d1f8..bb1817bd4ff31 100644 --- a/drivers/vfio/device_cdev.c +++ b/drivers/vfio/device_cdev.c @@ -39,6 +39,13 @@ int vfio_device_fops_cdev_open(struct inode *inode, struct file *filep) filep->private_data = df; + /* + * Use the pseudo fs inode on the device to link all mmaps + * to the same address space, allowing us to unmap all vmas + * associated to this device using unmap_mapping_range(). 
+ */ + filep->f_mapping = device->inode->i_mapping; + return 0; err_put_registration: diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c index 610a429c61912..ded364588d297 100644 --- a/drivers/vfio/group.c +++ b/drivers/vfio/group.c @@ -286,6 +286,13 @@ static struct file *vfio_device_open_file(struct vfio_device *device) */ filep->f_mode |= (FMODE_PREAD | FMODE_PWRITE); + /* + * Use the pseudo fs inode on the device to link all mmaps + * to the same address space, allowing us to unmap all vmas + * associated to this device using unmap_mapping_range(). + */ + filep->f_mapping = device->inode->i_mapping; + if (device->group->type == VFIO_NO_IOMMU) dev_warn(device->dev, "vfio-noiommu device opened by user " "(%s:%d)\n", current->comm, task_pid_nr(current)); diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c index e97d796a54fba..a5a62d9d963f7 100644 --- a/drivers/vfio/vfio_main.c +++ b/drivers/vfio/vfio_main.c @@ -22,8 +22,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -43,9 +45,13 @@ #define DRIVER_AUTHOR "Alex Williamson " #define DRIVER_DESC "VFIO - User Level meta-driver" +#define VFIO_MAGIC 0x5646494f /* "VFIO" */ + static struct vfio { struct class *device_class; struct ida device_ida; + struct vfsmount *vfs_mount; + int fs_count; } vfio; #ifdef CONFIG_VFIO_NOIOMMU @@ -186,6 +192,8 @@ static void vfio_device_release(struct device *dev) if (device->ops->release) device->ops->release(device); + iput(device->inode); + simple_release_fs(&vfio.vfs_mount, &vfio.fs_count); kvfree(device); } @@ -228,6 +236,34 @@ struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev, } EXPORT_SYMBOL_GPL(_vfio_alloc_device); +static int vfio_fs_init_fs_context(struct fs_context *fc) +{ + return init_pseudo(fc, VFIO_MAGIC) ? 0 : -ENOMEM; +} + +static struct file_system_type vfio_fs_type = { + .name = "vfio", + .owner = THIS_MODULE, + .init_fs_context = vfio_fs_init_fs_context, + .kill_sb = kill_anon_super, +}; + +static struct inode *vfio_fs_inode_new(void) +{ + struct inode *inode; + int ret; + + ret = simple_pin_fs(&vfio_fs_type, &vfio.vfs_mount, &vfio.fs_count); + if (ret) + return ERR_PTR(ret); + + inode = alloc_anon_inode(vfio.vfs_mount->mnt_sb); + if (IS_ERR(inode)) + simple_release_fs(&vfio.vfs_mount, &vfio.fs_count); + + return inode; +} + /* * Initialize a vfio_device so it can be registered to vfio core. 
*/ @@ -246,6 +282,11 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev, init_completion(&device->comp); device->dev = dev; device->ops = ops; + device->inode = vfio_fs_inode_new(); + if (IS_ERR(device->inode)) { + ret = PTR_ERR(device->inode); + goto out_inode; + } if (ops->init) { ret = ops->init(device); @@ -260,6 +301,9 @@ static int vfio_init_device(struct vfio_device *device, struct device *dev, return 0; out_uninit: + iput(device->inode); + simple_release_fs(&vfio.vfs_mount, &vfio.fs_count); +out_inode: vfio_release_device_set(device); ida_free(&vfio.device_ida, device->index); return ret; diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 8b1a298204091..000a6cab2d318 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -64,6 +64,7 @@ struct vfio_device { struct completion comp; struct iommufd_access *iommufd_access; void (*put_kvm)(struct kvm *kvm); + struct inode *inode; #if IS_ENABLED(CONFIG_IOMMUFD) struct iommufd_device *iommufd_device; u8 iommufd_attached:1; From aac6db75a9fc2c7a6f73e152df8f15101dda38e6 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Wed, 29 May 2024 22:52:31 -0600 Subject: [PATCH 159/537] vfio/pci: Use unmap_mapping_range() With the vfio device fd tied to the address space of the pseudo fs inode, we can use the mm to track all vmas that might be mmap'ing device BARs, which removes our vma_list and all the complicated lock ordering necessary to manually zap each related vma. Note that we can no longer store the pfn in vm_pgoff if we want to use unmap_mapping_range() to zap a selective portion of the device fd corresponding to BAR mappings. This also converts our mmap fault handler to use vmf_insert_pfn() because we no longer have a vma_list to avoid the concurrency problem with io_remap_pfn_range(). The goal is to eventually use the vm_ops huge_fault handler to avoid the additional faulting overhead, but vmf_insert_pfn_{pmd,pud}() need to learn about pfnmaps first. Also, Jason notes that a race exists between unmap_mapping_range() and the fops mmap callback if we were to call io_remap_pfn_range() to populate the vma on mmap. Specifically, mmap_region() does call_mmap() before it does vma_link_file() which gives a window where the vma is populated but invisible to unmap_mapping_range(). 
Suggested-by: Jason Gunthorpe Reviewed-by: Jason Gunthorpe Reviewed-by: Kevin Tian Link: https://lore.kernel.org/r/20240530045236.1005864-3-alex.williamson@redhat.com Signed-off-by: Alex Williamson --- drivers/vfio/pci/vfio_pci_core.c | 264 +++++++------------------------ include/linux/vfio_pci_core.h | 2 - 2 files changed, 55 insertions(+), 211 deletions(-) diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index 80cae87fff36e..db31c27bf78bc 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -1610,100 +1610,20 @@ ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *bu } EXPORT_SYMBOL_GPL(vfio_pci_core_write); -/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */ -static int vfio_pci_zap_and_vma_lock(struct vfio_pci_core_device *vdev, bool try) +static void vfio_pci_zap_bars(struct vfio_pci_core_device *vdev) { - struct vfio_pci_mmap_vma *mmap_vma, *tmp; + struct vfio_device *core_vdev = &vdev->vdev; + loff_t start = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX); + loff_t end = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_ROM_REGION_INDEX); + loff_t len = end - start; - /* - * Lock ordering: - * vma_lock is nested under mmap_lock for vm_ops callback paths. - * The memory_lock semaphore is used by both code paths calling - * into this function to zap vmas and the vm_ops.fault callback - * to protect the memory enable state of the device. - * - * When zapping vmas we need to maintain the mmap_lock => vma_lock - * ordering, which requires using vma_lock to walk vma_list to - * acquire an mm, then dropping vma_lock to get the mmap_lock and - * reacquiring vma_lock. This logic is derived from similar - * requirements in uverbs_user_mmap_disassociate(). - * - * mmap_lock must always be the top-level lock when it is taken. - * Therefore we can only hold the memory_lock write lock when - * vma_list is empty, as we'd need to take mmap_lock to clear - * entries. vma_list can only be guaranteed empty when holding - * vma_lock, thus memory_lock is nested under vma_lock. - * - * This enables the vm_ops.fault callback to acquire vma_lock, - * followed by memory_lock read lock, while already holding - * mmap_lock without risk of deadlock. 
- */ - while (1) { - struct mm_struct *mm = NULL; - - if (try) { - if (!mutex_trylock(&vdev->vma_lock)) - return 0; - } else { - mutex_lock(&vdev->vma_lock); - } - while (!list_empty(&vdev->vma_list)) { - mmap_vma = list_first_entry(&vdev->vma_list, - struct vfio_pci_mmap_vma, - vma_next); - mm = mmap_vma->vma->vm_mm; - if (mmget_not_zero(mm)) - break; - - list_del(&mmap_vma->vma_next); - kfree(mmap_vma); - mm = NULL; - } - if (!mm) - return 1; - mutex_unlock(&vdev->vma_lock); - - if (try) { - if (!mmap_read_trylock(mm)) { - mmput(mm); - return 0; - } - } else { - mmap_read_lock(mm); - } - if (try) { - if (!mutex_trylock(&vdev->vma_lock)) { - mmap_read_unlock(mm); - mmput(mm); - return 0; - } - } else { - mutex_lock(&vdev->vma_lock); - } - list_for_each_entry_safe(mmap_vma, tmp, - &vdev->vma_list, vma_next) { - struct vm_area_struct *vma = mmap_vma->vma; - - if (vma->vm_mm != mm) - continue; - - list_del(&mmap_vma->vma_next); - kfree(mmap_vma); - - zap_vma_ptes(vma, vma->vm_start, - vma->vm_end - vma->vm_start); - } - mutex_unlock(&vdev->vma_lock); - mmap_read_unlock(mm); - mmput(mm); - } + unmap_mapping_range(core_vdev->inode->i_mapping, start, len, true); } void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev) { - vfio_pci_zap_and_vma_lock(vdev, false); down_write(&vdev->memory_lock); - mutex_unlock(&vdev->vma_lock); + vfio_pci_zap_bars(vdev); } u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev) @@ -1725,99 +1645,41 @@ void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, u16 c up_write(&vdev->memory_lock); } -/* Caller holds vma_lock */ -static int __vfio_pci_add_vma(struct vfio_pci_core_device *vdev, - struct vm_area_struct *vma) -{ - struct vfio_pci_mmap_vma *mmap_vma; - - mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL_ACCOUNT); - if (!mmap_vma) - return -ENOMEM; - - mmap_vma->vma = vma; - list_add(&mmap_vma->vma_next, &vdev->vma_list); - - return 0; -} - -/* - * Zap mmaps on open so that we can fault them in on access and therefore - * our vma_list only tracks mappings accessed since last zap. - */ -static void vfio_pci_mmap_open(struct vm_area_struct *vma) -{ - zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); -} - -static void vfio_pci_mmap_close(struct vm_area_struct *vma) +static unsigned long vma_to_pfn(struct vm_area_struct *vma) { struct vfio_pci_core_device *vdev = vma->vm_private_data; - struct vfio_pci_mmap_vma *mmap_vma; + int index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); + u64 pgoff; - mutex_lock(&vdev->vma_lock); - list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { - if (mmap_vma->vma == vma) { - list_del(&mmap_vma->vma_next); - kfree(mmap_vma); - break; - } - } - mutex_unlock(&vdev->vma_lock); + pgoff = vma->vm_pgoff & + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + + return (pci_resource_start(vdev->pdev, index) >> PAGE_SHIFT) + pgoff; } static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct vfio_pci_core_device *vdev = vma->vm_private_data; - struct vfio_pci_mmap_vma *mmap_vma; - vm_fault_t ret = VM_FAULT_NOPAGE; + unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff; + vm_fault_t ret = VM_FAULT_SIGBUS; - mutex_lock(&vdev->vma_lock); - down_read(&vdev->memory_lock); + pfn = vma_to_pfn(vma); - /* - * Memory region cannot be accessed if the low power feature is engaged - * or memory access is disabled. 
- */ - if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) { - ret = VM_FAULT_SIGBUS; - goto up_out; - } + down_read(&vdev->memory_lock); - /* - * We populate the whole vma on fault, so we need to test whether - * the vma has already been mapped, such as for concurrent faults - * to the same vma. io_remap_pfn_range() will trigger a BUG_ON if - * we ask it to fill the same range again. - */ - list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { - if (mmap_vma->vma == vma) - goto up_out; - } + if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) + goto out_disabled; - if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) { - ret = VM_FAULT_SIGBUS; - zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); - goto up_out; - } + ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff); - if (__vfio_pci_add_vma(vdev, vma)) { - ret = VM_FAULT_OOM; - zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); - } - -up_out: +out_disabled: up_read(&vdev->memory_lock); - mutex_unlock(&vdev->vma_lock); + return ret; } static const struct vm_operations_struct vfio_pci_mmap_ops = { - .open = vfio_pci_mmap_open, - .close = vfio_pci_mmap_close, .fault = vfio_pci_mmap_fault, }; @@ -1880,11 +1742,12 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma vma->vm_private_data = vdev; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff; + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); /* - * See remap_pfn_range(), called from vfio_pci_fault() but we can't - * change vm_flags within the fault handler. Set them now. + * Set vm_flags now, they should not be changed in the fault handler. + * We want the same flags and page protection (decrypted above) as + * io_remap_pfn_range() would set. * * VM_ALLOW_ANY_UNCACHED: The VMA flag is implemented for ARM64, * allowing KVM stage 2 device mapping attributes to use Normal-NC @@ -2202,8 +2065,6 @@ int vfio_pci_core_init_dev(struct vfio_device *core_vdev) mutex_init(&vdev->ioeventfds_lock); INIT_LIST_HEAD(&vdev->dummy_resources_list); INIT_LIST_HEAD(&vdev->ioeventfds_list); - mutex_init(&vdev->vma_lock); - INIT_LIST_HEAD(&vdev->vma_list); INIT_LIST_HEAD(&vdev->sriov_pfs_item); init_rwsem(&vdev->memory_lock); xa_init(&vdev->ctx); @@ -2219,7 +2080,6 @@ void vfio_pci_core_release_dev(struct vfio_device *core_vdev) mutex_destroy(&vdev->igate); mutex_destroy(&vdev->ioeventfds_lock); - mutex_destroy(&vdev->vma_lock); kfree(vdev->region); kfree(vdev->pm_save); } @@ -2497,26 +2357,15 @@ static int vfio_pci_dev_set_pm_runtime_get(struct vfio_device_set *dev_set) return ret; } -/* - * We need to get memory_lock for each device, but devices can share mmap_lock, - * therefore we need to zap and hold the vma_lock for each device, and only then - * get each memory_lock. 
- */ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set, struct vfio_pci_group_info *groups, struct iommufd_ctx *iommufd_ctx) { - struct vfio_pci_core_device *cur_mem; - struct vfio_pci_core_device *cur_vma; - struct vfio_pci_core_device *cur; + struct vfio_pci_core_device *vdev; struct pci_dev *pdev; - bool is_mem = true; int ret; mutex_lock(&dev_set->lock); - cur_mem = list_first_entry(&dev_set->device_list, - struct vfio_pci_core_device, - vdev.dev_set_list); pdev = vfio_pci_dev_set_resettable(dev_set); if (!pdev) { @@ -2533,7 +2382,7 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set, if (ret) goto err_unlock; - list_for_each_entry(cur_vma, &dev_set->device_list, vdev.dev_set_list) { + list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) { bool owned; /* @@ -2557,38 +2406,38 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set, * Otherwise, reset is not allowed. */ if (iommufd_ctx) { - int devid = vfio_iommufd_get_dev_id(&cur_vma->vdev, + int devid = vfio_iommufd_get_dev_id(&vdev->vdev, iommufd_ctx); owned = (devid > 0 || devid == -ENOENT); } else { - owned = vfio_dev_in_groups(&cur_vma->vdev, groups); + owned = vfio_dev_in_groups(&vdev->vdev, groups); } if (!owned) { ret = -EINVAL; - goto err_undo; + break; } /* - * Locking multiple devices is prone to deadlock, runaway and - * unwind if we hit contention. + * Take the memory write lock for each device and zap BAR + * mappings to prevent the user accessing the device while in + * reset. Locking multiple devices is prone to deadlock, + * runaway and unwind if we hit contention. */ - if (!vfio_pci_zap_and_vma_lock(cur_vma, true)) { + if (!down_write_trylock(&vdev->memory_lock)) { ret = -EBUSY; - goto err_undo; + break; } + + vfio_pci_zap_bars(vdev); } - cur_vma = NULL; - list_for_each_entry(cur_mem, &dev_set->device_list, vdev.dev_set_list) { - if (!down_write_trylock(&cur_mem->memory_lock)) { - ret = -EBUSY; - goto err_undo; - } - mutex_unlock(&cur_mem->vma_lock); + if (!list_entry_is_head(vdev, + &dev_set->device_list, vdev.dev_set_list)) { + vdev = list_prev_entry(vdev, vdev.dev_set_list); + goto err_undo; } - cur_mem = NULL; /* * The pci_reset_bus() will reset all the devices in the bus. @@ -2599,25 +2448,22 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set, * cause the PCI config space reset without restoring the original * state (saved locally in 'vdev->pm_save'). 
*/ - list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) - vfio_pci_set_power_state(cur, PCI_D0); + list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) + vfio_pci_set_power_state(vdev, PCI_D0); ret = pci_reset_bus(pdev); + vdev = list_last_entry(&dev_set->device_list, + struct vfio_pci_core_device, vdev.dev_set_list); + err_undo: - list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) { - if (cur == cur_mem) - is_mem = false; - if (cur == cur_vma) - break; - if (is_mem) - up_write(&cur->memory_lock); - else - mutex_unlock(&cur->vma_lock); - } + list_for_each_entry_from_reverse(vdev, &dev_set->device_list, + vdev.dev_set_list) + up_write(&vdev->memory_lock); + + list_for_each_entry(vdev, &dev_set->device_list, vdev.dev_set_list) + pm_runtime_put(&vdev->pdev->dev); - list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) - pm_runtime_put(&cur->pdev->dev); err_unlock: mutex_unlock(&dev_set->lock); return ret; diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index a2c8b8bba7119..f87067438ed48 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -93,8 +93,6 @@ struct vfio_pci_core_device { struct list_head sriov_pfs_item; struct vfio_pci_core_device *sriov_pf_core_dev; struct notifier_block nb; - struct mutex vma_lock; - struct list_head vma_list; struct rw_semaphore memory_lock; }; From 62da3acd28955e7299babebdfcb14243b789e773 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 28 May 2024 15:32:18 -0700 Subject: [PATCH 160/537] selftests/bpf: fix inet_csk_accept prototype in test_sk_storage_tracing.c Recent kernel change ([0]) changed inet_csk_accept() prototype. Adapt progs/test_sk_storage_tracing.c to take that into account. [0] 92ef0fd55ac8 ("net: change proto and proto_ops accept type") Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20240528223218.3445297-1-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c index 02e718f06e0f5..40531e56776e4 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c +++ b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c @@ -84,7 +84,7 @@ int BPF_PROG(trace_tcp_connect, struct sock *sk) } SEC("fexit/inet_csk_accept") -int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern, +int BPF_PROG(inet_csk_accept, struct sock *sk, struct proto_accept_arg *arg, struct sock *accepted_sk) { set_task_info(accepted_sk); From aeb8fe0283d4d3b0f27a87c5f5c938e7324f7d8f Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 31 May 2024 21:45:00 +0200 Subject: [PATCH 161/537] bpf: Fix bpf_session_cookie BTF_ID in special_kfunc_set list The bpf_session_cookie is unavailable for !CONFIG_FPROBE as reported by Sebastian [1]. To fix that we remove CONFIG_FPROBE ifdef for session kfuncs, which is fine, because there's filter for session programs. Then based on bpf_trace.o dependency: obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o we add bpf_session_cookie BTF_ID in special_kfunc_set list dependency on CONFIG_BPF_EVENTS. 
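A loose userspace analogy for why the #else branch must emit BTF_ID_UNUSED rather than nothing (the macro-free table below is a simplified stand-in, not the kernel's BTF machinery): the special kfunc table is indexed by position, so a compiled-out entry still has to occupy its slot.

#include <stdio.h>

enum { FOO_ID = 100, SESSION_COOKIE_ID = 200, BAR_ID = 300 };

/* Simplified stand-ins for BTF_ID()/BTF_ID_UNUSED: the real macros emit
 * IDs into a dedicated section, but the positional requirement is the same. */
static const int special_kfunc_ids[] = {
	FOO_ID,
#ifdef EXAMPLE_BPF_EVENTS		/* hypothetical stand-in for CONFIG_BPF_EVENTS */
	SESSION_COOKIE_ID,
#else
	0,				/* placeholder keeps later entries at the same index */
#endif
	BAR_ID,
};

int main(void)
{
	/* BAR_ID is found at index 2 whether or not the middle entry is built in */
	printf("special_kfunc_ids[2] = %d\n", special_kfunc_ids[2]);
	return 0;
}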
[1] https://lore.kernel.org/bpf/20240531071557.MvfIqkn7@linutronix.de/T/#m71c6d5ec71db2967288cb79acedc15cc5dbfeec5 Reported-by: Sebastian Andrzej Siewior Tested-by: Sebastian Andrzej Siewior Suggested-by: Alexei Starovoitov Fixes: 5c919acef8514 ("bpf: Add support for kprobe session cookie") Signed-off-by: Jiri Olsa Link: https://lore.kernel.org/r/20240531194500.2967187-1-jolsa@kernel.org Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 4 ++++ kernel/trace/bpf_trace.c | 2 -- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 48f3a9acdef3d..36ef8e96787ed 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -11128,7 +11128,11 @@ BTF_ID(func, bpf_iter_css_task_new) #else BTF_ID_UNUSED #endif +#ifdef CONFIG_BPF_EVENTS BTF_ID(func, bpf_session_cookie) +#else +BTF_ID_UNUSED +#endif static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta) { diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 6249dac617018..d1daeab1bbc14 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -3517,7 +3517,6 @@ static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx) } #endif /* CONFIG_UPROBES */ -#ifdef CONFIG_FPROBE __bpf_kfunc_start_defs(); __bpf_kfunc bool bpf_session_is_return(void) @@ -3566,4 +3565,3 @@ static int __init bpf_kprobe_multi_kfuncs_init(void) } late_initcall(bpf_kprobe_multi_kfuncs_init); -#endif From 7d0b3953f6d832daec10a0d76e2d4db405768a8b Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 29 May 2024 16:12:12 -0700 Subject: [PATCH 162/537] libbpf: don't close(-1) in multi-uprobe feature detector Guard close(link_fd) with extra link_fd >= 0 check to prevent close(-1). Detected by Coverity static analysis. Fixes: 04d939a2ab22 ("libbpf: detect broken PID filtering logic for multi-uprobe") Signed-off-by: Andrii Nakryiko Acked-by: Jiri Olsa Link: https://lore.kernel.org/r/20240529231212.768828-1-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/features.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c index 3df0125ed5fa7..50befe125ddc5 100644 --- a/tools/lib/bpf/features.c +++ b/tools/lib/bpf/features.c @@ -393,7 +393,8 @@ static int probe_uprobe_multi_link(int token_fd) err = -errno; /* close() can clobber errno */ if (link_fd >= 0 || err != -EBADF) { - close(link_fd); + if (link_fd >= 0) + close(link_fd); close(prog_fd); return 0; } From ebfb5e8fc8b45040b979f4bf0012a01d0abac8d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexis=20Lothor=C3=A9?= Date: Tue, 28 May 2024 16:20:28 +0200 Subject: [PATCH 163/537] Revert "wifi: wilc1000: convert list management to RCU" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit f236464f1db7bea80075e6e31ac70dc6eb80547f Commit f236464f1db7 ("wifi: wilc1000: convert list management to RCU") replaced SRCU with RCU, aiming to simplify RCU usage in the driver. No documentation or commit history hinted about why SRCU has been preferred in original design, so it has been assumed to be safe to do this conversion. Unfortunately, some static analyzers raised warnings, confirmed by runtime checker, not long after the merge. 
At least three different issues arose when switching to RCU: - wilc_wlan_txq_filter_dup_tcp_ack is executed in a RCU read critical section yet calls wait_for_completion_timeout - wilc_wfi_init_mon_interface calls kmalloc and register_netdevice while manipulating a vif retrieved from vif list - set_channel sends command to chip (and so, also waits for a completion) while holding a vif retrieved from vif list (so, in RCU read critical section) Some of those issues are not trivial to fix and would need bigger driver rework. Fix those issues by reverting the SRCU to RCU conversion commit Reported-by: Dan Carpenter Closes: https://lore.kernel.org/linux-wireless/3b46ec7c-baee-49fd-b760-3bc12fb12eaf@moroto.mountain/ Fixes: f236464f1db7 ("wifi: wilc1000: convert list management to RCU") Signed-off-by: Alexis Lothoré Signed-off-by: Kalle Valo Link: https://msgid.link/20240528-wilc_revert_srcu_to_rcu-v1-1-bce096e0798c@bootlin.com --- .../wireless/microchip/wilc1000/cfg80211.c | 41 ++++++++++-------- drivers/net/wireless/microchip/wilc1000/hif.c | 15 ++++--- .../net/wireless/microchip/wilc1000/netdev.c | 43 +++++++++++-------- .../net/wireless/microchip/wilc1000/netdev.h | 5 ++- .../net/wireless/microchip/wilc1000/wlan.c | 5 ++- 5 files changed, 64 insertions(+), 45 deletions(-) diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index 7d9fb9f2d5279..089102ed9ae51 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -237,11 +237,12 @@ static int set_channel(struct wiphy *wiphy, struct wilc_vif *vif; u32 channelnum; int result; + int srcu_idx; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) { - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return PTR_ERR(vif); } @@ -252,7 +253,7 @@ static int set_channel(struct wiphy *wiphy, if (result) netdev_err(vif->ndev, "Error in setting channel\n"); - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return result; } @@ -805,8 +806,9 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed) struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; struct wilc_priv *priv; + int srcu_idx; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) goto out; @@ -861,7 +863,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed) netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n"); out: - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return ret; } @@ -1537,19 +1539,20 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy, if (type == NL80211_IFTYPE_MONITOR) { struct net_device *ndev; + int srcu_idx; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_vif_from_type(wl, WILC_AP_MODE); if (!vif) { vif = wilc_get_vif_from_type(wl, WILC_GO_MODE); if (!vif) { - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); goto validate_interface; } } if (vif->monitor_flag) { - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); goto validate_interface; } @@ -1557,12 +1560,12 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy, if (ndev) { vif->monitor_flag = 1; } else { - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return ERR_PTR(-EINVAL); } wdev = &vif->priv.wdev; - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return wdev; } @@ -1610,7 +1613,7 @@ static int del_virtual_intf(struct wiphy 
*wiphy, struct wireless_dev *wdev) list_del_rcu(&vif->list); wl->vif_num--; mutex_unlock(&wl->vif_mutex); - synchronize_rcu(); + synchronize_srcu(&wl->srcu); return 0; } @@ -1635,23 +1638,25 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled) { struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; + int srcu_idx; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) { - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return; } netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled); wilc_set_wowlan_trigger(vif, enabled); - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); } static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, int mbm) { int ret; + int srcu_idx; s32 tx_power = MBM_TO_DBM(mbm); struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; @@ -1659,10 +1664,10 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, if (!wl->initialized) return -EIO; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) { - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return -EINVAL; } @@ -1674,7 +1679,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, ret = wilc_set_tx_power(vif, tx_power); if (ret) netdev_err(vif->ndev, "Failed to set tx power\n"); - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return ret; } @@ -1757,6 +1762,7 @@ static void wlan_init_locks(struct wilc *wl) init_completion(&wl->cfg_event); init_completion(&wl->sync_event); init_completion(&wl->txq_thread_started); + init_srcu_struct(&wl->srcu); } void wlan_deinit_locks(struct wilc *wilc) @@ -1767,6 +1773,7 @@ void wlan_deinit_locks(struct wilc *wilc) mutex_destroy(&wilc->txq_add_to_head_cs); mutex_destroy(&wilc->vif_mutex); mutex_destroy(&wilc->deinit_lock); + cleanup_srcu_struct(&wilc->srcu); } int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type, diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index 919de6ffb8217..3925ca653e807 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -1570,11 +1570,12 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length) struct host_if_drv *hif_drv; struct host_if_msg *msg; struct wilc_vif *vif; + int srcu_idx; int result; int id; id = get_unaligned_le32(&buffer[length - 4]); - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); vif = wilc_get_vif_from_idx(wilc, id); if (!vif) goto out; @@ -1606,7 +1607,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length) kfree(msg); } out: - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); } void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length) @@ -1614,13 +1615,14 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length) struct host_if_drv *hif_drv; struct host_if_msg *msg; struct wilc_vif *vif; + int srcu_idx; int result; int id; mutex_lock(&wilc->deinit_lock); id = get_unaligned_le32(&buffer[length - 4]); - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); vif = wilc_get_vif_from_idx(wilc, id); if (!vif) goto out; @@ -1647,7 +1649,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length) kfree(msg); } out: - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); 
mutex_unlock(&wilc->deinit_lock); } @@ -1655,11 +1657,12 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length) { struct host_if_drv *hif_drv; struct wilc_vif *vif; + int srcu_idx; int result; int id; id = get_unaligned_le32(&buffer[length - 4]); - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); vif = wilc_get_vif_from_idx(wilc, id); if (!vif) goto out; @@ -1684,7 +1687,7 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length) } } out: - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); } int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan, diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 73f56f7b002bf..710e29bea5605 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -127,28 +127,30 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid, int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc) { + int srcu_idx; u8 ret_val = 0; struct wilc_vif *vif; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); wilc_for_each_vif(wilc, vif) { if (!is_zero_ether_addr(vif->bssid)) ret_val++; } - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); return ret_val; } static void wilc_wake_tx_queues(struct wilc *wl) { + int srcu_idx; struct wilc_vif *ifc; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wl->srcu); wilc_for_each_vif(wl, ifc) { if (ifc->mac_opened && netif_queue_stopped(ifc->ndev)) netif_wake_queue(ifc->ndev); } - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); } static int wilc_txq_task(void *vp) @@ -653,6 +655,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p) struct sockaddr *addr = (struct sockaddr *)p; unsigned char mac_addr[ETH_ALEN]; struct wilc_vif *tmp_vif; + int srcu_idx; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; @@ -664,19 +667,19 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p) /* Verify MAC Address is not already in use: */ - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); wilc_for_each_vif(wilc, tmp_vif) { wilc_get_mac_address(tmp_vif, mac_addr); if (ether_addr_equal(addr->sa_data, mac_addr)) { if (vif != tmp_vif) { - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); return -EADDRNOTAVAIL; } - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); return 0; } } - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); result = wilc_set_mac_address(vif, (u8 *)addr->sa_data); if (result) @@ -764,14 +767,15 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev) wilc_tx_complete); if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) { + int srcu_idx; struct wilc_vif *vif; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); wilc_for_each_vif(wilc, vif) { if (vif->mac_opened) netif_stop_queue(vif->ndev); } - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); } return NETDEV_TX_OK; @@ -815,12 +819,13 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size, unsigned int frame_len = 0; struct wilc_vif *vif; struct sk_buff *skb; + int srcu_idx; int stats; if (!wilc) return; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); wilc_netdev = get_if_handler(wilc, buff); if (!wilc_netdev) goto out; @@ -848,14 +853,15 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size, netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats); } out: - rcu_read_unlock(); + 
srcu_read_unlock(&wilc->srcu, srcu_idx); } void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth) { + int srcu_idx; struct wilc_vif *vif; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); wilc_for_each_vif(wilc, vif) { struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff; u16 type = le16_to_cpup((__le16 *)buff); @@ -876,7 +882,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth) if (vif->monitor_flag) wilc_wfi_monitor_rx(wilc->monitor_dev, buff, size); } - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); } static const struct net_device_ops wilc_netdev_ops = { @@ -906,7 +912,7 @@ void wilc_netdev_cleanup(struct wilc *wilc) list_del_rcu(&vif->list); wilc->vif_num--; mutex_unlock(&wilc->vif_mutex); - synchronize_rcu(); + synchronize_srcu(&wilc->srcu); if (vif->ndev) unregister_netdev(vif->ndev); } @@ -925,15 +931,16 @@ static u8 wilc_get_available_idx(struct wilc *wl) { int idx = 0; struct wilc_vif *vif; + int srcu_idx; - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wl->srcu); wilc_for_each_vif(wl, vif) { if (vif->idx == 0) idx = 1; else idx = 0; } - rcu_read_unlock(); + srcu_read_unlock(&wl->srcu, srcu_idx); return idx; } @@ -983,7 +990,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name, list_add_tail_rcu(&vif->list, &wl->vif_list); wl->vif_num += 1; mutex_unlock(&wl->vif_mutex); - synchronize_rcu(); + synchronize_srcu(&wl->srcu); return vif; diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h index eecee3973d6a4..5937d6d45695f 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.h +++ b/drivers/net/wireless/microchip/wilc1000/netdev.h @@ -32,8 +32,8 @@ #define wilc_for_each_vif(w, v) \ struct wilc *_w = w; \ - list_for_each_entry_rcu(v, &_w->vif_list, list, \ - rcu_read_lock_held()) + list_for_each_entry_srcu(v, &_w->vif_list, list, \ + srcu_read_lock_held(&_w->srcu)) struct wilc_wfi_stats { unsigned long rx_packets; @@ -220,6 +220,7 @@ struct wilc { /* protect vif list */ struct mutex vif_mutex; + struct srcu_struct srcu; u8 open_ifcs; /* protect head of transmit queue */ diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c index 37c32d17856ea..a9e872a7b2c38 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.c +++ b/drivers/net/wireless/microchip/wilc1000/wlan.c @@ -712,6 +712,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) u32 *vmm_table = wilc->vmm_table; u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0}; const struct wilc_hif_func *func; + int srcu_idx; u8 *txb = wilc->tx_buffer; struct wilc_vif *vif; @@ -723,10 +724,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) mutex_lock(&wilc->txq_add_to_head_cs); - rcu_read_lock(); + srcu_idx = srcu_read_lock(&wilc->srcu); wilc_for_each_vif(wilc, vif) wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev); - rcu_read_unlock(); + srcu_read_unlock(&wilc->srcu, srcu_idx); for (ac = 0; ac < NQUEUES; ac++) tqe_q[ac] = wilc_wlan_txq_get_first(wilc, ac); From 3596717a6fbd54c64e97e085a9f77ed511ff59f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexis=20Lothor=C3=A9?= Date: Tue, 28 May 2024 16:20:29 +0200 Subject: [PATCH 164/537] Revert "wifi: wilc1000: set atomic flag on kmemdup in srcu critical section" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 35aee01ff43d7eb6c2caa0b94e7cc6c45baeeab7 Commit 35aee01ff43d ("wifi: wilc1000: set atomic flag 
on kmemdup in srcu critical section") was preparatory to the SRCU to RCU conversion done by commit f236464f1db7 ("wifi: wilc1000: convert list management to RCU"). This conversion brought issues and so has been reverted, so the atomic flag is not needed anymore. Signed-off-by: Alexis Lothoré Signed-off-by: Kalle Valo Link: https://msgid.link/20240528-wilc_revert_srcu_to_rcu-v1-2-bce096e0798c@bootlin.com --- drivers/net/wireless/microchip/wilc1000/hif.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index 3925ca653e807..f1085ccb7eedc 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -1594,7 +1594,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length) msg->body.net_info.rssi = buffer[8]; msg->body.net_info.mgmt = kmemdup(&buffer[9], msg->body.net_info.frame_len, - GFP_ATOMIC); + GFP_KERNEL); if (!msg->body.net_info.mgmt) { kfree(msg); goto out; From 596c195680dceb34e6b994ff5571331d5dba8299 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexis=20Lothor=C3=A9?= Date: Tue, 28 May 2024 16:20:30 +0200 Subject: [PATCH 165/537] wifi: wilc1000: document SRCU usage instead of SRCU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit f236464f1db7 ("wifi: wilc1000: convert list management to RCU") attempted to convert SRCU to RCU usage, assuming it was not really needed. The runtime issues that arose after merging it showed that there are code paths involving sleeping functions, and removing those would need some heavier driver rework. Add some documentation about SRCU need to make sure that any future developer do not miss some use cases if tempted to convert back again to RCU. Signed-off-by: Alexis Lothoré Signed-off-by: Kalle Valo Link: https://msgid.link/20240528-wilc_revert_srcu_to_rcu-v1-3-bce096e0798c@bootlin.com --- drivers/net/wireless/microchip/wilc1000/netdev.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h index 5937d6d45695f..fde8610a9c84b 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.h +++ b/drivers/net/wireless/microchip/wilc1000/netdev.h @@ -220,6 +220,13 @@ struct wilc { /* protect vif list */ struct mutex vif_mutex; + /* Sleepable RCU struct to manipulate vif list. Sleepable version is + * needed over the classic RCU version because the driver's current + * design involves some sleeping code while manipulating a vif + * retrieved from vif list (so in a SRCU critical section), like: + * - sending commands to the chip, using info from retrieved vif + * - registering a new monitoring net device + */ struct srcu_struct srcu; u8 open_ifcs; From 40cecacabc460f5074398753feb9ed7d43e8dfa6 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 28 May 2024 14:23:08 +0200 Subject: [PATCH 166/537] wifi: mt76: mt7615: add missing chanctx ops Here's another one I missed during the initial conversion, fix that. 
Cc: stable@vger.kernel.org Reported-by: Rene Petersen Fixes: 0a44dfc07074 ("wifi: mac80211: simplify non-chanctx drivers") Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218895 Signed-off-by: Johannes Berg Signed-off-by: Kalle Valo Link: https://msgid.link/20240528142308.3f7db1821e68.I531135d7ad76331a50244d6d5288e14aa9668390@changeid --- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 0971c164b57e9..c27acaf0eb1cf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -1326,6 +1326,10 @@ static void mt7615_set_rekey_data(struct ieee80211_hw *hw, #endif /* CONFIG_PM */ const struct ieee80211_ops mt7615_ops = { + .add_chanctx = ieee80211_emulate_add_chanctx, + .remove_chanctx = ieee80211_emulate_remove_chanctx, + .change_chanctx = ieee80211_emulate_change_chanctx, + .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx, .tx = mt7615_tx, .start = mt7615_start, .stop = mt7615_stop, From 819bda58e77bb67974f94dc1aa11b0556b6f6889 Mon Sep 17 00:00:00 2001 From: Bitterblue Smith Date: Wed, 29 May 2024 20:19:47 +0300 Subject: [PATCH 167/537] wifi: rtlwifi: Ignore IEEE80211_CONF_CHANGE_RETRY_LIMITS Since commit 0a44dfc07074 ("wifi: mac80211: simplify non-chanctx drivers") ieee80211_hw_config() is no longer called with changed = ~0. rtlwifi relied on ~0 in order to ignore the default retry limits of 4/7, preferring 48/48 in station mode and 7/7 in AP/IBSS. RTL8192DU has a lot of packet loss with the default limits from mac80211. Fix it by ignoring IEEE80211_CONF_CHANGE_RETRY_LIMITS completely, because it's the simplest solution. Link: https://lore.kernel.org/linux-wireless/cedd13d7691f4692b2a2fa5a24d44a22@realtek.com/ Cc: stable@vger.kernel.org # 6.9.x Signed-off-by: Bitterblue Smith Acked-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://msgid.link/1fabb8e4-adf3-47ae-8462-8aea963bc2a5@gmail.com --- drivers/net/wireless/realtek/rtlwifi/core.c | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 2e60a6991ca16..42b7db12b1bd4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -633,21 +633,6 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) } } - if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { - rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, - "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n", - hw->conf.long_frame_max_tx_count); - /* brought up everything changes (changed == ~0) indicates first - * open, so use our default value instead of that of wiphy. - */ - if (changed != ~0) { - mac->retry_long = hw->conf.long_frame_max_tx_count; - mac->retry_short = hw->conf.long_frame_max_tx_count; - rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT, - (u8 *)(&hw->conf.long_frame_max_tx_count)); - } - } - if (changed & IEEE80211_CONF_CHANGE_CHANNEL && !rtlpriv->proximity.proxim_on) { struct ieee80211_channel *channel = hw->conf.chandef.chan; From 5fc16fa5f13b3c06fdb959ef262050bd810416a2 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 1 Jun 2024 12:25:35 -0600 Subject: [PATCH 168/537] io_uring: check for non-NULL file pointer in io_file_can_poll() In earlier kernels, it was possible to trigger a NULL pointer dereference off the forced async preparation path, if no file had been assigned. 
The trace leading to that looks as follows: BUG: kernel NULL pointer dereference, address: 00000000000000b0 PGD 0 P4D 0 Oops: 0000 [#1] PREEMPT SMP CPU: 67 PID: 1633 Comm: buf-ring-invali Not tainted 6.8.0-rc3+ #1 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS unknown 2/2/2022 RIP: 0010:io_buffer_select+0xc3/0x210 Code: 00 00 48 39 d1 0f 82 ae 00 00 00 48 81 4b 48 00 00 01 00 48 89 73 70 0f b7 50 0c 66 89 53 42 85 ed 0f 85 d2 00 00 00 48 8b 13 <48> 8b 92 b0 00 00 00 48 83 7a 40 00 0f 84 21 01 00 00 4c 8b 20 5b RSP: 0018:ffffb7bec38c7d88 EFLAGS: 00010246 RAX: ffff97af2be61000 RBX: ffff97af234f1700 RCX: 0000000000000040 RDX: 0000000000000000 RSI: ffff97aecfb04820 RDI: ffff97af234f1700 RBP: 0000000000000000 R08: 0000000000200030 R09: 0000000000000020 R10: ffffb7bec38c7dc8 R11: 000000000000c000 R12: ffffb7bec38c7db8 R13: ffff97aecfb05800 R14: ffff97aecfb05800 R15: ffff97af2be5e000 FS: 00007f852f74b740(0000) GS:ffff97b1eeec0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000000000b0 CR3: 000000016deab005 CR4: 0000000000370ef0 Call Trace: ? __die+0x1f/0x60 ? page_fault_oops+0x14d/0x420 ? do_user_addr_fault+0x61/0x6a0 ? exc_page_fault+0x6c/0x150 ? asm_exc_page_fault+0x22/0x30 ? io_buffer_select+0xc3/0x210 __io_import_iovec+0xb5/0x120 io_readv_prep_async+0x36/0x70 io_queue_sqe_fallback+0x20/0x260 io_submit_sqes+0x314/0x630 __do_sys_io_uring_enter+0x339/0xbc0 ? __do_sys_io_uring_register+0x11b/0xc50 ? vm_mmap_pgoff+0xce/0x160 do_syscall_64+0x5f/0x180 entry_SYSCALL_64_after_hwframe+0x46/0x4e RIP: 0033:0x55e0a110a67e Code: ba cc 00 00 00 45 31 c0 44 0f b6 92 d0 00 00 00 31 d2 41 b9 08 00 00 00 41 83 e2 01 41 c1 e2 04 41 09 c2 b8 aa 01 00 00 0f 05 90 89 30 eb a9 0f 1f 40 00 48 8b 42 20 8b 00 a8 06 75 af 85 f6 because the request is marked forced ASYNC and has a bad file fd, and hence takes the forced async prep path. Current kernels with the request async prep cleaned up can no longer hit this issue, but for ease of backporting, let's add this safety check in here too as it really doesn't hurt. For both cases, this will inevitably end with a CQE posted with -EBADF. Cc: stable@vger.kernel.org Fixes: a76c0b31eef5 ("io_uring: commit non-pollable provided mapped buffers upfront") Signed-off-by: Jens Axboe --- io_uring/io_uring.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h index 624ca9076a50b..726e6367af4d3 100644 --- a/io_uring/io_uring.h +++ b/io_uring/io_uring.h @@ -433,7 +433,7 @@ static inline bool io_file_can_poll(struct io_kiocb *req) { if (req->flags & REQ_F_CAN_POLL) return true; - if (file_can_poll(req->file)) { + if (req->file && file_can_poll(req->file)) { req->flags |= REQ_F_CAN_POLL; return true; } From 30636258a7c9174be44ddb2318bae4f66d4beab0 Mon Sep 17 00:00:00 2001 From: Heng Qi Date: Thu, 30 May 2024 11:41:43 +0800 Subject: [PATCH 169/537] virtio_net: fix missing lock protection on control_buf access Refactored the handling of control_buf to be within the cvq_lock critical section, mitigating race conditions between reading device responses and new command submissions. 
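A minimal userspace sketch of the pattern the fix applies, with a pthread mutex standing in for cvq_lock and invented names throughout: the device status is snapshotted into a local while the lock is still held, so the return value cannot race with the response to a later command submitted after the unlock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cvq_lock = PTHREAD_MUTEX_INITIALIZER;
static int ctrl_status;			/* shared, stands in for vi->ctrl->status */

static bool send_command(int cmd)
{
	bool ok;

	pthread_mutex_lock(&cvq_lock);
	ctrl_status = (cmd >= 0) ? 0 : -1;	/* pretend the device replied */
	ok = (ctrl_status == 0);		/* snapshot while still holding the lock */
	pthread_mutex_unlock(&cvq_lock);

	return ok;	/* no re-read of ctrl_status after the unlock */
}

int main(void)
{
	printf("command accepted: %d\n", send_command(1));
	return 0;
}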
Fixes: 6f45ab3e0409 ("virtio_net: Add a lock for the command VQ.") Signed-off-by: Heng Qi Reviewed-by: Hariprasad Kelam Acked-by: Jason Wang Reviewed-by: Xuan Zhuo Link: https://lore.kernel.org/r/20240530034143.19579-1-hengqi@linux.alibaba.com Signed-off-by: Jakub Kicinski --- drivers/net/virtio_net.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4a802c0ea2cbc..1ea8e6a24286e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2686,6 +2686,7 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd { struct scatterlist *sgs[5], hdr, stat; u32 out_num = 0, tmp, in_num = 0; + bool ok; int ret; /* Caller should know better */ @@ -2731,8 +2732,9 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd } unlock: + ok = vi->ctrl->status == VIRTIO_NET_OK; mutex_unlock(&vi->cvq_lock); - return vi->ctrl->status == VIRTIO_NET_OK; + return ok; } static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, From 7679935b8bdf8d2bc298b04ed579d1ed78e81907 Mon Sep 17 00:00:00 2001 From: Peter Geis Date: Wed, 29 May 2024 14:56:35 -0400 Subject: [PATCH 170/537] MAINTAINERS: remove Peter Geis The Motorcomm PHY driver is now maintained by the OEM. The driver has expanded far beyond my original purpose, and I do not have the hardware to test against the new portions of it. Therefore I am removing myself as a maintainer of the driver. Signed-off-by: Peter Geis Link: https://lore.kernel.org/r/20240529185635.538072-1-pgwipeout@gmail.com Signed-off-by: Jakub Kicinski --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index ba231d2f3a4e5..f7b8025e3c27c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15239,7 +15239,6 @@ F: drivers/staging/most/ F: include/linux/most.h MOTORCOMM PHY DRIVER -M: Peter Geis M: Frank L: netdev@vger.kernel.org S: Maintained From 89e281ebff72e6d37dce2df0e142b2909dafb267 Mon Sep 17 00:00:00 2001 From: Vadim Fedorenko Date: Wed, 29 May 2024 21:08:14 -0700 Subject: [PATCH 171/537] ethtool: init tsinfo stats if requested Statistic values should be set to ETHTOOL_STAT_NOT_SET even if the device doesn't support statistics. 
Otherwise zeros will be returned as if they are proper values: host# ethtool -I -T lo Time stamping parameters for lo: Capabilities: software-transmit software-receive software-system-clock PTP Hardware Clock: none Hardware Transmit Timestamp Modes: none Hardware Receive Filter Modes: none Statistics: tx_pkts: 0 tx_lost: 0 tx_err: 0 Fixes: 0e9c127729be ("ethtool: add interface to read Tx hardware timestamping statistics") Suggested-by: Jakub Kicinski Signed-off-by: Vadim Fedorenko Reviewed-by: Rahul Rameshbabu Link: https://lore.kernel.org/r/20240530040814.1014446-1-vadfed@meta.com Signed-off-by: Jakub Kicinski --- net/ethtool/tsinfo.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/ethtool/tsinfo.c b/net/ethtool/tsinfo.c index be2755c8d8fde..57d496287e523 100644 --- a/net/ethtool/tsinfo.c +++ b/net/ethtool/tsinfo.c @@ -38,11 +38,11 @@ static int tsinfo_prepare_data(const struct ethnl_req_info *req_base, ret = ethnl_ops_begin(dev); if (ret < 0) return ret; - if (req_base->flags & ETHTOOL_FLAG_STATS && - dev->ethtool_ops->get_ts_stats) { + if (req_base->flags & ETHTOOL_FLAG_STATS) { ethtool_stats_init((u64 *)&data->stats, sizeof(data->stats) / sizeof(u64)); - dev->ethtool_ops->get_ts_stats(dev, &data->stats); + if (dev->ethtool_ops->get_ts_stats) + dev->ethtool_ops->get_ts_stats(dev, &data->stats); } ret = __ethtool_get_ts_info(dev, &data->ts_info); ethnl_ops_complete(dev); From 9e0945b1901c9eed4fbee3b8a3870487b2bdc936 Mon Sep 17 00:00:00 2001 From: Heng Qi Date: Tue, 28 May 2024 21:41:15 +0800 Subject: [PATCH 172/537] virtio_net: fix possible dim status unrecoverable When the dim worker is scheduled, if it no longer needs to issue commands, dim may not be able to return to the working state later. For example, the following single queue scenario: 1. The dim worker of rxq0 is scheduled, and the dim status is changed to DIM_APPLY_NEW_PROFILE; 2. dim is disabled or parameters have not been modified; 3. virtnet_rx_dim_work exits directly; Then, even if net_dim is invoked again, it cannot work because the state is not restored to DIM_START_MEASURE. Fixes: 6208799553a8 ("virtio-net: support rx netdim") Signed-off-by: Heng Qi Reviewed-by: Jiri Pirko Acked-by: Michael S. Tsirkin Reviewed-by: Xuan Zhuo Link: https://lore.kernel.org/r/20240528134116.117426-2-hengqi@linux.alibaba.com Signed-off-by: Jakub Kicinski --- drivers/net/virtio_net.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 1ea8e6a24286e..774a292a60906 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -4419,9 +4419,9 @@ static void virtnet_rx_dim_work(struct work_struct *work) if (err) pr_debug("%s: Failed to send dim parameters on rxq%d\n", dev->name, qnum); - dim->state = DIM_START_MEASURE; } out: + dim->state = DIM_START_MEASURE; mutex_unlock(&rq->dim_lock); } From d1f0bd01bc58f35b5353ad9dbe5f7249a8f3368e Mon Sep 17 00:00:00 2001 From: Heng Qi Date: Tue, 28 May 2024 21:41:16 +0800 Subject: [PATCH 173/537] virtio_net: fix a spurious deadlock issue When the following snippet is run, lockdep will report a deadlock[1]. /* Acquire all queues dim_locks */ for (i = 0; i < vi->max_queue_pairs; i++) mutex_lock(&vi->rq[i].dim_lock); There's no deadlock here because the vq locks are always taken in the same order, but lockdep can not figure it out. So refactoring the code to alleviate the problem. 
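A simplified userspace sketch of the refactor, with pthread mutexes standing in for the per-queue dim_lock (structure and names are illustrative only): each queue is locked, updated and unlocked inside the loop body, so at most one dim_lock is held at any time and the nested-lock pattern lockdep objects to never occurs.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_QUEUES 4

struct rxq {
	pthread_mutex_t dim_lock;
	bool dim_enabled;
};

static struct rxq rq[MAX_QUEUES];

static void set_dim_enabled(bool on)
{
	for (int i = 0; i < MAX_QUEUES; i++) {
		pthread_mutex_lock(&rq[i].dim_lock);	/* only one dim_lock held at a time */
		rq[i].dim_enabled = on;
		pthread_mutex_unlock(&rq[i].dim_lock);
	}
}

int main(void)
{
	for (int i = 0; i < MAX_QUEUES; i++)
		pthread_mutex_init(&rq[i].dim_lock, NULL);

	set_dim_enabled(true);
	printf("rq[0].dim_enabled = %d\n", rq[0].dim_enabled);
	return 0;
}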
[1] ======================================================== WARNING: possible recursive locking detected 6.9.0-rc7+ #319 Not tainted -------------------------------------------- ethtool/962 is trying to acquire lock: but task is already holding lock: other info that might help us debug this: Possible unsafe locking scenario: CPU0 ---- lock(&vi->rq[i].dim_lock); lock(&vi->rq[i].dim_lock); *** DEADLOCK *** May be due to missing lock nesting notation 3 locks held by ethtool/962: #0: ffffffff82dbaab0 (cb_lock){++++}-{3:3}, at: genl_rcv+0x19/0x40 #1: ffffffff82dad0a8 (rtnl_mutex){+.+.}-{3:3}, at: ethnl_default_set_doit+0xbe/0x1e0 stack backtrace: CPU: 6 PID: 962 Comm: ethtool Not tainted 6.9.0-rc7+ #319 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014 Call Trace: dump_stack_lvl+0x79/0xb0 check_deadlock+0x130/0x220 __lock_acquire+0x861/0x990 lock_acquire.part.0+0x72/0x1d0 ? lock_acquire+0xf8/0x130 __mutex_lock+0x71/0xd50 virtnet_set_coalesce+0x151/0x190 __ethnl_set_coalesce.isra.0+0x3f8/0x4d0 ethnl_set_coalesce+0x34/0x90 ethnl_default_set_doit+0xdd/0x1e0 genl_family_rcv_msg_doit+0xdc/0x130 genl_family_rcv_msg+0x154/0x230 ? __pfx_ethnl_default_set_doit+0x10/0x10 genl_rcv_msg+0x4b/0xa0 ? __pfx_genl_rcv_msg+0x10/0x10 netlink_rcv_skb+0x5a/0x110 genl_rcv+0x28/0x40 netlink_unicast+0x1af/0x280 netlink_sendmsg+0x20e/0x460 __sys_sendto+0x1fe/0x210 ? find_held_lock+0x2b/0x80 ? do_user_addr_fault+0x3a2/0x8a0 ? __lock_release+0x5e/0x160 ? do_user_addr_fault+0x3a2/0x8a0 ? lock_release+0x72/0x140 ? do_user_addr_fault+0x3a7/0x8a0 __x64_sys_sendto+0x29/0x30 do_syscall_64+0x78/0x180 entry_SYSCALL_64_after_hwframe+0x76/0x7e Fixes: 4d4ac2ececd3 ("virtio_net: Add a lock for per queue RX coalesce") Signed-off-by: Heng Qi Acked-by: Michael S. 
Tsirkin Reviewed-by: Xuan Zhuo Acked-by: Jason Wang Link: https://lore.kernel.org/r/20240528134116.117426-3-hengqi@linux.alibaba.com Signed-off-by: Jakub Kicinski --- drivers/net/virtio_net.c | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 774a292a60906..61a57d134544f 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -4259,7 +4259,6 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL; bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; struct scatterlist sgs_rx; - int ret = 0; int i; if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) @@ -4269,27 +4268,27 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) return -EINVAL; - /* Acquire all queues dim_locks */ - for (i = 0; i < vi->max_queue_pairs; i++) - mutex_lock(&vi->rq[i].dim_lock); - if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { vi->rx_dim_enabled = true; - for (i = 0; i < vi->max_queue_pairs; i++) + for (i = 0; i < vi->max_queue_pairs; i++) { + mutex_lock(&vi->rq[i].dim_lock); vi->rq[i].dim_enabled = true; - goto unlock; + mutex_unlock(&vi->rq[i].dim_lock); + } + return 0; } coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL); - if (!coal_rx) { - ret = -ENOMEM; - goto unlock; - } + if (!coal_rx) + return -ENOMEM; if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { vi->rx_dim_enabled = false; - for (i = 0; i < vi->max_queue_pairs; i++) + for (i = 0; i < vi->max_queue_pairs; i++) { + mutex_lock(&vi->rq[i].dim_lock); vi->rq[i].dim_enabled = false; + mutex_unlock(&vi->rq[i].dim_lock); + } } /* Since the per-queue coalescing params can be set, @@ -4302,22 +4301,19 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, - &sgs_rx)) { - ret = -EINVAL; - goto unlock; - } + &sgs_rx)) + return -EINVAL; vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; for (i = 0; i < vi->max_queue_pairs; i++) { + mutex_lock(&vi->rq[i].dim_lock); vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs; vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames; - } -unlock: - for (i = vi->max_queue_pairs - 1; i >= 0; i--) mutex_unlock(&vi->rq[i].dim_lock); + } - return ret; + return 0; } static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, From 3c34fb0bd4a4237592c5ecb5b2e2531900c55774 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Wed, 29 May 2024 17:02:43 -0400 Subject: [PATCH 174/537] ax25: Fix refcount imbalance on inbound connections When releasing a socket in ax25_release(), we call netdev_put() to decrease the refcount on the associated ax.25 device. However, the execution path for accepting an incoming connection never calls netdev_hold(). This imbalance leads to refcount errors, and ultimately to kernel crashes. A typical call trace for the above situation will start with one of the following errors: refcount_t: decrement hit 0; leaking memory. refcount_t: underflow; use-after-free. And will then have a trace like: Call Trace: ? show_regs+0x64/0x70 ? __warn+0x83/0x120 ? refcount_warn_saturate+0xb2/0x100 ? report_bug+0x158/0x190 ? prb_read_valid+0x20/0x30 ? handle_bug+0x3e/0x70 ? exc_invalid_op+0x1c/0x70 ? asm_exc_invalid_op+0x1f/0x30 ? 
refcount_warn_saturate+0xb2/0x100 ? refcount_warn_saturate+0xb2/0x100 ax25_release+0x2ad/0x360 __sock_release+0x35/0xa0 sock_close+0x19/0x20 [...] On reboot (or any attempt to remove the interface), the kernel gets stuck in an infinite loop: unregister_netdevice: waiting for ax0 to become free. Usage count = 0 This patch corrects these issues by ensuring that we call netdev_hold() and ax25_dev_hold() for new connections in ax25_accept(). This makes the logic leading to ax25_accept() match the logic for ax25_bind(): in both cases we increment the refcount, which is ultimately decremented in ax25_release(). Fixes: 9fd75b66b8f6 ("ax25: Fix refcount leaks caused by ax25_cb_del()") Signed-off-by: Lars Kellogg-Stedman Tested-by: Duoming Zhou Tested-by: Dan Cross Tested-by: Chris Maness Reviewed-by: Dan Carpenter Link: https://lore.kernel.org/r/20240529210242.3346844-2-lars@oddbit.com Signed-off-by: Jakub Kicinski --- net/ax25/af_ax25.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 8077cf2ee4480..d6f9fae06a9d8 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1378,8 +1378,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, { struct sk_buff *skb; struct sock *newsk; + ax25_dev *ax25_dev; DEFINE_WAIT(wait); struct sock *sk; + ax25_cb *ax25; int err = 0; if (sock->state != SS_UNCONNECTED) @@ -1434,6 +1436,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, kfree_skb(skb); sk_acceptq_removed(sk); newsock->state = SS_CONNECTED; + ax25 = sk_to_ax25(newsk); + ax25_dev = ax25->ax25_dev; + netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC); + ax25_dev_hold(ax25_dev); out: release_sock(sk); From 166fcf86cd34e15c7f383eda4642d7a212393008 Mon Sep 17 00:00:00 2001 From: Duoming Zhou Date: Thu, 30 May 2024 13:17:33 +0800 Subject: [PATCH 175/537] ax25: Replace kfree() in ax25_dev_free() with ax25_dev_put() The object "ax25_dev" is managed by reference counting. Thus it should not be directly released by kfree(), replace with ax25_dev_put(). Fixes: d01ffb9eee4a ("ax25: add refcount in ax25_dev to avoid UAF bugs") Suggested-by: Dan Carpenter Signed-off-by: Duoming Zhou Reviewed-by: Dan Carpenter Link: https://lore.kernel.org/r/20240530051733.11416-1-duoming@zju.edu.cn Signed-off-by: Jakub Kicinski --- net/ax25/ax25_dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index 742d7c68e7e7e..9efd6690b3443 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -196,7 +196,7 @@ void __exit ax25_dev_free(void) list_for_each_entry_safe(s, n, &ax25_dev_list, list) { netdev_put(s->dev, &s->dev_tracker); list_del(&s->list); - kfree(s); + ax25_dev_put(s); } spin_unlock_bh(&ax25_dev_lock); } From 8105378c0c02309221b63d15cfe8fe92e7fe1434 Mon Sep 17 00:00:00 2001 From: Jason Xing Date: Thu, 30 May 2024 11:27:17 +0800 Subject: [PATCH 176/537] net: rps: fix error when CONFIG_RFS_ACCEL is off John Sperbeck reported that if we turn off CONFIG_RFS_ACCEL, the 'head' is not defined, which will trigger compile error. So I move the 'head' out of the CONFIG_RFS_ACCEL scope. 
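A small compile-both-ways sketch of the fix (EXAMPLE_RFS_ACCEL below is a made-up stand-in for CONFIG_RFS_ACCEL, and the variable names are invented): the variable needed by both configurations is declared before the #ifdef, so the build without the option still sees it.

#include <stdio.h>

static unsigned int input_queue_tail;	/* invented stand-in for the per-CPU queue tail */

static void steer_flow(unsigned int next_cpu)
{
	if (next_cpu < 4) {
		unsigned int head;	/* declared unconditionally, as in the fix */
#ifdef EXAMPLE_RFS_ACCEL
		unsigned int flow_id = next_cpu;	/* pretend hardware steering data */

		head = input_queue_tail + flow_id;
#else
		head = input_queue_tail;	/* still compiles with the option off */
#endif
		printf("cpu %u, queue head %u\n", next_cpu, head);
	}
}

int main(void)
{
	steer_flow(1);
	return 0;
}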
Fixes: 84b6823cd96b ("net: rps: protect last_qtail with rps_input_queue_tail_save() helper") Reported-by: John Sperbeck Closes: https://lore.kernel.org/all/20240529203421.2432481-1-jsperbeck@google.com/ Signed-off-by: Jason Xing Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20240530032717.57787-1-kerneljasonxing@gmail.com Signed-off-by: Jakub Kicinski --- net/core/dev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index e1bb6d7856d92..4d4de9008f6f3 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4516,12 +4516,13 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb, struct rps_dev_flow *rflow, u16 next_cpu) { if (next_cpu < nr_cpu_ids) { + u32 head; #ifdef CONFIG_RFS_ACCEL struct netdev_rx_queue *rxqueue; struct rps_dev_flow_table *flow_table; struct rps_dev_flow *old_rflow; - u32 flow_id, head; u16 rxq_index; + u32 flow_id; int rc; /* Should we steer this flow to a different hardware queue? */ From e85e271dec0270982afed84f70dc37703fcc1d52 Mon Sep 17 00:00:00 2001 From: DelphineCCChiu Date: Wed, 29 May 2024 14:58:55 +0800 Subject: [PATCH 177/537] net/ncsi: Fix the multi thread manner of NCSI driver Currently NCSI driver will send several NCSI commands back to back without waiting the response of previous NCSI command or timeout in some state when NIC have multi channel. This operation against the single thread manner defined by NCSI SPEC(section 6.3.2.3 in DSP0222_1.1.1) According to NCSI SPEC(section 6.2.13.1 in DSP0222_1.1.1), we should probe one channel at a time by sending NCSI commands (Clear initial state, Get version ID, Get capabilities...), than repeat this steps until the max number of channels which we got from NCSI command (Get capabilities) has been probed. Fixes: e6f44ed6d04d ("net/ncsi: Package and channel management") Signed-off-by: DelphineCCChiu Link: https://lore.kernel.org/r/20240529065856.825241-1-delphine_cc_chiu@wiwynn.com Signed-off-by: Jakub Kicinski --- net/ncsi/internal.h | 2 ++ net/ncsi/ncsi-manage.c | 73 +++++++++++++++++++++--------------------- net/ncsi/ncsi-rsp.c | 4 ++- 3 files changed, 41 insertions(+), 38 deletions(-) diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h index 374412ed780b6..ef0f8f73826f5 100644 --- a/net/ncsi/internal.h +++ b/net/ncsi/internal.h @@ -325,6 +325,7 @@ struct ncsi_dev_priv { spinlock_t lock; /* Protect the NCSI device */ unsigned int package_probe_id;/* Current ID during probe */ unsigned int package_num; /* Number of packages */ + unsigned int channel_probe_id;/* Current cahnnel ID during probe */ struct list_head packages; /* List of packages */ struct ncsi_channel *hot_channel; /* Channel was ever active */ struct ncsi_request requests[256]; /* Request table */ @@ -343,6 +344,7 @@ struct ncsi_dev_priv { bool multi_package; /* Enable multiple packages */ bool mlx_multi_host; /* Enable multi host Mellanox */ u32 package_whitelist; /* Packages to configure */ + unsigned char channel_count; /* Num of channels to probe */ }; struct ncsi_cmd_arg { diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c index 745c788f1d1df..5ecf611c88200 100644 --- a/net/ncsi/ncsi-manage.c +++ b/net/ncsi/ncsi-manage.c @@ -510,17 +510,19 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp) break; case ncsi_dev_state_suspend_gls: - ndp->pending_req_num = np->channel_num; + ndp->pending_req_num = 1; nca.type = NCSI_PKT_CMD_GLS; nca.package = np->id; + nca.channel = ndp->channel_probe_id; + ret = ncsi_xmit_cmd(&nca); + if (ret) + goto error; + 
ndp->channel_probe_id++; - nd->state = ncsi_dev_state_suspend_dcnt; - NCSI_FOR_EACH_CHANNEL(np, nc) { - nca.channel = nc->id; - ret = ncsi_xmit_cmd(&nca); - if (ret) - goto error; + if (ndp->channel_probe_id == ndp->channel_count) { + ndp->channel_probe_id = 0; + nd->state = ncsi_dev_state_suspend_dcnt; } break; @@ -1345,7 +1347,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp) { struct ncsi_dev *nd = &ndp->ndev; struct ncsi_package *np; - struct ncsi_channel *nc; struct ncsi_cmd_arg nca; unsigned char index; int ret; @@ -1423,23 +1424,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp) nd->state = ncsi_dev_state_probe_cis; break; - case ncsi_dev_state_probe_cis: - ndp->pending_req_num = NCSI_RESERVED_CHANNEL; - - /* Clear initial state */ - nca.type = NCSI_PKT_CMD_CIS; - nca.package = ndp->active_package->id; - for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) { - nca.channel = index; - ret = ncsi_xmit_cmd(&nca); - if (ret) - goto error; - } - - nd->state = ncsi_dev_state_probe_gvi; - if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY)) - nd->state = ncsi_dev_state_probe_keep_phy; - break; case ncsi_dev_state_probe_keep_phy: ndp->pending_req_num = 1; @@ -1452,14 +1436,17 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp) nd->state = ncsi_dev_state_probe_gvi; break; + case ncsi_dev_state_probe_cis: case ncsi_dev_state_probe_gvi: case ncsi_dev_state_probe_gc: case ncsi_dev_state_probe_gls: np = ndp->active_package; - ndp->pending_req_num = np->channel_num; + ndp->pending_req_num = 1; - /* Retrieve version, capability or link status */ - if (nd->state == ncsi_dev_state_probe_gvi) + /* Clear initial state Retrieve version, capability or link status */ + if (nd->state == ncsi_dev_state_probe_cis) + nca.type = NCSI_PKT_CMD_CIS; + else if (nd->state == ncsi_dev_state_probe_gvi) nca.type = NCSI_PKT_CMD_GVI; else if (nd->state == ncsi_dev_state_probe_gc) nca.type = NCSI_PKT_CMD_GC; @@ -1467,19 +1454,29 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp) nca.type = NCSI_PKT_CMD_GLS; nca.package = np->id; - NCSI_FOR_EACH_CHANNEL(np, nc) { - nca.channel = nc->id; - ret = ncsi_xmit_cmd(&nca); - if (ret) - goto error; - } + nca.channel = ndp->channel_probe_id; - if (nd->state == ncsi_dev_state_probe_gvi) + ret = ncsi_xmit_cmd(&nca); + if (ret) + goto error; + + if (nd->state == ncsi_dev_state_probe_cis) { + nd->state = ncsi_dev_state_probe_gvi; + if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0) + nd->state = ncsi_dev_state_probe_keep_phy; + } else if (nd->state == ncsi_dev_state_probe_gvi) { nd->state = ncsi_dev_state_probe_gc; - else if (nd->state == ncsi_dev_state_probe_gc) + } else if (nd->state == ncsi_dev_state_probe_gc) { nd->state = ncsi_dev_state_probe_gls; - else + } else { + nd->state = ncsi_dev_state_probe_cis; + ndp->channel_probe_id++; + } + + if (ndp->channel_probe_id == ndp->channel_count) { + ndp->channel_probe_id = 0; nd->state = ncsi_dev_state_probe_dp; + } break; case ncsi_dev_state_probe_dp: ndp->pending_req_num = 1; @@ -1780,6 +1777,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev, ndp->requests[i].ndp = ndp; timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0); } + ndp->channel_count = NCSI_RESERVED_CHANNEL; spin_lock_irqsave(&ncsi_dev_lock, flags); list_add_tail_rcu(&ndp->node, &ncsi_dev_list); @@ -1813,6 +1811,7 @@ int ncsi_start_dev(struct ncsi_dev *nd) if (!(ndp->flags & NCSI_DEV_PROBED)) { ndp->package_probe_id = 0; + ndp->channel_probe_id = 0; nd->state = ncsi_dev_state_probe; 
schedule_work(&ndp->work); return 0; diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index bee290d0f48b6..e28be33bdf2c4 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c @@ -795,12 +795,13 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr) struct ncsi_rsp_gc_pkt *rsp; struct ncsi_dev_priv *ndp = nr->ndp; struct ncsi_channel *nc; + struct ncsi_package *np; size_t size; /* Find the channel */ rsp = (struct ncsi_rsp_gc_pkt *)skb_network_header(nr->rsp); ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel, - NULL, &nc); + &np, &nc); if (!nc) return -ENODEV; @@ -835,6 +836,7 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr) */ nc->vlan_filter.bitmap = U64_MAX; nc->vlan_filter.n_vids = rsp->vlan_cnt; + np->ndp->channel_count = rsp->channel_cnt; return 0; } From 33700a0c9b562700c28d31360a5f04508f459a45 Mon Sep 17 00:00:00 2001 From: Dmitry Safonov <0x7f454c46@gmail.com> Date: Wed, 29 May 2024 18:29:32 +0100 Subject: [PATCH 178/537] net/tcp: Don't consider TCP_CLOSE in TCP_AO_ESTABLISHED TCP_CLOSE may or may not have current/rnext keys and should not be considered "established". The fast-path for TCP_CLOSE is SKB_DROP_REASON_TCP_CLOSE. This is what tcp_rcv_state_process() does anyways. Add an early drop path to not spend any time verifying segment signatures for sockets in TCP_CLOSE state. Cc: stable@vger.kernel.org # v6.7 Fixes: 0a3a809089eb ("net/tcp: Verify inbound TCP-AO signed segments") Signed-off-by: Dmitry Safonov <0x7f454c46@gmail.com> Link: https://lore.kernel.org/r/20240529-tcp_ao-sk_state-v1-1-d69b5d323c52@gmail.com Signed-off-by: Jakub Kicinski --- include/net/tcp_ao.h | 7 ++++--- net/ipv4/tcp_ao.c | 13 +++++++++---- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h index 471e177362b4c..5d8e9ed2c0056 100644 --- a/include/net/tcp_ao.h +++ b/include/net/tcp_ao.h @@ -86,7 +86,8 @@ static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key) struct tcp_ao_info { /* List of tcp_ao_key's */ struct hlist_head head; - /* current_key and rnext_key aren't maintained on listen sockets. + /* current_key and rnext_key are maintained on sockets + * in TCP_AO_ESTABLISHED states. * Their purpose is to cache keys on established connections, * saving needless lookups. Never dereference any of them from * listen sockets. 
@@ -201,9 +202,9 @@ struct tcp6_ao_context { }; struct tcp_sigpool; +/* Established states are fast-path and there always is current_key/rnext_key */ #define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \ - TCPF_CLOSE | TCPF_CLOSE_WAIT | \ - TCPF_LAST_ACK | TCPF_CLOSING) + TCPF_CLOSE_WAIT | TCPF_LAST_ACK | TCPF_CLOSING) int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb, struct tcp_ao_key *key, struct tcphdr *th, diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c index 781b67a525719..37c42b63ff993 100644 --- a/net/ipv4/tcp_ao.c +++ b/net/ipv4/tcp_ao.c @@ -933,6 +933,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb, struct tcp_ao_key *key; __be32 sisn, disn; u8 *traffic_key; + int state; u32 sne = 0; info = rcu_dereference(tcp_sk(sk)->ao_info); @@ -948,8 +949,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb, disn = 0; } + state = READ_ONCE(sk->sk_state); /* Fast-path */ - if (likely((1 << sk->sk_state) & TCP_AO_ESTABLISHED)) { + if (likely((1 << state) & TCP_AO_ESTABLISHED)) { enum skb_drop_reason err; struct tcp_ao_key *current_key; @@ -988,6 +990,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb, return SKB_NOT_DROPPED_YET; } + if (unlikely(state == TCP_CLOSE)) + return SKB_DROP_REASON_TCP_CLOSE; + /* Lookup key based on peer address and keyid. * current_key and rnext_key must not be used on tcp listen * sockets as otherwise: @@ -1001,7 +1006,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb, if (th->syn && !th->ack) goto verify_hash; - if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) { + if ((1 << state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) { /* Make the initial syn the likely case here */ if (unlikely(req)) { sne = tcp_ao_compute_sne(0, tcp_rsk(req)->rcv_isn, @@ -1018,14 +1023,14 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb, /* no way to figure out initial sisn/disn - drop */ return SKB_DROP_REASON_TCP_FLAGS; } - } else if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { + } else if ((1 << state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { disn = info->lisn; if (th->syn || th->rst) sisn = th->seq; else sisn = info->risn; } else { - WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", sk->sk_state); + WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", state); return SKB_DROP_REASON_TCP_AOFAILURE; } verify_hash: From d7bd473632d07f8a54655c270c0940cc3671c548 Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Maneyrol Date: Wed, 29 May 2024 15:47:17 +0000 Subject: [PATCH 179/537] iio: imu: inv_icm42600: stabilized timestamp in interrupt Use IRQF_ONESHOT flag to ensure the timestamp is not updated in the hard handler during the thread handler. And compute and use the effective watermark value that correspond to this first timestamp. This way we can ensure the timestamp is always corresponding to the value used by the timestamping mechanism. Otherwise, it is possible that between FIFO count read and FIFO processing the timestamp is overwritten in the hard handler. 
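As an illustration of the mechanism this fix relies on, here is a minimal threaded-IRQ sketch; the struct, handler and device names are placeholders, not the inv_icm42600 code. With IRQF_ONESHOT the interrupt line stays masked until the threaded handler returns, so the hard handler cannot fire again and overwrite the latched timestamp while the FIFO is being parsed.

/* Illustrative sketch only: names below are placeholders. */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/types.h>

struct demo_state {
	u64 timestamp;
};

static irqreturn_t demo_hard_handler(int irq, void *data)
{
	struct demo_state *st = data;

	st->timestamp = ktime_get_ns();		/* latch timestamp early */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_handler(int irq, void *data)
{
	struct demo_state *st = data;

	/* FIFO parsing would consume st->timestamp here; with IRQF_ONESHOT
	 * the hard handler cannot run again and overwrite it meanwhile. */
	(void)st;
	return IRQ_HANDLED;
}

static int demo_irq_init(struct device *dev, int irq, unsigned long irq_type,
			 struct demo_state *st)
{
	return devm_request_threaded_irq(dev, irq, demo_hard_handler,
					 demo_thread_handler,
					 irq_type | IRQF_ONESHOT,
					 "demo", st);
}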
Fixes: ec74ae9fd37c ("iio: imu: inv_icm42600: add accurate timestamping") Cc: stable@vger.kernel.org Signed-off-by: Jean-Baptiste Maneyrol Link: https://lore.kernel.org/r/20240529154717.651863-1-inv.git-commit@tdk.com Signed-off-by: Jonathan Cameron --- .../imu/inv_icm42600/inv_icm42600_buffer.c | 19 +++++++++++++++++-- .../imu/inv_icm42600/inv_icm42600_buffer.h | 2 ++ .../iio/imu/inv_icm42600/inv_icm42600_core.c | 1 + 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c index 63b85ec88c131..a8cf74c84c3c4 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c @@ -222,10 +222,15 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st) latency_accel = period_accel * wm_accel; /* 0 value for watermark means that the sensor is turned off */ + if (wm_gyro == 0 && wm_accel == 0) + return 0; + if (latency_gyro == 0) { watermark = wm_accel; + st->fifo.watermark.eff_accel = wm_accel; } else if (latency_accel == 0) { watermark = wm_gyro; + st->fifo.watermark.eff_gyro = wm_gyro; } else { /* compute the smallest latency that is a multiple of both */ if (latency_gyro <= latency_accel) @@ -241,6 +246,13 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st) watermark = latency / period; if (watermark < 1) watermark = 1; + /* update effective watermark */ + st->fifo.watermark.eff_gyro = latency / period_gyro; + if (st->fifo.watermark.eff_gyro < 1) + st->fifo.watermark.eff_gyro = 1; + st->fifo.watermark.eff_accel = latency / period_accel; + if (st->fifo.watermark.eff_accel < 1) + st->fifo.watermark.eff_accel = 1; } /* compute watermark value in bytes */ @@ -514,7 +526,7 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st) /* handle gyroscope timestamp and FIFO data parsing */ if (st->fifo.nb.gyro > 0) { ts = &gyro_st->ts; - inv_sensors_timestamp_interrupt(ts, st->fifo.nb.gyro, + inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_gyro, st->timestamp.gyro); ret = inv_icm42600_gyro_parse_fifo(st->indio_gyro); if (ret) @@ -524,7 +536,7 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st) /* handle accelerometer timestamp and FIFO data parsing */ if (st->fifo.nb.accel > 0) { ts = &accel_st->ts; - inv_sensors_timestamp_interrupt(ts, st->fifo.nb.accel, + inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_accel, st->timestamp.accel); ret = inv_icm42600_accel_parse_fifo(st->indio_accel); if (ret) @@ -577,6 +589,9 @@ int inv_icm42600_buffer_init(struct inv_icm42600_state *st) unsigned int val; int ret; + st->fifo.watermark.eff_gyro = 1; + st->fifo.watermark.eff_accel = 1; + /* * Default FIFO configuration (bits 7 to 5) * - use invalid value diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h index 8b85ee333bf8f..f6c85daf42b00 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.h @@ -32,6 +32,8 @@ struct inv_icm42600_fifo { struct { unsigned int gyro; unsigned int accel; + unsigned int eff_gyro; + unsigned int eff_accel; } watermark; size_t count; struct { diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c index 96116a68ab291..62fdae530334a 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c @@ -537,6 
+537,7 @@ static int inv_icm42600_irq_init(struct inv_icm42600_state *st, int irq, if (ret) return ret; + irq_type |= IRQF_ONESHOT; return devm_request_threaded_irq(dev, irq, inv_icm42600_irq_timestamp, inv_icm42600_irq_handler, irq_type, "inv_icm42600", st); From 245f3b149e6cc3ac6ee612cdb7042263bfc9e73c Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Maneyrol Date: Mon, 27 May 2024 21:00:08 +0000 Subject: [PATCH 180/537] iio: imu: inv_icm42600: delete unneeded update watermark call Update watermark will be done inside the hwfifo_set_watermark callback just after the update_scan_mode. It is useless to do it here. Fixes: 7f85e42a6c54 ("iio: imu: inv_icm42600: add buffer support in iio devices") Cc: stable@vger.kernel.org Signed-off-by: Jean-Baptiste Maneyrol Link: https://lore.kernel.org/r/20240527210008.612932-1-inv.git-commit@tdk.com Signed-off-by: Jonathan Cameron --- drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c | 4 ---- drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c | 4 ---- 2 files changed, 8 deletions(-) diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c index 83d8504ebfff4..4b25666936146 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c @@ -130,10 +130,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev, /* update data FIFO write */ inv_sensors_timestamp_apply_odr(ts, 0, 0, 0); ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en); - if (ret) - goto out_unlock; - - ret = inv_icm42600_buffer_update_watermark(st); out_unlock: mutex_unlock(&st->lock); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c index e6f8de80128c4..938af5b640b00 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c @@ -130,10 +130,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev, /* update data FIFO write */ inv_sensors_timestamp_apply_odr(ts, 0, 0, 0); ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en); - if (ret) - goto out_unlock; - - ret = inv_icm42600_buffer_update_watermark(st); out_unlock: mutex_unlock(&st->lock); From 6c3ca6654a74dd396bc477839ba8d9792eced441 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Mon, 3 Jun 2024 15:45:53 +0800 Subject: [PATCH 181/537] LoongArch: Remove CONFIG_ACPI_TABLE_UPGRADE in platform_init() Both acpi_table_upgrade() and acpi_boot_table_init() are defined as empty functions under !CONFIG_ACPI_TABLE_UPGRADE and !CONFIG_ACPI in include/linux/acpi.h, there are no implicit declaration errors with various configs. #ifdef CONFIG_ACPI_TABLE_UPGRADE void acpi_table_upgrade(void); #else static inline void acpi_table_upgrade(void) { } #endif #ifdef CONFIG_ACPI ... void acpi_boot_table_init (void); ... #else /* !CONFIG_ACPI */ ... static inline void acpi_boot_table_init(void) { } ... #endif /* !CONFIG_ACPI */ As Huacai suggested, CONFIG_ACPI_TABLE_UPGRADE is ugly and not necessary here, just remove it. At the same time, just keep CONFIG_ACPI to prevent potential build errors in future, and give a signal to indicate the code is ACPI-specific. For the same reason, we also put acpi_table_upgrade() under CONFIG_ACPI. 
Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen --- arch/loongarch/kernel/setup.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 60e0fe97f61a3..89ad756aeeed2 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -351,10 +351,8 @@ void __init platform_init(void) arch_reserve_vmcore(); arch_reserve_crashkernel(); -#ifdef CONFIG_ACPI_TABLE_UPGRADE - acpi_table_upgrade(); -#endif #ifdef CONFIG_ACPI + acpi_table_upgrade(); acpi_gbl_use_default_register_widths = false; acpi_boot_table_init(); #endif From b56f67a6c748bb009f313f91651c8020d2338d63 Mon Sep 17 00:00:00 2001 From: Jiaxun Yang Date: Mon, 3 Jun 2024 15:45:53 +0800 Subject: [PATCH 182/537] LoongArch: Fix built-in DTB detection fdt_check_header(__dtb_start) will always success because kernel provides a dummy dtb, and by coincidence __dtb_start clashed with entry of this dummy dtb. The consequence is fdt passed from firmware will never be taken. Fix by trying to utilise __dtb_start only when CONFIG_BUILTIN_DTB is enabled. Cc: stable@vger.kernel.org Fixes: 7b937cc243e5 ("of: Create of_root if no dtb provided by firmware") Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 89ad756aeeed2..3d048f1be1438 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -282,7 +282,7 @@ static void __init fdt_setup(void) return; /* Prefer to use built-in dtb, checking its legality first. */ - if (!fdt_check_header(__dtb_start)) + if (IS_ENABLED(CONFIG_BUILTIN_DTB) && !fdt_check_header(__dtb_start)) fdt_pointer = __dtb_start; else fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */ From 3de9c42d02a79a5e09bbee7a4421ddc00cfd5c6d Mon Sep 17 00:00:00 2001 From: Jiaxun Yang Date: Mon, 3 Jun 2024 15:45:53 +0800 Subject: [PATCH 183/537] LoongArch: Add all CPUs enabled by fdt to NUMA node 0 NUMA enabled kernel on FDT based machine fails to boot because CPUs are all in NUMA_NO_NODE and mm subsystem won't accept that. Fix by adding them to default NUMA node at FDT parsing phase and move numa_add_cpu(0) to a later point. 
Cc: stable@vger.kernel.org Fixes: 88d4d957edc7 ("LoongArch: Add FDT booting support from efi system table") Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/numa.h | 1 + arch/loongarch/kernel/smp.c | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h index 27f319b498625..b5f9de9f102e4 100644 --- a/arch/loongarch/include/asm/numa.h +++ b/arch/loongarch/include/asm/numa.h @@ -56,6 +56,7 @@ extern int early_cpu_to_node(int cpu); static inline void early_numa_add_cpu(int cpuid, s16 node) { } static inline void numa_add_cpu(unsigned int cpu) { } static inline void numa_remove_cpu(unsigned int cpu) { } +static inline void set_cpuid_to_node(int cpuid, s16 node) { } static inline int early_cpu_to_node(int cpu) { diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 0dfe2388ef413..1436d2465939b 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -273,7 +273,6 @@ static void __init fdt_smp_setup(void) if (cpuid == loongson_sysconf.boot_cpu_id) { cpu = 0; - numa_add_cpu(cpu); } else { cpu = cpumask_next_zero(-1, cpu_present_mask); } @@ -283,6 +282,9 @@ static void __init fdt_smp_setup(void) set_cpu_present(cpu, true); __cpu_number_map[cpuid] = cpu; __cpu_logical_map[cpu] = cpuid; + + early_numa_add_cpu(cpu, 0); + set_cpuid_to_node(cpuid, 0); } loongson_sysconf.nr_cpus = num_processors; @@ -468,6 +470,7 @@ void smp_prepare_boot_cpu(void) set_cpu_possible(0, true); set_cpu_online(0, true); set_my_cpu_offset(per_cpu_offset(0)); + numa_add_cpu(0); rr_node = first_node(node_online_map); for_each_possible_cpu(cpu) { From beb2800074c15362cf9f6c7301120910046d6556 Mon Sep 17 00:00:00 2001 From: Jiaxun Yang Date: Mon, 3 Jun 2024 15:45:53 +0800 Subject: [PATCH 184/537] LoongArch: Fix entry point in kernel image header Currently kernel entry in head.S is in DMW address range, firmware is instructed to jump to this address after loading the kernel image. However kernel should not make any assumption on firmware's DMW setting, thus the entry point should be a physical address falls into direct translation region. Fix by converting entry address to physical and amend entry calculation logic in libstub accordingly. BTW, use ABSOLUTE() to calculate variables to make Clang/LLVM happy. 
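For illustration only, a tiny standalone program showing the address conversion the patch performs in the linker script, assuming the 48-bit physical mask it introduces; the example address is made up.

/* Hedged sketch: strips the direct-mapped-window high bits so only the
 * assumed 48-bit physical address remains. */
#include <stdint.h>
#include <stdio.h>

#define PHYSADDR_MASK 0xffffffffffffULL	/* keep the low 48 bits */

int main(void)
{
	uint64_t dmw_vaddr = 0x9000000000200000ULL;	/* made-up DMW entry */
	uint64_t paddr = dmw_vaddr & PHYSADDR_MASK;

	printf("entry %#llx -> physical %#llx\n",
	       (unsigned long long)dmw_vaddr, (unsigned long long)paddr);
	return 0;
}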
Cc: stable@vger.kernel.org Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/kernel/head.S | 2 +- arch/loongarch/kernel/vmlinux.lds.S | 10 ++++++---- drivers/firmware/efi/libstub/loongarch.c | 2 +- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S index c4f7de2e28054..4677ea8fa8e98 100644 --- a/arch/loongarch/kernel/head.S +++ b/arch/loongarch/kernel/head.S @@ -22,7 +22,7 @@ _head: .word MZ_MAGIC /* "MZ", MS-DOS header */ .org 0x8 - .dword kernel_entry /* Kernel entry point */ + .dword _kernel_entry /* Kernel entry point (physical address) */ .dword _kernel_asize /* Kernel image effective size */ .quad PHYS_LINK_KADDR /* Kernel image load offset from start of RAM */ .org 0x38 /* 0x20 ~ 0x37 reserved */ diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S index e8e97dbf9ca40..3c7595342730e 100644 --- a/arch/loongarch/kernel/vmlinux.lds.S +++ b/arch/loongarch/kernel/vmlinux.lds.S @@ -6,6 +6,7 @@ #define PAGE_SIZE _PAGE_SIZE #define RO_EXCEPTION_TABLE_ALIGN 4 +#define PHYSADDR_MASK 0xffffffffffff /* 48-bit */ /* * Put .bss..swapper_pg_dir as the first thing in .bss. This will @@ -142,10 +143,11 @@ SECTIONS #ifdef CONFIG_EFI_STUB /* header symbols */ - _kernel_asize = _end - _text; - _kernel_fsize = _edata - _text; - _kernel_vsize = _end - __initdata_begin; - _kernel_rsize = _edata - __initdata_begin; + _kernel_entry = ABSOLUTE(kernel_entry & PHYSADDR_MASK); + _kernel_asize = ABSOLUTE(_end - _text); + _kernel_fsize = ABSOLUTE(_edata - _text); + _kernel_vsize = ABSOLUTE(_end - __initdata_begin); + _kernel_rsize = ABSOLUTE(_edata - __initdata_begin); #endif .gptab.sdata : { diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c index 684c9354637c6..d0ef93551c44f 100644 --- a/drivers/firmware/efi/libstub/loongarch.c +++ b/drivers/firmware/efi/libstub/loongarch.c @@ -41,7 +41,7 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv) unsigned long __weak kernel_entry_address(unsigned long kernel_addr, efi_loaded_image_t *image) { - return *(unsigned long *)(kernel_addr + 8) - VMLINUX_LOAD_ADDRESS + kernel_addr; + return *(unsigned long *)(kernel_addr + 8) - PHYSADDR(VMLINUX_LOAD_ADDRESS) + kernel_addr; } efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image, From 1098efd299ffe9c8af818425338c7f6c4f930a98 Mon Sep 17 00:00:00 2001 From: Jiaxun Yang Date: Mon, 3 Jun 2024 15:45:53 +0800 Subject: [PATCH 185/537] LoongArch: Override higher address bits in JUMP_VIRT_ADDR In JUMP_VIRT_ADDR we are performing an or calculation on address value directly from pcaddi. This will only work if we are currently running from direct 1:1 mapping addresses or firmware's DMW is configured exactly same as kernel. Still, we should not rely on such assumption. Fix by overriding higher bits in address comes from pcaddi, so we can get rid of or operator. 
Cc: stable@vger.kernel.org Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen --- arch/loongarch/include/asm/stackframe.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h index 45b507a7b06fc..d9eafd3ee3d1e 100644 --- a/arch/loongarch/include/asm/stackframe.h +++ b/arch/loongarch/include/asm/stackframe.h @@ -42,7 +42,7 @@ .macro JUMP_VIRT_ADDR temp1 temp2 li.d \temp1, CACHE_BASE pcaddi \temp2, 0 - or \temp1, \temp1, \temp2 + bstrins.d \temp1, \temp2, (DMW_PABITS - 1), 0 jirl zero, \temp1, 0xc .endm From eb36e520f4f1b690fd776f15cbac452f82ff7bfa Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Mon, 3 Jun 2024 15:45:53 +0800 Subject: [PATCH 186/537] LoongArch: Fix GMAC's phy-mode definitions in dts The GMAC of Loongson chips cannot insert the correct 1.5-2ns delay. So we need the PHY to insert internal delays for both transmit and receive data lines from/to the PHY device. Fix this by changing the "phy-mode" from "rgmii" to "rgmii-id" in dts. Signed-off-by: Huacai Chen --- arch/loongarch/boot/dts/loongson-2k0500-ref.dts | 4 ++-- arch/loongarch/boot/dts/loongson-2k1000-ref.dts | 4 ++-- arch/loongarch/boot/dts/loongson-2k2000-ref.dts | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts index 8aefb0c126722..a34734a6c3ce8 100644 --- a/arch/loongarch/boot/dts/loongson-2k0500-ref.dts +++ b/arch/loongarch/boot/dts/loongson-2k0500-ref.dts @@ -44,14 +44,14 @@ &gmac0 { status = "okay"; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; bus_id = <0x0>; }; &gmac1 { status = "okay"; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; bus_id = <0x1>; }; diff --git a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts index 8463fe035386e..23cf26cc3e5f1 100644 --- a/arch/loongarch/boot/dts/loongson-2k1000-ref.dts +++ b/arch/loongarch/boot/dts/loongson-2k1000-ref.dts @@ -43,7 +43,7 @@ &gmac0 { status = "okay"; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&phy0>; mdio { compatible = "snps,dwmac-mdio"; @@ -58,7 +58,7 @@ &gmac1 { status = "okay"; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&phy1>; mdio { compatible = "snps,dwmac-mdio"; diff --git a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts index 74b99bd234cc3..ea9e6985d0e9f 100644 --- a/arch/loongarch/boot/dts/loongson-2k2000-ref.dts +++ b/arch/loongarch/boot/dts/loongson-2k2000-ref.dts @@ -92,7 +92,7 @@ &gmac2 { status = "okay"; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; phy-handle = <&phy2>; mdio { compatible = "snps,dwmac-mdio"; From 629f2b4e05225e53125aaf7ff0b87d5d53897128 Mon Sep 17 00:00:00 2001 From: Chen Ni Date: Tue, 28 May 2024 11:08:32 +0800 Subject: [PATCH 187/537] drm/panel: sitronix-st7789v: Add check for of_drm_get_panel_orientation Add check for the return value of of_drm_get_panel_orientation() and return the error if it fails in order to catch the error. 
Fixes: b27c0f6d208d ("drm/panel: sitronix-st7789v: add panel orientation support") Signed-off-by: Chen Ni Reviewed-by: Michael Riesch Acked-by: Jessica Zhang Link: https://lore.kernel.org/r/20240528030832.2529471-1-nichen@iscas.ac.cn Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20240528030832.2529471-1-nichen@iscas.ac.cn --- drivers/gpu/drm/panel/panel-sitronix-st7789v.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index e8f385b9c6182..28bfc48a91272 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -643,7 +643,9 @@ static int st7789v_probe(struct spi_device *spi) if (ret) return dev_err_probe(dev, ret, "Failed to get backlight\n"); - of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation); + ret = of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation); + if (ret) + return dev_err_probe(&spi->dev, ret, "Failed to get orientation\n"); drm_panel_add(&ctx->panel); From 38a38f5a36da9820680d413972cb733349400532 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 25 May 2024 21:38:53 +0200 Subject: [PATCH 188/537] Input: silead - Always support 10 fingers When support for Silead touchscreens was orginal added some touchscreens with older firmware versions only supported 5 fingers and this was made the default requiring the setting of a "silead,max-fingers=10" uint32 device-property for all touchscreen models which do support 10 fingers. There are very few models with the old 5 finger fw, so in practice the setting of the "silead,max-fingers=10" is boilerplate which needs to be copy and pasted to every touchscreen config. Reporting that 10 fingers are supported on devices which only support 5 fingers doesn't cause any problems for userspace in practice, since at max 4 finger gestures are supported anyways. Drop the max_fingers configuration and simply always assume 10 fingers. 
Signed-off-by: Hans de Goede Acked-by: Dmitry Torokhov Link: https://lore.kernel.org/r/20240525193854.39130-2-hdegoede@redhat.com --- drivers/input/touchscreen/silead.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index bbd366dcb69af..6a42b27c45990 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c @@ -71,7 +71,6 @@ struct silead_ts_data { struct regulator_bulk_data regulators[2]; char fw_name[64]; struct touchscreen_properties prop; - u32 max_fingers; u32 chip_id; struct input_mt_pos pos[SILEAD_MAX_FINGERS]; int slots[SILEAD_MAX_FINGERS]; @@ -136,7 +135,7 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data) touchscreen_parse_properties(data->input, true, &data->prop); silead_apply_efi_fw_min_max(data); - input_mt_init_slots(data->input, data->max_fingers, + input_mt_init_slots(data->input, SILEAD_MAX_FINGERS, INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK); @@ -256,10 +255,10 @@ static void silead_ts_read_data(struct i2c_client *client) return; } - if (buf[0] > data->max_fingers) { + if (buf[0] > SILEAD_MAX_FINGERS) { dev_warn(dev, "More touches reported then supported %d > %d\n", - buf[0], data->max_fingers); - buf[0] = data->max_fingers; + buf[0], SILEAD_MAX_FINGERS); + buf[0] = SILEAD_MAX_FINGERS; } if (silead_ts_handle_pen_data(data, buf)) @@ -315,7 +314,6 @@ static void silead_ts_read_data(struct i2c_client *client) static int silead_ts_init(struct i2c_client *client) { - struct silead_ts_data *data = i2c_get_clientdata(client); int error; error = i2c_smbus_write_byte_data(client, SILEAD_REG_RESET, @@ -325,7 +323,7 @@ static int silead_ts_init(struct i2c_client *client) usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX); error = i2c_smbus_write_byte_data(client, SILEAD_REG_TOUCH_NR, - data->max_fingers); + SILEAD_MAX_FINGERS); if (error) goto i2c_write_err; usleep_range(SILEAD_CMD_SLEEP_MIN, SILEAD_CMD_SLEEP_MAX); @@ -591,13 +589,6 @@ static void silead_ts_read_props(struct i2c_client *client) const char *str; int error; - error = device_property_read_u32(dev, "silead,max-fingers", - &data->max_fingers); - if (error) { - dev_dbg(dev, "Max fingers read error %d\n", error); - data->max_fingers = 5; /* Most devices handle up-to 5 fingers */ - } - error = device_property_read_string(dev, "firmware-name", &str); if (!error) snprintf(data->fw_name, sizeof(data->fw_name), From 84b26f509c1b9c8b3c3c63d75912701f151fd148 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 25 May 2024 21:38:54 +0200 Subject: [PATCH 189/537] platform/x86: touchscreen_dmi: Drop "silead,max-fingers" property The silead touchscreen driver now defaults to 10 fingers, so it is no longer necessary to have a "silead,max-fingers=10" property for each silead touchscreen model. Drop this property from all the configs. 
Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20240525193854.39130-3-hdegoede@redhat.com --- drivers/platform/x86/touchscreen_dmi.c | 57 -------------------------- 1 file changed, 57 deletions(-) diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 2d9ca2292ea19..3f626584f2c3f 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -34,7 +34,6 @@ static const struct property_entry archos_101_cesium_educ_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-archos-101-cesium-educ.fw"), { } @@ -49,7 +48,6 @@ static const struct property_entry bush_bush_windows_tablet_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1850), PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-bush-bush-windows-tablet.fw"), { } @@ -79,7 +77,6 @@ static const struct property_entry chuwi_hi8_air_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-hi8-air.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -95,7 +92,6 @@ static const struct property_entry chuwi_hi8_pro_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -123,7 +119,6 @@ static const struct property_entry chuwi_hi10_air_props[] = { PROPERTY_ENTRY_U32("touchscreen-fuzz-x", 5), PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 4), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-air.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -139,7 +134,6 @@ static const struct property_entry chuwi_hi10_plus_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1908), PROPERTY_ENTRY_U32("touchscreen-size-y", 1270), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_BOOL("silead,pen-supported"), PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8), @@ -171,7 +165,6 @@ static const struct property_entry chuwi_hi10_pro_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-pro.fw"), PROPERTY_ENTRY_U32_ARRAY("silead,efi-fw-min-max", chuwi_hi10_pro_efi_min_max), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_BOOL("silead,pen-supported"), PROPERTY_ENTRY_U32("silead,pen-resolution-x", 8), @@ -201,7 +194,6 @@ static const struct property_entry chuwi_hibook_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -227,7 +219,6 @@ static const struct property_entry chuwi_vi8_props[] = { 
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-vi8.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -255,7 +246,6 @@ static const struct property_entry chuwi_vi10_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1858), PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-vi10.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -271,7 +261,6 @@ static const struct property_entry chuwi_surbook_mini_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2040), PROPERTY_ENTRY_U32("touchscreen-size-y", 1524), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-surbook-mini.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), { } }; @@ -289,7 +278,6 @@ static const struct property_entry connect_tablet9_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-connect-tablet9.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -306,7 +294,6 @@ static const struct property_entry csl_panther_tab_hd_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -322,7 +309,6 @@ static const struct property_entry cube_iwork8_air_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 896), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-cube-iwork8-air.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -346,7 +332,6 @@ static const struct property_entry cube_knote_i1101_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1961), PROPERTY_ENTRY_U32("touchscreen-size-y", 1513), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-cube-knote-i1101.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -360,7 +345,6 @@ static const struct property_entry dexp_ursus_7w_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 890), PROPERTY_ENTRY_U32("touchscreen-size-y", 630), PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -376,7 +360,6 @@ static const struct property_entry dexp_ursus_kx210i_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1720), PROPERTY_ENTRY_U32("touchscreen-size-y", 1137), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -391,7 +374,6 @@ static const struct property_entry digma_citi_e200_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -450,7 +432,6 @@ static const struct property_entry irbis_tw90_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-irbis_tw90.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), 
PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -466,7 +447,6 @@ static const struct property_entry irbis_tw118_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1960), PROPERTY_ENTRY_U32("touchscreen-size-y", 1510), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-irbis-tw118.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -483,7 +463,6 @@ static const struct property_entry itworks_tw891_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-itworks-tw891.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -496,7 +475,6 @@ static const struct property_entry jumper_ezpad_6_pro_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -511,7 +489,6 @@ static const struct property_entry jumper_ezpad_6_pro_b_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro-b.fw"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -527,7 +504,6 @@ static const struct property_entry jumper_ezpad_6_m4_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1950), PROPERTY_ENTRY_U32("touchscreen-size-y", 1525), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-m4.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -544,7 +520,6 @@ static const struct property_entry jumper_ezpad_7_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1526), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-jumper-ezpad-7.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,stuck-controller-bug"), { } }; @@ -561,7 +536,6 @@ static const struct property_entry jumper_ezpad_mini3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1138), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-jumper-ezpad-mini3.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -578,7 +552,6 @@ static const struct property_entry mpman_converter9_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-mpman-converter9.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -594,7 +567,6 @@ static const struct property_entry mpman_mpwin895cl_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1150), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-mpman-mpwin895cl.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -611,7 +583,6 @@ static const struct property_entry myria_my8307_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-myria-my8307.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -628,7 +599,6 @@ static const struct property_entry onda_obook_20_plus_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), 
PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-obook-20-plus.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -645,7 +615,6 @@ static const struct property_entry onda_v80_plus_v3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -669,7 +638,6 @@ static const struct property_entry onda_v820w_32g_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -687,7 +655,6 @@ static const struct property_entry onda_v891_v5_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891-v5.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -703,7 +670,6 @@ static const struct property_entry onda_v891w_v1_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1676), PROPERTY_ENTRY_U32("touchscreen-size-y", 1130), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -720,7 +686,6 @@ static const struct property_entry onda_v891w_v3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1135), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v891w-v3.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -759,7 +724,6 @@ static const struct property_entry pipo_w11_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1984), PROPERTY_ENTRY_U32("touchscreen-size-y", 1532), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -775,7 +739,6 @@ static const struct property_entry positivo_c4128b_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1915), PROPERTY_ENTRY_U32("touchscreen-size-y", 1269), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-positivo-c4128b.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -791,7 +754,6 @@ static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1146), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -808,7 +770,6 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -825,7 +786,6 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1520), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), 
PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -842,7 +802,6 @@ static const struct property_entry predia_basic_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1144), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-predia-basic.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -859,7 +818,6 @@ static const struct property_entry rca_cambio_w101_v2_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 874), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rca-cambio-w101-v2.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -874,7 +832,6 @@ static const struct property_entry rwc_nanote_p8_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -890,7 +847,6 @@ static const struct property_entry schneider_sct101ctm_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -906,7 +862,6 @@ static const struct property_entry globalspace_solt_ivw116_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1723), PROPERTY_ENTRY_U32("touchscreen-size-y", 1077), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-globalspace-solt-ivw116.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -923,7 +878,6 @@ static const struct property_entry techbite_arc_11_6_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1270), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-techbite-arc-11-6.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -939,7 +893,6 @@ static const struct property_entry teclast_tbook11_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1264), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-tbook11.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -965,7 +918,6 @@ static const struct property_entry teclast_x16_plus_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1264), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -988,7 +940,6 @@ static const struct property_entry teclast_x3_plus_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-teclast-x3-plus.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -1004,7 +955,6 @@ static const struct property_entry teclast_x98plus2_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -1018,7 +968,6 @@ static const struct property_entry trekstor_primebook_c11_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1530), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), 
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -1032,7 +981,6 @@ static const struct property_entry trekstor_primebook_c13_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2624), PROPERTY_ENTRY_U32("touchscreen-size-y", 1920), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -1046,7 +994,6 @@ static const struct property_entry trekstor_primetab_t13b_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2500), PROPERTY_ENTRY_U32("touchscreen-size-y", 1900), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), { } @@ -1074,7 +1021,6 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1), PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -1090,7 +1036,6 @@ static const struct property_entry trekstor_surftab_wintron70_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 884), PROPERTY_ENTRY_U32("touchscreen-size-y", 632), PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -1107,7 +1052,6 @@ static const struct property_entry viglen_connect_10_props[] = { PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 6), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-viglen-connect-10.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -1121,7 +1065,6 @@ static const struct property_entry vinga_twizzle_j116_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1920), PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-vinga-twizzle_j116.fw"), - PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; From 55624db051c8bdd39acbb1f35774c3f7b97c07b8 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sun, 2 Jun 2024 11:58:00 +0300 Subject: [PATCH 190/537] platform/x86: touchscreen_dmi: Use 2-argument strscpy() Use 2-argument strscpy(), which is not only shorter but also provides an additional check that destination buffer is an array. 
Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20240602090244.1666360-8-andy.shevchenko@gmail.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- drivers/platform/x86/touchscreen_dmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 3f626584f2c3f..f74af0a689f20 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -1850,7 +1850,7 @@ static int __init ts_parse_props(char *str) u32 u32val; int i, ret; - strscpy(orig_str, str, sizeof(orig_str)); + strscpy(orig_str, str); /* * str is part of the static_command_line from init/main.c and poking From 078fc56f5c1787a9272373742f817b320c046d1a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 28 May 2024 13:59:33 +0200 Subject: [PATCH 191/537] platform/x86: yt2-1380: add CONFIG_EXTCON dependency This driver uses the extcon subsystem and fails to build when it cannot call into that subsystem: x86_64-linux-ld: vmlinux.o: in function `yt2_1380_fc_worker': lenovo-yoga-tab2-pro-1380-fastcharger.c:(.text+0xa9d819): undefined reference to `extcon_get_state' x86_64-linux-ld: lenovo-yoga-tab2-pro-1380-fastcharger.c:(.text+0xa9d853): undefined reference to `extcon_get_state' x86_64-linux-ld: vmlinux.o: in function `yt2_1380_fc_serdev_probe': lenovo-yoga-tab2-pro-1380-fastcharger.c:(.text+0xa9da22): undefined reference to `extcon_get_extcon_dev' x86_64-linux-ld: lenovo-yoga-tab2-pro-1380-fastcharger.c:(.text+0xa9dc0c): undefined reference to `devm_extcon_register_notifier_all' Add a Kconfig dependency to make it it always builds correctly. Fixes: b2ed33e8d486 ("platform/x86: Add lenovo-yoga-tab2-pro-1380-fastcharger driver") Signed-off-by: Arnd Bergmann Link: https://lore.kernel.org/r/20240528115940.3169455-1-arnd@kernel.org Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- drivers/platform/x86/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 1953317541eab..665fa95249865 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -136,6 +136,7 @@ config YOGABOOK config YT2_1380 tristate "Lenovo Yoga Tablet 2 1380 fast charge driver" depends on SERIAL_DEV_BUS + depends on EXTCON depends on ACPI help Say Y here to enable support for the custom fast charging protocol From 1981b296f858010eae409548fd297659b2cc570e Mon Sep 17 00:00:00 2001 From: Armin Wolf Date: Tue, 28 May 2024 22:49:02 +0200 Subject: [PATCH 192/537] platform/x86: dell-smbios: Fix wrong token data in sysfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When reading token data from sysfs on my Inspiron 3505, the token locations and values are wrong. This happens because match_attribute() blindly assumes that all entries in da_tokens have an associated entry in token_attrs. This however is not true as soon as da_tokens[] contains zeroed token entries. Those entries are being skipped when initialising token_attrs, breaking the core assumption of match_attribute(). Fix this by defining an extra struct for each pair of token attributes and use container_of() to retrieve token information. Tested on a Dell Inspiron 3050. 
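For illustration, a simplified sketch of the container_of() pattern described above; the struct and attribute names are placeholders rather than the exact dell-smbios code.

/* Minimal sketch: embed the device_attribute in a per-token wrapper and
 * recover the wrapper from the attribute pointer in the show() callback. */
#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/types.h>

struct demo_token_data {
	struct device_attribute value_attr;
	u32 value;
};

static ssize_t demo_value_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	/* No name-based lookup over a parallel array is needed. */
	struct demo_token_data *data =
		container_of(attr, struct demo_token_data, value_attr);

	return sysfs_emit(buf, "%08x", data->value);
}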
Fixes: 33b9ca1e53b4 ("platform/x86: dell-smbios: Add a sysfs interface for SMBIOS tokens") Signed-off-by: Armin Wolf Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240528204903.445546-1-W_Armin@gmx.de Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- drivers/platform/x86/dell/dell-smbios-base.c | 92 ++++++++------------ 1 file changed, 36 insertions(+), 56 deletions(-) diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c index e61bfaf8b5c48..86b95206cb1bd 100644 --- a/drivers/platform/x86/dell/dell-smbios-base.c +++ b/drivers/platform/x86/dell/dell-smbios-base.c @@ -11,6 +11,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -25,11 +26,16 @@ static u32 da_supported_commands; static int da_num_tokens; static struct platform_device *platform_device; static struct calling_interface_token *da_tokens; -static struct device_attribute *token_location_attrs; -static struct device_attribute *token_value_attrs; +static struct token_sysfs_data *token_entries; static struct attribute **token_attrs; static DEFINE_MUTEX(smbios_mutex); +struct token_sysfs_data { + struct device_attribute location_attr; + struct device_attribute value_attr; + struct calling_interface_token *token; +}; + struct smbios_device { struct list_head list; struct device *device; @@ -416,47 +422,26 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy) } } -static int match_attribute(struct device *dev, - struct device_attribute *attr) -{ - int i; - - for (i = 0; i < da_num_tokens * 2; i++) { - if (!token_attrs[i]) - continue; - if (strcmp(token_attrs[i]->name, attr->attr.name) == 0) - return i/2; - } - dev_dbg(dev, "couldn't match: %s\n", attr->attr.name); - return -EINVAL; -} - static ssize_t location_show(struct device *dev, struct device_attribute *attr, char *buf) { - int i; + struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, location_attr); if (!capable(CAP_SYS_ADMIN)) return -EPERM; - i = match_attribute(dev, attr); - if (i > 0) - return sysfs_emit(buf, "%08x", da_tokens[i].location); - return 0; + return sysfs_emit(buf, "%08x", data->token->location); } static ssize_t value_show(struct device *dev, struct device_attribute *attr, char *buf) { - int i; + struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, value_attr); if (!capable(CAP_SYS_ADMIN)) return -EPERM; - i = match_attribute(dev, attr); - if (i > 0) - return sysfs_emit(buf, "%08x", da_tokens[i].value); - return 0; + return sysfs_emit(buf, "%08x", data->token->value); } static struct attribute_group smbios_attribute_group = { @@ -473,22 +458,15 @@ static int build_tokens_sysfs(struct platform_device *dev) { char *location_name; char *value_name; - size_t size; int ret; int i, j; - /* (number of tokens + 1 for null terminated */ - size = sizeof(struct device_attribute) * (da_num_tokens + 1); - token_location_attrs = kzalloc(size, GFP_KERNEL); - if (!token_location_attrs) + token_entries = kcalloc(da_num_tokens, sizeof(*token_entries), GFP_KERNEL); + if (!token_entries) return -ENOMEM; - token_value_attrs = kzalloc(size, GFP_KERNEL); - if (!token_value_attrs) - goto out_allocate_value; /* need to store both location and value + terminator*/ - size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1); - token_attrs = kzalloc(size, GFP_KERNEL); + token_attrs = kcalloc((2 * da_num_tokens) + 1, sizeof(*token_attrs), GFP_KERNEL); if (!token_attrs) goto out_allocate_attrs; @@ -496,27 
+474,32 @@ static int build_tokens_sysfs(struct platform_device *dev) /* skip empty */ if (da_tokens[i].tokenID == 0) continue; + + token_entries[i].token = &da_tokens[i]; + /* add location */ location_name = kasprintf(GFP_KERNEL, "%04x_location", da_tokens[i].tokenID); if (location_name == NULL) goto out_unwind_strings; - sysfs_attr_init(&token_location_attrs[i].attr); - token_location_attrs[i].attr.name = location_name; - token_location_attrs[i].attr.mode = 0444; - token_location_attrs[i].show = location_show; - token_attrs[j++] = &token_location_attrs[i].attr; + + sysfs_attr_init(&token_entries[i].location_attr.attr); + token_entries[i].location_attr.attr.name = location_name; + token_entries[i].location_attr.attr.mode = 0444; + token_entries[i].location_attr.show = location_show; + token_attrs[j++] = &token_entries[i].location_attr.attr; /* add value */ value_name = kasprintf(GFP_KERNEL, "%04x_value", da_tokens[i].tokenID); if (value_name == NULL) goto loop_fail_create_value; - sysfs_attr_init(&token_value_attrs[i].attr); - token_value_attrs[i].attr.name = value_name; - token_value_attrs[i].attr.mode = 0444; - token_value_attrs[i].show = value_show; - token_attrs[j++] = &token_value_attrs[i].attr; + + sysfs_attr_init(&token_entries[i].value_attr.attr); + token_entries[i].value_attr.attr.name = value_name; + token_entries[i].value_attr.attr.mode = 0444; + token_entries[i].value_attr.show = value_show; + token_attrs[j++] = &token_entries[i].value_attr.attr; continue; loop_fail_create_value: @@ -532,14 +515,12 @@ static int build_tokens_sysfs(struct platform_device *dev) out_unwind_strings: while (i--) { - kfree(token_location_attrs[i].attr.name); - kfree(token_value_attrs[i].attr.name); + kfree(token_entries[i].location_attr.attr.name); + kfree(token_entries[i].value_attr.attr.name); } kfree(token_attrs); out_allocate_attrs: - kfree(token_value_attrs); -out_allocate_value: - kfree(token_location_attrs); + kfree(token_entries); return -ENOMEM; } @@ -551,12 +532,11 @@ static void free_group(struct platform_device *pdev) sysfs_remove_group(&pdev->dev.kobj, &smbios_attribute_group); for (i = 0; i < da_num_tokens; i++) { - kfree(token_location_attrs[i].attr.name); - kfree(token_value_attrs[i].attr.name); + kfree(token_entries[i].location_attr.attr.name); + kfree(token_entries[i].value_attr.attr.name); } kfree(token_attrs); - kfree(token_value_attrs); - kfree(token_location_attrs); + kfree(token_entries); } static int __init dell_smbios_init(void) From 306aec7eea8c83e8212f4dd6e5b358c508e3e466 Mon Sep 17 00:00:00 2001 From: Armin Wolf Date: Tue, 28 May 2024 22:49:03 +0200 Subject: [PATCH 193/537] platform/x86: dell-smbios: Simplify error handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the allocation of value_name fails, the error handling code uses two gotos for error handling, which is not necessary. Simplify the error handling in this case by only using a single goto. Tested on a Dell Inspiron 3505. 
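For context, the resulting unwind reads roughly like the stand-alone sketch below (illustrative names only, not the driver code): when the second allocation fails, the first one is released inline and the shared error path is taken, so no extra label is needed.

  #include <stdio.h>
  #include <stdlib.h>

  /* Illustrative only: two allocations, one shared error label. */
  static int make_pair(char **a, char **b)
  {
          *a = malloc(16);
          if (!*a)
                  goto err;

          *b = malloc(16);
          if (!*b) {
                  free(*a);       /* clean up inline ... */
                  goto err;       /* ... then take the common error path */
          }

          return 0;

  err:
          return -1;
  }

  int main(void)
  {
          char *a, *b;

          if (make_pair(&a, &b))
                  return 1;

          printf("allocated both buffers\n");
          free(b);
          free(a);
          return 0;
  }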
Signed-off-by: Armin Wolf Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240528204903.445546-2-W_Armin@gmx.de Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- drivers/platform/x86/dell/dell-smbios-base.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c index 86b95206cb1bd..b562ed99ec4e7 100644 --- a/drivers/platform/x86/dell/dell-smbios-base.c +++ b/drivers/platform/x86/dell/dell-smbios-base.c @@ -492,19 +492,16 @@ static int build_tokens_sysfs(struct platform_device *dev) /* add value */ value_name = kasprintf(GFP_KERNEL, "%04x_value", da_tokens[i].tokenID); - if (value_name == NULL) - goto loop_fail_create_value; + if (!value_name) { + kfree(location_name); + goto out_unwind_strings; + } sysfs_attr_init(&token_entries[i].value_attr.attr); token_entries[i].value_attr.attr.name = value_name; token_entries[i].value_attr.attr.mode = 0444; token_entries[i].value_attr.show = value_show; token_attrs[j++] = &token_entries[i].value_attr.attr; - continue; - -loop_fail_create_value: - kfree(location_name); - goto out_unwind_strings; } smbios_attribute_group.attrs = token_attrs; From 77f1972bdcf7513293e8bbe376b9fe837310ee9c Mon Sep 17 00:00:00 2001 From: Suma Hegde Date: Mon, 3 Jun 2024 08:15:12 +0000 Subject: [PATCH 194/537] platform/x86/amd/hsmp: Check HSMP support on AMD family of processors HSMP interface is supported only on few x86 processors from AMD. Accessing HSMP registers on rest of the platforms might cause unexpected behaviour. So add a check. Also unavailability of this interface on rest of the processors is not an error. Hence, use pr_info() instead of the pr_err() to log the message. Signed-off-by: Suma Hegde Reviewed-by: Naveen Krishna Chatradhi Link: https://lore.kernel.org/r/20240603081512.142909-1-suma.hegde@amd.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- drivers/platform/x86/amd/hsmp.c | 50 ++++++++++++++++++++++++++++----- 1 file changed, 43 insertions(+), 7 deletions(-) diff --git a/drivers/platform/x86/amd/hsmp.c b/drivers/platform/x86/amd/hsmp.c index d84ea66eecc6b..8fcf38eed7f00 100644 --- a/drivers/platform/x86/amd/hsmp.c +++ b/drivers/platform/x86/amd/hsmp.c @@ -907,16 +907,44 @@ static int hsmp_plat_dev_register(void) return ret; } +/* + * This check is only needed for backward compatibility of previous platforms. + * All new platforms are expected to support ACPI based probing. + */ +static bool legacy_hsmp_support(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + return false; + + switch (boot_cpu_data.x86) { + case 0x19: + switch (boot_cpu_data.x86_model) { + case 0x00 ... 0x1F: + case 0x30 ... 0x3F: + case 0x90 ... 0x9F: + case 0xA0 ... 0xAF: + return true; + default: + return false; + } + case 0x1A: + switch (boot_cpu_data.x86_model) { + case 0x00 ... 
0x1F: + return true; + default: + return false; + } + default: + return false; + } + + return false; +} + static int __init hsmp_plt_init(void) { int ret = -ENODEV; - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD || boot_cpu_data.x86 < 0x19) { - pr_err("HSMP is not supported on Family:%x model:%x\n", - boot_cpu_data.x86, boot_cpu_data.x86_model); - return ret; - } - /* * amd_nb_num() returns number of SMN/DF interfaces present in the system * if we have N SMN/DF interfaces that ideally means N sockets @@ -930,7 +958,15 @@ static int __init hsmp_plt_init(void) return ret; if (!plat_dev.is_acpi_device) { - ret = hsmp_plat_dev_register(); + if (legacy_hsmp_support()) { + /* Not ACPI device, but supports HSMP, register a plat_dev */ + ret = hsmp_plat_dev_register(); + } else { + /* Not ACPI, Does not support HSMP */ + pr_info("HSMP is not supported on Family:%x model:%x\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + ret = -ENODEV; + } if (ret) platform_driver_unregister(&amd_hsmp_driver); } From 0110c4b110477bb1f19b0d02361846be7ab08300 Mon Sep 17 00:00:00 2001 From: Sunil V L Date: Mon, 27 May 2024 13:41:13 +0530 Subject: [PATCH 195/537] irqchip/riscv-intc: Prevent memory leak when riscv_intc_init_common() fails When riscv_intc_init_common() fails, the firmware node allocated is not freed. Add the missing free(). Fixes: 7023b9d83f03 ("irqchip/riscv-intc: Add ACPI support") Signed-off-by: Sunil V L Signed-off-by: Thomas Gleixner Reviewed-by: Anup Patel Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20240527081113.616189-1-sunilvl@ventanamicro.com --- drivers/irqchip/irq-riscv-intc.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c index 9e71c44288141..4f3a12383a1e4 100644 --- a/drivers/irqchip/irq-riscv-intc.c +++ b/drivers/irqchip/irq-riscv-intc.c @@ -253,8 +253,9 @@ IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init); static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { - struct fwnode_handle *fn; struct acpi_madt_rintc *rintc; + struct fwnode_handle *fn; + int rc; rintc = (struct acpi_madt_rintc *)header; @@ -273,7 +274,11 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, return -ENOMEM; } - return riscv_intc_init_common(fn, &riscv_intc_chip); + rc = riscv_intc_init_common(fn, &riscv_intc_chip); + if (rc) + irq_domain_free_fwnode(fn); + + return rc; } IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL, From 6149db4997f582e958da675092f21c666e3b67b7 Mon Sep 17 00:00:00 2001 From: Tristram Ha Date: Tue, 28 May 2024 19:20:23 -0700 Subject: [PATCH 196/537] net: phy: micrel: fix KSZ9477 PHY issues after suspend/resume When the PHY is powered up after powered down most of the registers are reset, so the PHY setup code needs to be done again. In addition the interrupt register will need to be setup again so that link status indication works again. Fixes: 26dd2974c5b5 ("net: phy: micrel: Move KSZ9477 errata fixes to PHY driver") Signed-off-by: Tristram Ha Signed-off-by: David S. 
Miller --- drivers/net/phy/micrel.c | 62 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 6 deletions(-) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 2b8f8b7f1517c..8c20cf9375306 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -1939,7 +1939,7 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = { {0x1c, 0x20, 0xeeee}, }; -static int ksz9477_config_init(struct phy_device *phydev) +static int ksz9477_phy_errata(struct phy_device *phydev) { int err; int i; @@ -1967,16 +1967,30 @@ static int ksz9477_config_init(struct phy_device *phydev) return err; } + err = genphy_restart_aneg(phydev); + if (err) + return err; + + return err; +} + +static int ksz9477_config_init(struct phy_device *phydev) +{ + int err; + + /* Only KSZ9897 family of switches needs this fix. */ + if ((phydev->phy_id & 0xf) == 1) { + err = ksz9477_phy_errata(phydev); + if (err) + return err; + } + /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes * in this switch shall be regarded as broken. */ if (phydev->dev_flags & MICREL_NO_EEE) phydev->eee_broken_modes = -1; - err = genphy_restart_aneg(phydev); - if (err) - return err; - return kszphy_config_init(phydev); } @@ -2085,6 +2099,42 @@ static int kszphy_resume(struct phy_device *phydev) return 0; } +static int ksz9477_resume(struct phy_device *phydev) +{ + int ret; + + /* No need to initialize registers if not powered down. */ + ret = phy_read(phydev, MII_BMCR); + if (ret < 0) + return ret; + if (!(ret & BMCR_PDOWN)) + return 0; + + genphy_resume(phydev); + + /* After switching from power-down to normal mode, an internal global + * reset is automatically generated. Wait a minimum of 1 ms before + * read/write access to the PHY registers. + */ + usleep_range(1000, 2000); + + /* Only KSZ9897 family of switches needs this fix. */ + if ((phydev->phy_id & 0xf) == 1) { + ret = ksz9477_phy_errata(phydev); + if (ret) + return ret; + } + + /* Enable PHY Interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_ENABLED; + if (phydev->drv->config_intr) + phydev->drv->config_intr(phydev); + } + + return 0; +} + static int kszphy_probe(struct phy_device *phydev) { const struct kszphy_type *type = phydev->drv->driver_data; @@ -5493,7 +5543,7 @@ static struct phy_driver ksphy_driver[] = { .config_intr = kszphy_config_intr, .handle_interrupt = kszphy_handle_interrupt, .suspend = genphy_suspend, - .resume = genphy_resume, + .resume = ksz9477_resume, .get_features = ksz9477_get_features, } }; From e306a894bd511804ba9db7c00ca9cc05b55df1f2 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Wed, 29 May 2024 14:54:56 -0700 Subject: [PATCH 197/537] irqchip/sifive-plic: Chain to parent IRQ after handlers are ready Now that the PLIC uses a platform driver, the driver is probed later in the boot process, where interrupts from peripherals might already be pending. As a result, plic_handle_irq() may be called as early as the call to irq_set_chained_handler() completes. But this call happens before the per-context handler is completely set up, so there is a window where plic_handle_irq() can see incomplete per-context state and crash. Avoid this by delaying the call to irq_set_chained_handler() until all handlers from all PLICs are initialized. 
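The ordering requirement can be pictured with the small stand-alone sketch below (illustrative only, not the driver code): the handler is only installed once every piece of state it reads has been initialized, so a caller can never observe a half-built context.

  #include <stdio.h>

  struct context {
          int data;
  };

  static struct context ctx;
  static void (*chained_handler)(void);   /* stands in for the chained IRQ hook */

  static void handle_irq(void)
  {
          /* safe: only installed after ctx is fully set up */
          printf("handled, data=%d\n", ctx.data);
  }

  int main(void)
  {
          ctx.data = 42;                  /* 1. initialize all per-context state */
          chained_handler = handle_irq;   /* 2. only then install the handler */

          if (chained_handler)
                  chained_handler();
          return 0;
  }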
Fixes: 8ec99b033147 ("irqchip/sifive-plic: Convert PLIC driver into a platform driver") Reported-by: Geert Uytterhoeven Signed-off-by: Samuel Holland Signed-off-by: Thomas Gleixner Tested-by: Geert Uytterhoeven Reviewed-by: Anup Patel Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20240529215458.937817-1-samuel.holland@sifive.com Closes: https://lore.kernel.org/r/CAMuHMdVYFFR7K5SbHBLY-JHhb7YpgGMS_hnRWm8H0KD-wBo+4A@mail.gmail.com/ --- drivers/irqchip/irq-sifive-plic.c | 34 +++++++++++++++---------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 8fb183ced1e77..9e22f7e378f50 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -85,7 +85,7 @@ struct plic_handler { struct plic_priv *priv; }; static int plic_parent_irq __ro_after_init; -static bool plic_cpuhp_setup_done __ro_after_init; +static bool plic_global_setup_done __ro_after_init; static DEFINE_PER_CPU(struct plic_handler, plic_handlers); static int plic_irq_set_type(struct irq_data *d, unsigned int type); @@ -487,10 +487,8 @@ static int plic_probe(struct platform_device *pdev) unsigned long plic_quirks = 0; struct plic_handler *handler; u32 nr_irqs, parent_hwirq; - struct irq_domain *domain; struct plic_priv *priv; irq_hw_number_t hwirq; - bool cpuhp_setup; if (is_of_node(dev->fwnode)) { const struct of_device_id *id; @@ -549,14 +547,6 @@ static int plic_probe(struct platform_device *pdev) continue; } - /* Find parent domain and register chained handler */ - domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); - if (!plic_parent_irq && domain) { - plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); - if (plic_parent_irq) - irq_set_chained_handler(plic_parent_irq, plic_handle_irq); - } - /* * When running in M-mode we need to ignore the S-mode handler. * Here we assume it always comes later, but that might be a @@ -597,25 +587,35 @@ static int plic_probe(struct platform_device *pdev) goto fail_cleanup_contexts; /* - * We can have multiple PLIC instances so setup cpuhp state + * We can have multiple PLIC instances so setup global state * and register syscore operations only once after context * handlers of all online CPUs are initialized. */ - if (!plic_cpuhp_setup_done) { - cpuhp_setup = true; + if (!plic_global_setup_done) { + struct irq_domain *domain; + bool global_setup = true; + for_each_online_cpu(cpu) { handler = per_cpu_ptr(&plic_handlers, cpu); if (!handler->present) { - cpuhp_setup = false; + global_setup = false; break; } } - if (cpuhp_setup) { + + if (global_setup) { + /* Find parent domain and register chained handler */ + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); + if (domain) + plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); + if (plic_parent_irq) + irq_set_chained_handler(plic_parent_irq, plic_handle_irq); + cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, "irqchip/sifive/plic:starting", plic_starting_cpu, plic_dying_cpu); register_syscore_ops(&plic_irq_syscore_ops); - plic_cpuhp_setup_done = true; + plic_global_setup_done = true; } } From 8c219e52ca4d9a67cd6a7074e91bf29b55edc075 Mon Sep 17 00:00:00 2001 From: Gregor Herburger Date: Thu, 30 May 2024 12:19:59 +0200 Subject: [PATCH 198/537] gpio: tqmx86: fix typo in Kconfig label Fix description for GPIO_TQMX86 from QTMX86 to TQMx86. 
Fixes: b868db94a6a7 ("gpio: tqmx86: Add GPIO from for this IO controller") Signed-off-by: Gregor Herburger Signed-off-by: Matthias Schiffer Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/e0e38c9944ad6d281d9a662a45d289b88edc808e.1717063994.git.matthias.schiffer@ew.tq-group.com Signed-off-by: Bartosz Golaszewski --- drivers/gpio/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 3dbddec070281..1c28a48915bb2 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -1576,7 +1576,7 @@ config GPIO_TPS68470 are "output only" GPIOs. config GPIO_TQMX86 - tristate "TQ-Systems QTMX86 GPIO" + tristate "TQ-Systems TQMx86 GPIO" depends on MFD_TQMX86 || COMPILE_TEST depends on HAS_IOPORT_MAP select GPIOLIB_IRQCHIP From 9d6a811b522ba558bcb4ec01d12e72a0af8e9f6e Mon Sep 17 00:00:00 2001 From: Matthias Schiffer Date: Thu, 30 May 2024 12:20:00 +0200 Subject: [PATCH 199/537] gpio: tqmx86: introduce shadow register for GPIO output value The TQMx86 GPIO controller uses the same register address for input and output data. Reading the register will always return current inputs rather than the previously set outputs (regardless of the current direction setting). Therefore, using a RMW pattern does not make sense when setting output values. Instead, the previously set output register value needs to be stored as a shadow register. As there is no reliable way to get the current output values from the hardware, also initialize all channels to 0, to ensure that stored and actual output values match. This should usually not have any effect in practice, as the TQMx86 UEFI sets all outputs to 0 during boot. Also prepare for extension of the driver to more than 8 GPIOs by using DECLARE_BITMAP. Fixes: b868db94a6a7 ("gpio: tqmx86: Add GPIO from for this IO controller") Signed-off-by: Matthias Schiffer Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/d0555933becd45fa92a85675d26e4d59343ddc01.1717063994.git.matthias.schiffer@ew.tq-group.com Signed-off-by: Bartosz Golaszewski --- drivers/gpio/gpio-tqmx86.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c index 3a28c1f273c39..b7e2dbbdc4ebe 100644 --- a/drivers/gpio/gpio-tqmx86.c +++ b/drivers/gpio/gpio-tqmx86.c @@ -6,6 +6,7 @@ * Vadim V.Vlasov */ +#include #include #include #include @@ -38,6 +39,7 @@ struct tqmx86_gpio_data { void __iomem *io_base; int irq; raw_spinlock_t spinlock; + DECLARE_BITMAP(output, TQMX86_NGPIO); u8 irq_type[TQMX86_NGPI]; }; @@ -64,15 +66,10 @@ static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset, { struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip); unsigned long flags; - u8 val; raw_spin_lock_irqsave(&gpio->spinlock, flags); - val = tqmx86_gpio_read(gpio, TQMX86_GPIOD); - if (value) - val |= BIT(offset); - else - val &= ~BIT(offset); - tqmx86_gpio_write(gpio, val, TQMX86_GPIOD); + __assign_bit(offset, gpio->output, value); + tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD); raw_spin_unlock_irqrestore(&gpio->spinlock, flags); } @@ -277,6 +274,13 @@ static int tqmx86_gpio_probe(struct platform_device *pdev) tqmx86_gpio_write(gpio, (u8)~TQMX86_DIR_INPUT_MASK, TQMX86_GPIODD); + /* + * Reading the previous output state is not possible with TQMx86 hardware. + * Initialize all outputs to 0 to have a defined state that matches the + * shadow register. 
+ */ + tqmx86_gpio_write(gpio, 0, TQMX86_GPIOD); + chip = &gpio->chip; chip->label = "gpio-tqmx86"; chip->owner = THIS_MODULE; From 08af509efdf8dad08e972b48de0e2c2a7919ea8b Mon Sep 17 00:00:00 2001 From: Matthias Schiffer Date: Thu, 30 May 2024 12:20:01 +0200 Subject: [PATCH 200/537] gpio: tqmx86: store IRQ trigger type and unmask status separately irq_set_type() should not implicitly unmask the IRQ. All accesses to the interrupt configuration register are moved to a new helper tqmx86_gpio_irq_config(). We also introduce the new rule that accessing irq_type must happen while locked, which will become significant for fixing EDGE_BOTH handling. Fixes: b868db94a6a7 ("gpio: tqmx86: Add GPIO from for this IO controller") Signed-off-by: Matthias Schiffer Link: https://lore.kernel.org/r/6aa4f207f77cb58ef64ffb947e91949b0f753ccd.1717063994.git.matthias.schiffer@ew.tq-group.com Signed-off-by: Bartosz Golaszewski --- drivers/gpio/gpio-tqmx86.c | 48 ++++++++++++++++++++++---------------- 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c index b7e2dbbdc4ebe..7e428c872a257 100644 --- a/drivers/gpio/gpio-tqmx86.c +++ b/drivers/gpio/gpio-tqmx86.c @@ -29,15 +29,19 @@ #define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */ #define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */ +#define TQMX86_GPII_NONE 0 #define TQMX86_GPII_FALLING BIT(0) #define TQMX86_GPII_RISING BIT(1) #define TQMX86_GPII_MASK (BIT(0) | BIT(1)) #define TQMX86_GPII_BITS 2 +/* Stored in irq_type with GPII bits */ +#define TQMX86_INT_UNMASKED BIT(2) struct tqmx86_gpio_data { struct gpio_chip chip; void __iomem *io_base; int irq; + /* Lock must be held for accessing output and irq_type fields */ raw_spinlock_t spinlock; DECLARE_BITMAP(output, TQMX86_NGPIO); u8 irq_type[TQMX86_NGPI]; @@ -104,21 +108,32 @@ static int tqmx86_gpio_get_direction(struct gpio_chip *chip, return GPIO_LINE_DIRECTION_OUT; } +static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset) + __must_hold(&gpio->spinlock) +{ + u8 type = TQMX86_GPII_NONE, gpiic; + + if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) + type = gpio->irq_type[offset] & TQMX86_GPII_MASK; + + gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); + gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS)); + gpiic |= type << (offset * TQMX86_GPII_BITS); + tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); +} + static void tqmx86_gpio_irq_mask(struct irq_data *data) { unsigned int offset = (data->hwirq - TQMX86_NGPO); struct tqmx86_gpio_data *gpio = gpiochip_get_data( irq_data_get_irq_chip_data(data)); unsigned long flags; - u8 gpiic, mask; - - mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS); raw_spin_lock_irqsave(&gpio->spinlock, flags); - gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); - gpiic &= ~mask; - tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); + gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED; + tqmx86_gpio_irq_config(gpio, offset); raw_spin_unlock_irqrestore(&gpio->spinlock, flags); + gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data)); } @@ -128,16 +143,12 @@ static void tqmx86_gpio_irq_unmask(struct irq_data *data) struct tqmx86_gpio_data *gpio = gpiochip_get_data( irq_data_get_irq_chip_data(data)); unsigned long flags; - u8 gpiic, mask; - - mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS); gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data)); + raw_spin_lock_irqsave(&gpio->spinlock, flags); - gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); - gpiic &= ~mask; - gpiic |= 
gpio->irq_type[offset] << (offset * TQMX86_GPII_BITS); - tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); + gpio->irq_type[offset] |= TQMX86_INT_UNMASKED; + tqmx86_gpio_irq_config(gpio, offset); raw_spin_unlock_irqrestore(&gpio->spinlock, flags); } @@ -148,7 +159,7 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type) unsigned int offset = (data->hwirq - TQMX86_NGPO); unsigned int edge_type = type & IRQF_TRIGGER_MASK; unsigned long flags; - u8 new_type, gpiic; + u8 new_type; switch (edge_type) { case IRQ_TYPE_EDGE_RISING: @@ -164,13 +175,10 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type) return -EINVAL; /* not supported */ } - gpio->irq_type[offset] = new_type; - raw_spin_lock_irqsave(&gpio->spinlock, flags); - gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); - gpiic &= ~((TQMX86_GPII_MASK) << (offset * TQMX86_GPII_BITS)); - gpiic |= new_type << (offset * TQMX86_GPII_BITS); - tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); + gpio->irq_type[offset] &= ~TQMX86_GPII_MASK; + gpio->irq_type[offset] |= new_type; + tqmx86_gpio_irq_config(gpio, offset); raw_spin_unlock_irqrestore(&gpio->spinlock, flags); return 0; From 90dd7de4ef7ba584823dfbeba834c2919a4bb55b Mon Sep 17 00:00:00 2001 From: Matthias Schiffer Date: Thu, 30 May 2024 12:20:02 +0200 Subject: [PATCH 201/537] gpio: tqmx86: fix broken IRQ_TYPE_EDGE_BOTH interrupt type The TQMx86 GPIO controller only supports falling and rising edge triggers, but not both. Fix this by implementing a software both-edge mode that toggles the edge type after every interrupt. Fixes: b868db94a6a7 ("gpio: tqmx86: Add GPIO from for this IO controller") Co-developed-by: Gregor Herburger Signed-off-by: Gregor Herburger Signed-off-by: Matthias Schiffer Link: https://lore.kernel.org/r/515324f0491c4d44f4ef49f170354aca002d81ef.1717063994.git.matthias.schiffer@ew.tq-group.com Signed-off-by: Bartosz Golaszewski --- drivers/gpio/gpio-tqmx86.c | 46 ++++++++++++++++++++++++++++++++++---- 1 file changed, 42 insertions(+), 4 deletions(-) diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c index 7e428c872a257..f2e7e8754d95d 100644 --- a/drivers/gpio/gpio-tqmx86.c +++ b/drivers/gpio/gpio-tqmx86.c @@ -32,6 +32,10 @@ #define TQMX86_GPII_NONE 0 #define TQMX86_GPII_FALLING BIT(0) #define TQMX86_GPII_RISING BIT(1) +/* Stored in irq_type as a trigger type, but not actually valid as a register + * value, so the name doesn't use "GPII" + */ +#define TQMX86_INT_BOTH (BIT(0) | BIT(1)) #define TQMX86_GPII_MASK (BIT(0) | BIT(1)) #define TQMX86_GPII_BITS 2 /* Stored in irq_type with GPII bits */ @@ -113,9 +117,15 @@ static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset) { u8 type = TQMX86_GPII_NONE, gpiic; - if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) + if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) { type = gpio->irq_type[offset] & TQMX86_GPII_MASK; + if (type == TQMX86_INT_BOTH) + type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO) + ? 
TQMX86_GPII_FALLING + : TQMX86_GPII_RISING; + } + gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS)); gpiic |= type << (offset * TQMX86_GPII_BITS); @@ -169,7 +179,7 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type) new_type = TQMX86_GPII_FALLING; break; case IRQ_TYPE_EDGE_BOTH: - new_type = TQMX86_GPII_FALLING | TQMX86_GPII_RISING; + new_type = TQMX86_INT_BOTH; break; default: return -EINVAL; /* not supported */ @@ -189,8 +199,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc) struct gpio_chip *chip = irq_desc_get_handler_data(desc); struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip); struct irq_chip *irq_chip = irq_desc_get_chip(desc); - unsigned long irq_bits; - int i = 0; + unsigned long irq_bits, flags; + int i; u8 irq_status; chained_irq_enter(irq_chip, desc); @@ -199,6 +209,34 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc) tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS); irq_bits = irq_status; + + raw_spin_lock_irqsave(&gpio->spinlock, flags); + for_each_set_bit(i, &irq_bits, TQMX86_NGPI) { + /* + * Edge-both triggers are implemented by flipping the edge + * trigger after each interrupt, as the controller only supports + * either rising or falling edge triggers, but not both. + * + * Internally, the TQMx86 GPIO controller has separate status + * registers for rising and falling edge interrupts. GPIIC + * configures which bits from which register are visible in the + * interrupt status register GPIIS and defines what triggers the + * parent IRQ line. Writing to GPIIS always clears both rising + * and falling interrupt flags internally, regardless of the + * currently configured trigger. + * + * In consequence, we can cleanly implement the edge-both + * trigger in software by first clearing the interrupt and then + * setting the new trigger based on the current GPIO input in + * tqmx86_gpio_irq_config() - even if an edge arrives between + * reading the input and setting the trigger, we will have a new + * interrupt pending. + */ + if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH) + tqmx86_gpio_irq_config(gpio, i); + } + raw_spin_unlock_irqrestore(&gpio->spinlock, flags); + for_each_set_bit(i, &irq_bits, TQMX86_NGPI) generic_handle_domain_irq(gpio->chip.irq.domain, i + TQMX86_NGPO); From 4a77c3dead97339478c7422eb07bf4bf63577008 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 2 Jun 2024 18:15:25 -0400 Subject: [PATCH 202/537] SUNRPC: Fix loop termination condition in gss_free_in_token_pages() The in_token->pages[] array is not NULL terminated. 
This results in the following KASAN splat: KASAN: maybe wild-memory-access in range [0x04a2013400000008-0x04a201340000000f] Fixes: bafa6b4d95d9 ("SUNRPC: Fix gss_free_in_token_pages()") Reviewed-by: Benjamin Coddington Signed-off-by: Chuck Lever --- net/sunrpc/auth_gss/svcauth_gss.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 96ab50eda9c2e..73a90ad873fb9 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -1069,7 +1069,7 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp, goto out_denied_free; pages = DIV_ROUND_UP(inlen, PAGE_SIZE); - in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL); + in_token->pages = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL); if (!in_token->pages) goto out_denied_free; in_token->page_base = 0; From 3d117494e2a88b9c1e8ad41bbbf2cf453a73620e Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Mon, 3 Jun 2024 14:23:44 +0800 Subject: [PATCH 203/537] cachefiles: remove unneeded include of close_fd() has been killed, let's get rid of unneeded as Al Viro pointed out [1]. [1] https://lore.kernel.org/r/20240603034055.GI1629371@ZenIV Suggested-by: Al Viro Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240603062344.818290-1-hsiangkao@linux.alibaba.com Signed-off-by: Christian Brauner --- fs/cachefiles/ondemand.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 58bd80956c5a6..bce005f2b4563 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-or-later -#include #include #include #include "internal.h" From c6144a21169fe7d0d70f1a0dae6f6301e5918d30 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Mon, 3 Jun 2024 22:43:11 +0900 Subject: [PATCH 204/537] tomoyo: update project links TOMOYO project has moved to SourceForge.net . Signed-off-by: Tetsuo Handa --- Documentation/admin-guide/LSM/tomoyo.rst | 35 ++++++++---------------- MAINTAINERS | 2 +- security/tomoyo/Kconfig | 2 +- security/tomoyo/common.c | 2 +- 4 files changed, 14 insertions(+), 27 deletions(-) diff --git a/Documentation/admin-guide/LSM/tomoyo.rst b/Documentation/admin-guide/LSM/tomoyo.rst index 4bc9c2b4da6f3..bdb2c2e2a1b26 100644 --- a/Documentation/admin-guide/LSM/tomoyo.rst +++ b/Documentation/admin-guide/LSM/tomoyo.rst @@ -9,8 +9,8 @@ TOMOYO is a name-based MAC extension (LSM module) for the Linux kernel. LiveCD-based tutorials are available at -http://tomoyo.sourceforge.jp/1.8/ubuntu12.04-live.html -http://tomoyo.sourceforge.jp/1.8/centos6-live.html +https://tomoyo.sourceforge.net/1.8/ubuntu12.04-live.html +https://tomoyo.sourceforge.net/1.8/centos6-live.html Though these tutorials use non-LSM version of TOMOYO, they are useful for you to know what TOMOYO is. @@ -21,45 +21,32 @@ How to enable TOMOYO? Build the kernel with ``CONFIG_SECURITY_TOMOYO=y`` and pass ``security=tomoyo`` on kernel's command line. -Please see http://tomoyo.osdn.jp/2.5/ for details. +Please see https://tomoyo.sourceforge.net/2.6/ for details. Where is documentation? ======================= User <-> Kernel interface documentation is available at -https://tomoyo.osdn.jp/2.5/policy-specification/index.html . +https://tomoyo.sourceforge.net/2.6/policy-specification/index.html . Materials we prepared for seminars and symposiums are available at -https://osdn.jp/projects/tomoyo/docs/?category_id=532&language_id=1 . 
+https://sourceforge.net/projects/tomoyo/files/docs/ . Below lists are chosen from three aspects. What is TOMOYO? TOMOYO Linux Overview - https://osdn.jp/projects/tomoyo/docs/lca2009-takeda.pdf + https://sourceforge.net/projects/tomoyo/files/docs/lca2009-takeda.pdf TOMOYO Linux: pragmatic and manageable security for Linux - https://osdn.jp/projects/tomoyo/docs/freedomhectaipei-tomoyo.pdf + https://sourceforge.net/projects/tomoyo/files/docs/freedomhectaipei-tomoyo.pdf TOMOYO Linux: A Practical Method to Understand and Protect Your Own Linux Box - https://osdn.jp/projects/tomoyo/docs/PacSec2007-en-no-demo.pdf + https://sourceforge.net/projects/tomoyo/files/docs/PacSec2007-en-no-demo.pdf What can TOMOYO do? Deep inside TOMOYO Linux - https://osdn.jp/projects/tomoyo/docs/lca2009-kumaneko.pdf + https://sourceforge.net/projects/tomoyo/files/docs/lca2009-kumaneko.pdf The role of "pathname based access control" in security. - https://osdn.jp/projects/tomoyo/docs/lfj2008-bof.pdf + https://sourceforge.net/projects/tomoyo/files/docs/lfj2008-bof.pdf History of TOMOYO? Realities of Mainlining - https://osdn.jp/projects/tomoyo/docs/lfj2008.pdf - -What is future plan? -==================== - -We believe that inode based security and name based security are complementary -and both should be used together. But unfortunately, so far, we cannot enable -multiple LSM modules at the same time. We feel sorry that you have to give up -SELinux/SMACK/AppArmor etc. when you want to use TOMOYO. - -We hope that LSM becomes stackable in future. Meanwhile, you can use non-LSM -version of TOMOYO, available at http://tomoyo.osdn.jp/1.8/ . -LSM version of TOMOYO is a subset of non-LSM version of TOMOYO. We are planning -to port non-LSM version's functionalities to LSM versions. + https://sourceforge.net/projects/tomoyo/files/docs/lfj2008.pdf diff --git a/MAINTAINERS b/MAINTAINERS index 8754ac2c259dc..4154db83a172b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -22679,7 +22679,7 @@ L: tomoyo-users-en@lists.osdn.me (subscribers-only, for users in English) L: tomoyo-dev@lists.osdn.me (subscribers-only, for developers in Japanese) L: tomoyo-users@lists.osdn.me (subscribers-only, for users in Japanese) S: Maintained -W: https://tomoyo.osdn.jp/ +W: https://tomoyo.sourceforge.net/ F: security/tomoyo/ TOPSTAR LAPTOP EXTRAS DRIVER diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig index fad75be5f381d..1e0dd1a6d0b0e 100644 --- a/security/tomoyo/Kconfig +++ b/security/tomoyo/Kconfig @@ -10,7 +10,7 @@ config SECURITY_TOMOYO help This selects TOMOYO Linux, pathname-based access control. Required userspace tools and further information may be - found at . + found at . If you are unsure how to answer this question, answer N. 
config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index ea3140d510ecb..5c7b059a332aa 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -2787,7 +2787,7 @@ void tomoyo_check_profile(void) else continue; pr_err("Userland tools for TOMOYO 2.6 must be installed and policy must be initialized.\n"); - pr_err("Please see https://tomoyo.osdn.jp/2.6/ for more information.\n"); + pr_err("Please see https://tomoyo.sourceforge.net/2.6/ for more information.\n"); panic("STOP!"); } tomoyo_read_unlock(idx); From 994af1825a2aa286f4903ff64a1c7378b52defe6 Mon Sep 17 00:00:00 2001 From: Nam Cao Date: Thu, 25 Apr 2024 13:52:01 +0200 Subject: [PATCH 205/537] riscv: fix overlap of allocated page and PTR_ERR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On riscv32, it is possible for the last page in virtual address space (0xfffff000) to be allocated. This page overlaps with PTR_ERR, so that shouldn't happen. There is already some code to ensure memblock won't allocate the last page. However, buddy allocator is left unchecked. Fix this by reserving physical memory that would be mapped at virtual addresses greater than 0xfffff000. Reported-by: Björn Töpel Closes: https://lore.kernel.org/linux-riscv/878r1ibpdn.fsf@all.your.base.are.belong.to.us Fixes: 76d2a0493a17 ("RISC-V: Init and Halt Code") Signed-off-by: Nam Cao Cc: Tested-by: Björn Töpel Reviewed-by: Björn Töpel Reviewed-by: Mike Rapoport (IBM) Link: https://lore.kernel.org/r/20240425115201.3044202-1-namcao@linutronix.de Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/init.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index e3218d65f21d5..e3405e4b99af5 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -250,18 +250,19 @@ static void __init setup_bootmem(void) kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base; /* - * memblock allocator is not aware of the fact that last 4K bytes of - * the addressable memory can not be mapped because of IS_ERR_VALUE - * macro. Make sure that last 4k bytes are not usable by memblock - * if end of dram is equal to maximum addressable memory. For 64-bit - * kernel, this problem can't happen here as the end of the virtual - * address space is occupied by the kernel mapping then this check must - * be done as soon as the kernel mapping base address is determined. + * Reserve physical address space that would be mapped to virtual + * addresses greater than (void *)(-PAGE_SIZE) because: + * - This memory would overlap with ERR_PTR + * - This memory belongs to high memory, which is not supported + * + * This is not applicable to 64-bit kernel, because virtual addresses + * after (void *)(-PAGE_SIZE) are not linearly mapped: they are + * occupied by kernel mapping. Also it is unrealistic for high memory + * to exist on 64-bit platforms. 
*/ if (!IS_ENABLED(CONFIG_64BIT)) { - max_mapped_addr = __pa(~(ulong)0); - if (max_mapped_addr == (phys_ram_end - 1)) - memblock_set_current_limit(max_mapped_addr - 4096); + max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE); + memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr); } min_low_pfn = PFN_UP(phys_ram_base); From e2c79b4c5c4d83520abb570ca633ded09621c0a6 Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Thu, 30 May 2024 09:44:51 -0700 Subject: [PATCH 206/537] Revert "riscv: mm: accelerate pagefault when badaccess" I accidentally picked up an earlier version of this patch, which had already landed via mm. The patch I picked up contains a bug, which I kept as I thought it was a fix. So let's just revert it. This reverts commit 4c6c0020427a4547845a83f7e4d6085e16c3e24f. Fixes: 4c6c0020427a ("riscv: mm: accelerate pagefault when badaccess") Reviewed-by: Kefeng Wang Reviewed-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20240530164451.21336-1-palmer@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/fault.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index b3fcf7d67efba..5224f37338022 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -293,8 +293,8 @@ void handle_page_fault(struct pt_regs *regs) if (unlikely(access_error(cause, vma))) { vma_end_read(vma); count_vm_vma_lock_event(VMA_LOCK_SUCCESS); - tsk->thread.bad_cause = SEGV_ACCERR; - bad_area_nosemaphore(regs, code, addr); + tsk->thread.bad_cause = cause; + bad_area_nosemaphore(regs, SEGV_ACCERR, addr); return; } From 2317dc2c22cc353b699c7d1db47b2fe91f54055c Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Wed, 29 May 2024 12:19:01 +0200 Subject: [PATCH 207/537] bpf, devmap: Remove unnecessary if check in for loop MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The iterator variable dst cannot be NULL and the if check can be removed. Remove it and fix the following Coccinelle/coccicheck warning reported by itnull.cocci: ERROR: iterator variable bound on line 762 cannot be NULL Signed-off-by: Thorsten Blum Signed-off-by: Daniel Borkmann Reviewed-by: Toke Høiland-Jørgensen Acked-by: Jiri Olsa Link: https://lore.kernel.org/bpf/20240529101900.103913-2-thorsten.blum@toblux.com --- kernel/bpf/devmap.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 4e2cdbb5629f2..7f3b34452243c 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -760,9 +760,6 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, for (i = 0; i < dtab->n_buckets; i++) { head = dev_map_index_hash(dtab, i); hlist_for_each_entry_safe(dst, next, head, index_hlist) { - if (!dst) - continue; - if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex)) continue; From 93c1800b3799f17375989b0daf76497dd3e80922 Mon Sep 17 00:00:00 2001 From: David Kaplan Date: Sun, 2 Jun 2024 13:19:09 -0500 Subject: [PATCH 208/537] x86/kexec: Fix bug with call depth tracking The call to cc_platform_has() triggers a fault and system crash if call depth tracking is active because the GS segment has been reset by load_segments() and GS_BASE is now 0 but call depth tracking uses per-CPU variables to operate. Call cc_platform_has() earlier in the function when GS is still valid. [ bp: Massage. 
] Fixes: 5d8213864ade ("x86/retbleed: Add SKL return thunk") Signed-off-by: David Kaplan Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Tom Lendacky Cc: Link: https://lore.kernel.org/r/20240603083036.637-1-bp@kernel.org --- arch/x86/kernel/machine_kexec_64.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index b180d8e497c31..cc0f7f70b17ba 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -295,8 +295,15 @@ void machine_kexec_cleanup(struct kimage *image) void machine_kexec(struct kimage *image) { unsigned long page_list[PAGES_NR]; - void *control_page; + unsigned int host_mem_enc_active; int save_ftrace_enabled; + void *control_page; + + /* + * This must be done before load_segments() since if call depth tracking + * is used then GS must be valid to make any function calls. + */ + host_mem_enc_active = cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT); #ifdef CONFIG_KEXEC_JUMP if (image->preserve_context) @@ -358,7 +365,7 @@ void machine_kexec(struct kimage *image) (unsigned long)page_list, image->start, image->preserve_context, - cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)); + host_mem_enc_active); #ifdef CONFIG_KEXEC_JUMP if (image->preserve_context) From 1e24c31351787e24b7eebe84866bd55fd62a0aef Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Fri, 31 May 2024 16:00:04 -0700 Subject: [PATCH 209/537] cpufreq: intel_pstate: Fix unchecked HWP MSR access Fix unchecked MSR access error for processors with no HWP support. On such processors, maximum frequency can be changed by the system firmware using ACPI event ACPI_PROCESSOR_NOTIFY_HIGEST_PERF_CHANGED. This results in accessing HWP MSR 0x771. Call Trace: generic_exec_single+0x58/0x120 smp_call_function_single+0xbf/0x110 rdmsrl_on_cpu+0x46/0x60 intel_pstate_get_hwp_cap+0x1b/0x70 intel_pstate_update_limits+0x2a/0x60 acpi_processor_notify+0xb7/0x140 acpi_ev_notify_dispatch+0x3b/0x60 HWP MSR 0x771 can be only read on a CPU which supports HWP and enabled. Hence intel_pstate_get_hwp_cap() can only be called when hwp_active is true. Reported-by: Sebastian Andrzej Siewior Closes: https://lore.kernel.org/linux-pm/20240529155740.Hq2Hw7be@linutronix.de/ Fixes: e8217b4bece3 ("cpufreq: intel_pstate: Update the maximum CPU frequency consistently") Tested-by: Sebastian Andrzej Siewior Signed-off-by: Srinivas Pandruvada Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/intel_pstate.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 4b986c044741e..65d3f79104bd5 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1153,7 +1153,8 @@ static void intel_pstate_update_policies(void) static void __intel_pstate_update_max_freq(struct cpudata *cpudata, struct cpufreq_policy *policy) { - intel_pstate_get_hwp_cap(cpudata); + if (hwp_active) + intel_pstate_get_hwp_cap(cpudata); policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ? cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; From 2884dc7d08d98a89d8d65121524bb7533183a63a Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sun, 2 Jun 2024 11:27:03 -0700 Subject: [PATCH 210/537] bpf: Fix a potential use-after-free in bpf_link_free() After commit 1a80dbcb2dba, bpf_link can be freed by link->ops->dealloc_deferred, but the code still tests and uses link->ops->dealloc afterward, which leads to a use-after-free as reported by syzbot. 
Actually, one of them should be sufficient, so just call one of them instead of both. Also add a WARN_ON() in case of any problematic implementation. Fixes: 1a80dbcb2dba ("bpf: support deferring bpf_link dealloc to after RCU grace period") Reported-by: syzbot+1989ee16d94720836244@syzkaller.appspotmail.com Signed-off-by: Cong Wang Signed-off-by: Daniel Borkmann Acked-by: Jiri Olsa Link: https://lore.kernel.org/bpf/20240602182703.207276-1-xiyou.wangcong@gmail.com --- kernel/bpf/syscall.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 2222c3ff88e7f..f45ed6adc092a 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2998,6 +2998,7 @@ static int bpf_obj_get(const union bpf_attr *attr) void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, const struct bpf_link_ops *ops, struct bpf_prog *prog) { + WARN_ON(ops->dealloc && ops->dealloc_deferred); atomic64_set(&link->refcnt, 1); link->type = type; link->id = 0; @@ -3056,16 +3057,17 @@ static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu) /* bpf_link_free is guaranteed to be called from process context */ static void bpf_link_free(struct bpf_link *link) { + const struct bpf_link_ops *ops = link->ops; bool sleepable = false; bpf_link_free_id(link->id); if (link->prog) { sleepable = link->prog->sleepable; /* detach BPF program, clean up used resources */ - link->ops->release(link); + ops->release(link); bpf_prog_put(link->prog); } - if (link->ops->dealloc_deferred) { + if (ops->dealloc_deferred) { /* schedule BPF link deallocation; if underlying BPF program * is sleepable, we need to first wait for RCU tasks trace * sync, then go through "classic" RCU grace period @@ -3074,9 +3076,8 @@ static void bpf_link_free(struct bpf_link *link) call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp); else call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp); - } - if (link->ops->dealloc) - link->ops->dealloc(link); + } else if (ops->dealloc) + ops->dealloc(link); } static void bpf_link_put_deferred(struct work_struct *work) From b97e8a2f7130a4b30d1502003095833d16c028b3 Mon Sep 17 00:00:00 2001 From: Hagar Hemdan Date: Fri, 31 May 2024 16:21:44 +0000 Subject: [PATCH 211/537] irqchip/gic-v3-its: Fix potential race condition in its_vlpi_prop_update() its_vlpi_prop_update() calls lpi_write_config() which obtains the mapping information for a VLPI without lock held. So it could race with its_vlpi_unmap(). Since all calls from its_irq_set_vcpu_affinity() require the same lock to be held, hoist the locking there instead of sprinkling the locking all over the place. This bug was discovered using Coverity Static Analysis Security Testing (SAST) by Synopsys, Inc. 
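The shape of the change is the classic "hoist the lock into the single caller" refactor, sketched stand-alone below (illustrative names, a pthread mutex standing in for the raw spinlock): each helper runs with the lock held because the one dispatch function takes it, so no helper can be reached unlocked by mistake.

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static int mapped;

  static int do_map(void)   { mapped = 1; return 0; }   /* lock held by caller */
  static int do_unmap(void) { mapped = 0; return 0; }   /* lock held by caller */

  static int dispatch(int map)
  {
          int ret;

          pthread_mutex_lock(&lock);      /* one lock site covers every helper */
          ret = map ? do_map() : do_unmap();
          pthread_mutex_unlock(&lock);

          return ret;
  }

  int main(void)
  {
          dispatch(1);
          printf("mapped=%d\n", mapped);
          return dispatch(0);
  }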
[ tglx: Use guard() instead of goto ] Fixes: 015ec0386ab6 ("irqchip/gic-v3-its: Add VLPI configuration handling") Suggested-by: Marc Zyngier Signed-off-by: Hagar Hemdan Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20240531162144.28650-1-hagarhem@amazon.com --- drivers/irqchip/irq-gic-v3-its.c | 44 +++++++++----------------------- 1 file changed, 12 insertions(+), 32 deletions(-) diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 40ebf1726393c..3c755d5dad6e6 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1846,28 +1846,22 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - int ret = 0; if (!info->map) return -EINVAL; - raw_spin_lock(&its_dev->event_map.vlpi_lock); - if (!its_dev->event_map.vm) { struct its_vlpi_map *maps; maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), GFP_ATOMIC); - if (!maps) { - ret = -ENOMEM; - goto out; - } + if (!maps) + return -ENOMEM; its_dev->event_map.vm = info->map->vm; its_dev->event_map.vlpi_maps = maps; } else if (its_dev->event_map.vm != info->map->vm) { - ret = -EINVAL; - goto out; + return -EINVAL; } /* Get our private copy of the mapping information */ @@ -1899,46 +1893,32 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) its_dev->event_map.nr_vlpis++; } -out: - raw_spin_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_vlpi_map *map; - int ret = 0; - - raw_spin_lock(&its_dev->event_map.vlpi_lock); map = get_vlpi_map(d); - if (!its_dev->event_map.vm || !map) { - ret = -EINVAL; - goto out; - } + if (!its_dev->event_map.vm || !map) + return -EINVAL; /* Copy our mapping information to the incoming request */ *info->map = *map; -out: - raw_spin_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_unmap(struct irq_data *d) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); - int ret = 0; - - raw_spin_lock(&its_dev->event_map.vlpi_lock); - if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { - ret = -EINVAL; - goto out; - } + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; /* Drop the virtual mapping */ its_send_discard(its_dev, event); @@ -1962,9 +1942,7 @@ static int its_vlpi_unmap(struct irq_data *d) kfree(its_dev->event_map.vlpi_maps); } -out: - raw_spin_unlock(&its_dev->event_map.vlpi_lock); - return ret; + return 0; } static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) @@ -1992,6 +1970,8 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) if (!is_v4(its_dev->its)) return -EINVAL; + guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock); + /* Unmap request? */ if (!info) return its_vlpi_unmap(d); From 27bd5fdc24c0d5d1306f968ef24105c4577242b0 Mon Sep 17 00:00:00 2001 From: Nikunj A Dadhania Date: Fri, 31 May 2024 04:46:42 +0000 Subject: [PATCH 212/537] KVM: SEV-ES: Prevent MSR access post VMSA encryption KVM currently allows userspace to read/write MSRs even after the VMSA is encrypted. This can cause unintentional issues if MSR access has side- effects. 
For ex, while migrating a guest, userspace could attempt to migrate MSR_IA32_DEBUGCTLMSR and end up unintentionally disabling LBRV on the target. Fix this by preventing access to those MSRs which are context switched via the VMSA, once the VMSA is encrypted. Suggested-by: Sean Christopherson Signed-off-by: Nikunj A Dadhania Signed-off-by: Ravi Bangoria Message-ID: <20240531044644.768-2-ravi.bangoria@amd.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 1d3c8be39479b..f265361f4518b 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2822,10 +2822,24 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr) return 0; } +static bool +sev_es_prevent_msr_access(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ + return sev_es_guest(vcpu->kvm) && + vcpu->arch.guest_state_protected && + svm_msrpm_offset(msr_info->index) != MSR_INVALID && + !msr_write_intercepted(vcpu, msr_info->index); +} + static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct vcpu_svm *svm = to_svm(vcpu); + if (sev_es_prevent_msr_access(vcpu, msr_info)) { + msr_info->data = 0; + return -EINVAL; + } + switch (msr_info->index) { case MSR_AMD64_TSC_RATIO: if (!msr_info->host_initiated && @@ -2976,6 +2990,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) u32 ecx = msr->index; u64 data = msr->data; + + if (sev_es_prevent_msr_access(vcpu, msr)) + return -EINVAL; + switch (ecx) { case MSR_AMD64_TSC_RATIO: From d922056215617eedfbdbc29fe49953423686fe5e Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Fri, 31 May 2024 04:46:43 +0000 Subject: [PATCH 213/537] KVM: SEV-ES: Disallow SEV-ES guests when X86_FEATURE_LBRV is absent As documented in APM[1], LBR Virtualization must be enabled for SEV-ES guests. So, prevent SEV-ES guests when LBRV support is missing. [1]: AMD64 Architecture Programmer's Manual Pub. 40332, Rev. 4.07 - June 2023, Vol 2, 15.35.2 Enabling SEV-ES. https://bugzilla.kernel.org/attachment.cgi?id=304653 Fixes: 376c6d285017 ("KVM: SVM: Provide support for SEV-ES vCPU creation/loading") Signed-off-by: Ravi Bangoria Message-ID: <20240531044644.768-3-ravi.bangoria@amd.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 6 ++++++ arch/x86/kvm/svm/svm.c | 16 +++++++--------- arch/x86/kvm/svm/svm.h | 1 + 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 0623cfaa7bb0e..8b52bbba02c0f 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2406,6 +2406,12 @@ void __init sev_hardware_setup(void) if (!boot_cpu_has(X86_FEATURE_SEV_ES)) goto out; + if (!lbrv) { + WARN_ONCE(!boot_cpu_has(X86_FEATURE_LBRV), + "LBRV must be present for SEV-ES support"); + goto out; + } + /* Has the system been allocated ASIDs for SEV-ES? 
*/ if (min_sev_asid == 1) goto out; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index f265361f4518b..223a551bf44e4 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -215,7 +215,7 @@ int vgif = true; module_param(vgif, int, 0444); /* enable/disable LBR virtualization */ -static int lbrv = true; +int lbrv = true; module_param(lbrv, int, 0444); static int tsc_scaling = true; @@ -5294,6 +5294,12 @@ static __init int svm_hardware_setup(void) nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS); + if (lbrv) { + if (!boot_cpu_has(X86_FEATURE_LBRV)) + lbrv = false; + else + pr_info("LBR virtualization supported\n"); + } /* * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which * may be modified by svm_adjust_mmio_mask()), as well as nrips. @@ -5347,14 +5353,6 @@ static __init int svm_hardware_setup(void) svm_x86_ops.set_vnmi_pending = NULL; } - - if (lbrv) { - if (!boot_cpu_has(X86_FEATURE_LBRV)) - lbrv = false; - else - pr_info("LBR virtualization supported\n"); - } - if (!enable_pmu) pr_info("PMU virtualization is disabled\n"); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index be57213cd2959..59bbb8122b750 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -39,6 +39,7 @@ extern int vgif; extern bool intercept_smi; extern bool x2avic_enabled; extern bool vnmi; +extern int lbrv; /* * Clean bits in VMCB. From b7e4be0a224fe5c6be30c1c8bdda8d2317ad6ba4 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Fri, 31 May 2024 04:46:44 +0000 Subject: [PATCH 214/537] KVM: SEV-ES: Delegate LBR virtualization to the processor As documented in APM[1], LBR Virtualization must be enabled for SEV-ES guests. Although KVM currently enforces LBRV for SEV-ES guests, there are multiple issues with it: o MSR_IA32_DEBUGCTLMSR is still intercepted. Since MSR_IA32_DEBUGCTLMSR interception is used to dynamically toggle LBRV for performance reasons, this can be fatal for SEV-ES guests. For ex SEV-ES guest on Zen3: [guest ~]# wrmsr 0x1d9 0x4 KVM: entry failed, hardware error 0xffffffff EAX=00000004 EBX=00000000 ECX=000001d9 EDX=00000000 Fix this by never intercepting MSR_IA32_DEBUGCTLMSR for SEV-ES guests. No additional save/restore logic is required since MSR_IA32_DEBUGCTLMSR is of swap type A. o KVM will disable LBRV if userspace sets MSR_IA32_DEBUGCTLMSR before the VMSA is encrypted. Fix this by moving LBRV enablement code post VMSA encryption. [1]: AMD64 Architecture Programmer's Manual Pub. 40332, Rev. 4.07 - June 2023, Vol 2, 15.35.2 Enabling SEV-ES. https://bugzilla.kernel.org/attachment.cgi?id=304653 Fixes: 376c6d285017 ("KVM: SVM: Provide support for SEV-ES vCPU creation/loading") Co-developed-by: Nikunj A Dadhania Signed-off-by: Nikunj A Dadhania Signed-off-by: Ravi Bangoria Message-ID: <20240531044644.768-4-ravi.bangoria@amd.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/sev.c | 13 ++++++++----- arch/x86/kvm/svm/svm.c | 8 +++++++- arch/x86/kvm/svm/svm.h | 3 ++- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 8b52bbba02c0f..95095a233a459 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -779,6 +779,14 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu, */ fpstate_set_confidential(&vcpu->arch.guest_fpu); vcpu->arch.guest_state_protected = true; + + /* + * SEV-ES guest mandates LBR Virtualization to be _always_ ON. 
Enable it + * only after setting guest_state_protected because KVM_SET_MSRS allows + * dynamic toggling of LBRV (for performance reason) on write access to + * MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set. + */ + svm_enable_lbrv(vcpu); return 0; } @@ -3222,7 +3230,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm) struct kvm_vcpu *vcpu = &svm->vcpu; svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE; - svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; /* * An SEV-ES guest requires a VMSA area that is a separate from the @@ -3274,10 +3281,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm) /* Clear intercepts on selected MSRs */ set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1); set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1); - set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); - set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); - set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); - set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); } void sev_init_vmcb(struct vcpu_svm *svm) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 223a551bf44e4..296c524988f95 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -99,6 +99,7 @@ static const struct svm_direct_access_msrs { { .index = MSR_IA32_SPEC_CTRL, .always = false }, { .index = MSR_IA32_PRED_CMD, .always = false }, { .index = MSR_IA32_FLUSH_CMD, .always = false }, + { .index = MSR_IA32_DEBUGCTLMSR, .always = false }, { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false }, { .index = MSR_IA32_LASTBRANCHTOIP, .always = false }, { .index = MSR_IA32_LASTINTFROMIP, .always = false }, @@ -990,7 +991,7 @@ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb) vmcb_mark_dirty(to_vmcb, VMCB_LBR); } -static void svm_enable_lbrv(struct kvm_vcpu *vcpu) +void svm_enable_lbrv(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -1000,6 +1001,9 @@ static void svm_enable_lbrv(struct kvm_vcpu *vcpu) set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); + if (sev_es_guest(vcpu->kvm)) + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1); + /* Move the LBR msrs to the vmcb02 so that the guest can see them. 
*/ if (is_guest_mode(vcpu)) svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr); @@ -1009,6 +1013,8 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); + KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm); + svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK; set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 59bbb8122b750..0f1472690b593 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -30,7 +30,7 @@ #define IOPM_SIZE PAGE_SIZE * 3 #define MSRPM_SIZE PAGE_SIZE * 2 -#define MAX_DIRECT_ACCESS_MSRS 47 +#define MAX_DIRECT_ACCESS_MSRS 48 #define MSRPM_OFFSETS 32 extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly; extern bool npt_enabled; @@ -553,6 +553,7 @@ u32 *svm_vcpu_alloc_msrpm(void); void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm); void svm_vcpu_free_msrpm(u32 *msrpm); void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb); +void svm_enable_lbrv(struct kvm_vcpu *vcpu); void svm_update_lbrv(struct kvm_vcpu *vcpu); int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer); From 89a58812c47f1823191e9d0b08b53df2dd304ae2 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 21 May 2024 18:03:04 -0700 Subject: [PATCH 215/537] KVM: x86: Drop support for hand tuning APIC timer advancement from userspace Remove support for specifying a static local APIC timer advancement value, and instead present a read-only boolean parameter to let userspace enable or disable KVM's dynamic APIC timer advancement. Realistically, it's all but impossible for userspace to specify an advancement that is more precise than what KVM's adaptive tuning can provide. E.g. a static value needs to be tuned for the exact hardware and kernel, and if KVM is using hrtimers, likely requires additional tuning for the exact configuration of the entire system. Dropping support for a userspace provided value also fixes several flaws in the interface. E.g. KVM interprets a negative value other than -1 as a large advancement, toggling between a negative and positive value yields unpredictable behavior as vCPUs will switch from dynamic to static advancement, changing the advancement in the middle of VM creation can result in different values for vCPUs within a VM, etc. Those flaws are mostly fixable, but there's almost no justification for taking on yet more complexity (it's minimal complexity, but still non-zero). The only arguments against using KVM's adaptive tuning is if a setup needs a higher maximum, or if the adjustments are too reactive, but those are arguments for letting userspace control the absolute max advancement and the granularity of each adjustment, e.g. similar to how KVM provides knobs for halt polling. 
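The adaptive behaviour is easy to picture with a toy userspace model. Everything below is illustrative only: the bounds, the step and the error samples are made up and are not the values or the exact algorithm KVM uses. Each sample of how early or late the timer interrupt arrived nudges the advancement, and the result is clamped to a fixed window.

#include <stdio.h>

/* Illustrative bounds and step only, not the values KVM uses. */
#define ADVANCE_MIN_NS		0L
#define ADVANCE_MAX_NS		5000L
#define ADJUST_STEP		8	/* apply 1/8th of the observed error */

static long advance_ns = 1000;		/* arbitrary starting advancement */

/* err_ns < 0: interrupt arrived early by -err_ns; err_ns > 0: it arrived late */
static void adjust_advance(long err_ns)
{
	advance_ns += err_ns / ADJUST_STEP;
	if (advance_ns < ADVANCE_MIN_NS)
		advance_ns = ADVANCE_MIN_NS;
	if (advance_ns > ADVANCE_MAX_NS)
		advance_ns = ADVANCE_MAX_NS;
}

int main(void)
{
	long samples[] = { 400, -250, 120, -80, 30 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		adjust_advance(samples[i]);
		printf("error %5ld ns -> advance %ld ns\n", samples[i], advance_ns);
	}
	return 0;
}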
Link: https://lore.kernel.org/all/20240520115334.852510-1-zhoushuling@huawei.com Cc: Shuling Zhou Cc: Marcelo Tosatti Signed-off-by: Sean Christopherson Message-ID: <20240522010304.1650603-1-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 39 +++++++++++++++++++++------------------ arch/x86/kvm/lapic.h | 2 +- arch/x86/kvm/x86.c | 11 +---------- 3 files changed, 23 insertions(+), 29 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index ebf41023be382..acd7d48100a1d 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -59,7 +59,17 @@ #define MAX_APIC_VECTOR 256 #define APIC_VECTORS_PER_REG 32 -static bool lapic_timer_advance_dynamic __read_mostly; +/* + * Enable local APIC timer advancement (tscdeadline mode only) with adaptive + * tuning. When enabled, KVM programs the host timer event to fire early, i.e. + * before the deadline expires, to account for the delay between taking the + * VM-Exit (to inject the guest event) and the subsequent VM-Enter to resume + * the guest, i.e. so that the interrupt arrives in the guest with minimal + * latency relative to the deadline programmed by the guest. + */ +static bool lapic_timer_advance __read_mostly = true; +module_param(lapic_timer_advance, bool, 0444); + #define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100 /* clock cycles */ #define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000 /* clock cycles */ #define LAPIC_TIMER_ADVANCE_NS_INIT 1000 @@ -1854,16 +1864,14 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu) guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline); - if (lapic_timer_advance_dynamic) { - adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline); - /* - * If the timer fired early, reread the TSC to account for the - * overhead of the above adjustment to avoid waiting longer - * than is necessary. - */ - if (guest_tsc < tsc_deadline) - guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); - } + adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline); + + /* + * If the timer fired early, reread the TSC to account for the overhead + * of the above adjustment to avoid waiting longer than is necessary. 
+ */ + if (guest_tsc < tsc_deadline) + guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); if (guest_tsc < tsc_deadline) __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc); @@ -2812,7 +2820,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data) return HRTIMER_NORESTART; } -int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns) +int kvm_create_lapic(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic; @@ -2845,13 +2853,8 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns) hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); apic->lapic_timer.timer.function = apic_timer_fn; - if (timer_advance_ns == -1) { + if (lapic_timer_advance) apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT; - lapic_timer_advance_dynamic = true; - } else { - apic->lapic_timer.timer_advance_ns = timer_advance_ns; - lapic_timer_advance_dynamic = false; - } /* * Stuff the APIC ENABLE bit in lieu of temporarily incrementing diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index 0a0ea4b5dd8ce..a69e706b9080a 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -85,7 +85,7 @@ struct kvm_lapic { struct dest_map; -int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns); +int kvm_create_lapic(struct kvm_vcpu *vcpu); void kvm_free_lapic(struct kvm_vcpu *vcpu); int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 082ac6d95a3a0..8c9e4281d978d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -164,15 +164,6 @@ module_param(kvmclock_periodic_sync, bool, 0444); static u32 __read_mostly tsc_tolerance_ppm = 250; module_param(tsc_tolerance_ppm, uint, 0644); -/* - * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables - * adaptive tuning starting from default advancement of 1000ns. '0' disables - * advancement entirely. Any other value is used as-is and disables adaptive - * tuning, i.e. allows privileged userspace to set an exact advancement time. 
- */ -static int __read_mostly lapic_timer_advance_ns = -1; -module_param(lapic_timer_advance_ns, int, 0644); - static bool __read_mostly vector_hashing = true; module_param(vector_hashing, bool, 0444); @@ -12169,7 +12160,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (r < 0) return r; - r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); + r = kvm_create_lapic(vcpu); if (r < 0) goto fail_mmu_destroy; From d6283b160a12010b2113cc64726a3c9eda13dc5f Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 29 May 2024 10:38:04 -0300 Subject: [PATCH 216/537] tools headers uapi: Sync linux/stat.h with the kernel sources to pick STATX_SUBVOL To pick the changes from: 2a82bb02941fb53d ("statx: stx_subvol") This silences this perf build warning: Warning: Kernel ABI header differences: diff -u tools/include/uapi/linux/stat.h include/uapi/linux/stat.h Cc: Adrian Hunter Cc: Christian Brauner Cc: Ian Rogers Cc: Jiri Olsa Cc: Kan Liang Cc: Kent Overstreet Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/ZlnK2Fmx_gahzwZI@x1 Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/linux/stat.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h index 2f2ee82d55175..67626d5353166 100644 --- a/tools/include/uapi/linux/stat.h +++ b/tools/include/uapi/linux/stat.h @@ -126,8 +126,9 @@ struct statx { __u64 stx_mnt_id; __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */ __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */ + __u64 stx_subvol; /* Subvolume identifier */ /* 0xa0 */ - __u64 __spare3[12]; /* Spare space for future expansion */ + __u64 __spare3[11]; /* Spare space for future expansion */ /* 0x100 */ }; @@ -155,6 +156,7 @@ struct statx { #define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */ #define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */ #define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */ +#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */ #define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ From 18befe4a28403f599115c5ae753cc7f5157af8b7 Mon Sep 17 00:00:00 2001 From: Dumitru Ceclan Date: Thu, 30 May 2024 15:07:52 +0300 Subject: [PATCH 217/537] iio: adc: ad7173: Clear append status bit The previous value of the append status bit was not cleared before setting the new value. This caused the bit to remain set after enabling buffered mode for multiple channels and not permit further buffered reads from a single channel after the fact. 
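The underlying fix is the usual read-modify-write discipline for a register field: clear the field before OR-ing in the new value, otherwise a bit that was set once can never be turned off. A self-contained illustration (the mask is an arbitrary example, not the AD7173 register layout):

#include <stdio.h>
#include <stdint.h>

#define DATA_STAT_MASK	0x0040u		/* arbitrary example bit */

static uint16_t update_stat_bit(uint16_t reg, int append)
{
	reg &= ~DATA_STAT_MASK;		/* clear the previous value first */
	if (append)
		reg |= DATA_STAT_MASK;
	return reg;
}

int main(void)
{
	uint16_t reg = 0x0040;	/* bit left over from an earlier enable */

	/* Without the clear, "disable" would leave the stale bit set. */
	printf("disable -> 0x%04x\n", update_stat_bit(reg, 0));
	printf("enable  -> 0x%04x\n", update_stat_bit(reg, 1));
	return 0;
}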
Fixes: 76a1e6a42802 ("iio: adc: ad7173: add AD7173 driver") Signed-off-by: Dumitru Ceclan Link: https://lore.kernel.org/r/20240530-ad7173-fixes-v3-4-b85f33079e18@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/adc/ad7173.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c index 048127bf979a9..e2f17292409cb 100644 --- a/drivers/iio/adc/ad7173.c +++ b/drivers/iio/adc/ad7173.c @@ -543,6 +543,7 @@ static int ad7173_append_status(struct ad_sigma_delta *sd, bool append) unsigned int interface_mode = st->interface_mode; int ret; + interface_mode &= ~AD7173_INTERFACE_DATA_STAT; interface_mode |= AD7173_INTERFACE_DATA_STAT_EN(append); ret = ad_sd_write_reg(&st->sd, AD7173_REG_INTERFACE_MODE, 2, interface_mode); if (ret) From 182bc496dc63ca03986e3c393166477f4a4c2742 Mon Sep 17 00:00:00 2001 From: Dumitru Ceclan Date: Thu, 30 May 2024 15:07:53 +0300 Subject: [PATCH 218/537] iio: adc: ad7173: Fix sampling frequency setting This patch fixes two issues regarding the sampling frequency setting: -The attribute was set as per device, not per channel. As such, when setting the sampling frequency, the configuration was always done for the slot 0, and the correct configuration was applied on the next channel configuration call by the LRU mechanism. -The LRU implementation does not take into account external settings of the slot registers. When setting the sampling frequency directly to a slot register in write_raw(), there is no guarantee that other channels were not also using that slot and now incorrectly retain their config as live. Set the sampling frequency attribute as separate in the channel templates. Do not set the sampling directly to the slot register in write_raw(), just mark the config as not live and let the LRU mechanism handle it. As the reg variable is no longer used, remove it. 
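The resulting flow can be modelled in a few lines: a rate change only updates the cached per-channel config and drops its live flag, and the hardware slot is reprogrammed lazily the next time the channel is selected. A simplified stand-alone sketch (the structure and function names are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct chan_cfg {
	unsigned int odr;	/* cached output data rate selector */
	bool live;		/* true if a hardware slot currently matches this cfg */
};

/* Stand-in for the real slot programming done by the LRU mechanism. */
static void program_slot(struct chan_cfg *cfg)
{
	printf("programming slot: odr=%u\n", cfg->odr);
	cfg->live = true;
}

static void set_odr(struct chan_cfg *cfg, unsigned int odr)
{
	cfg->odr = odr;
	cfg->live = false;	/* force reprogramming on next selection */
}

static void select_channel(struct chan_cfg *cfg)
{
	if (!cfg->live)
		program_slot(cfg);
}

int main(void)
{
	struct chan_cfg cfg = { .odr = 3, .live = true };

	set_odr(&cfg, 7);	/* only updates the cache */
	select_channel(&cfg);	/* the slot register is written here */
	return 0;
}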
Fixes: 76a1e6a42802 ("iio: adc: ad7173: add AD7173 driver") Signed-off-by: Dumitru Ceclan Link: https://lore.kernel.org/r/20240530-ad7173-fixes-v3-5-b85f33079e18@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/adc/ad7173.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c index e2f17292409cb..b26d4575e2569 100644 --- a/drivers/iio/adc/ad7173.c +++ b/drivers/iio/adc/ad7173.c @@ -717,7 +717,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev, { struct ad7173_state *st = iio_priv(indio_dev); struct ad7173_channel_config *cfg; - unsigned int freq, i, reg; + unsigned int freq, i; int ret; ret = iio_device_claim_direct_mode(indio_dev); @@ -733,16 +733,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev, cfg = &st->channels[chan->address].cfg; cfg->odr = i; - - if (!cfg->live) - break; - - ret = ad_sd_read_reg(&st->sd, AD7173_REG_FILTER(cfg->cfg_slot), 2, ®); - if (ret) - break; - reg &= ~AD7173_FILTER_ODR0_MASK; - reg |= FIELD_PREP(AD7173_FILTER_ODR0_MASK, i); - ret = ad_sd_write_reg(&st->sd, AD7173_REG_FILTER(cfg->cfg_slot), 2, reg); + cfg->live = false; break; default: @@ -804,8 +795,7 @@ static const struct iio_chan_spec ad7173_channel_template = { .type = IIO_VOLTAGE, .indexed = 1, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_SCALE), - .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ), .scan_type = { .sign = 'u', .realbits = 24, @@ -819,8 +809,8 @@ static const struct iio_chan_spec ad7173_temp_iio_channel_template = { .channel = AD7173_AIN_TEMP_POS, .channel2 = AD7173_AIN_TEMP_NEG, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET), - .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET) | + BIT(IIO_CHAN_INFO_SAMP_FREQ), .scan_type = { .sign = 'u', .realbits = 24, From 8844ed0a6e063acf7173b231021b2d301e31ded9 Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Maneyrol Date: Mon, 27 May 2024 15:01:17 +0000 Subject: [PATCH 219/537] iio: imu: inv_mpu6050: stabilized timestamping in interrupt Use IRQ ONESHOT flag to ensure the timestamp is not updated in the hard handler during the thread handler. And use a fixed value of 1 sample that correspond to this first timestamp. This way we can ensure the timestamp is always corresponding to the value used by the timestamping mechanism. Otherwise, it is possible that between FIFO count read and FIFO processing the timestamp is overwritten in the hard handler. 
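For reference, the change boils down to requesting the threaded interrupt with IRQF_ONESHOT, so the hard handler (which latches the timestamp) cannot run again and overwrite it before the thread handler has drained the FIFO. Roughly, as a fragment with placeholder handler names rather than the driver's exact symbols:

ret = devm_request_threaded_irq(&indio_dev->dev, irq,
				timestamp_hard_handler,		/* latches the IRQ timestamp */
				fifo_thread_handler,		/* reads and parses the FIFO */
				irq_type | IRQF_ONESHOT,
				"imu-irq", indio_dev);
if (ret)
	return ret;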
Fixes: 111e1abd0045 ("iio: imu: inv_mpu6050: use the common inv_sensors timestamp module") Cc: stable@vger.kernel.org Signed-off-by: Jean-Baptiste Maneyrol Link: https://lore.kernel.org/r/20240527150117.608792-1-inv.git-commit@tdk.com Signed-off-by: Jonathan Cameron --- drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | 4 ++-- drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c index 0dc0f22a55827..3d3b27f28c9d1 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c @@ -100,8 +100,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) goto end_session; /* Each FIFO data contains all sensors, so same number for FIFO and sensor data */ fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider); - inv_sensors_timestamp_interrupt(&st->timestamp, nb, pf->timestamp); - inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, nb, 0); + inv_sensors_timestamp_interrupt(&st->timestamp, 1, pf->timestamp); + inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, 1, 0); /* clear internal data buffer for avoiding kernel data leak */ memset(data, 0, sizeof(data)); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c index 1b603567ccc80..84273660ca2eb 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c @@ -300,6 +300,7 @@ int inv_mpu6050_probe_trigger(struct iio_dev *indio_dev, int irq_type) if (!st->trig) return -ENOMEM; + irq_type |= IRQF_ONESHOT; ret = devm_request_threaded_irq(&indio_dev->dev, st->irq, &inv_mpu6050_interrupt_timestamp, &inv_mpu6050_interrupt_handle, From 78f0dfa64cbd05f381849377a32e0a2f1afe9215 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 30 May 2024 09:44:16 +0200 Subject: [PATCH 220/537] iio: inkern: fix channel read regression A recent "cleanup" broke IIO channel read outs and thereby thermal mitigation on the Lenovo ThinkPad X13s by returning zero instead of the expected IIO value type in iio_read_channel_processed_scale(): thermal thermal_zone12: failed to read out thermal zone (-22) Fixes: 3092bde731ca ("iio: inkern: move to the cleanup.h magic") Cc: Nuno Sa Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20240530074416.13697-1-johan+linaro@kernel.org Signed-off-by: Jonathan Cameron --- drivers/iio/inkern.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index 52d7732618282..485e6fc44a04c 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -721,7 +721,7 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val, return ret; *val *= scale; - return 0; + return ret; } else { ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW); if (ret < 0) From ffbe335b8d471f79b259e950cb20999700670456 Mon Sep 17 00:00:00 2001 From: Matthias Stocker Date: Fri, 31 May 2024 12:37:11 +0200 Subject: [PATCH 221/537] vmxnet3: disable rx data ring on dma allocation failure When vmxnet3_rq_create() fails to allocate memory for rq->data_ring.base, the subsequent call to vmxnet3_rq_destroy_all_rxdataring does not reset rq->data_ring.desc_size for the data ring that failed, which presumably causes the hypervisor to reference it on packet reception. 
To fix this bug, rq->data_ring.desc_size needs to be set to 0 to tell the hypervisor to disable this feature. [ 95.436876] kernel BUG at net/core/skbuff.c:207! [ 95.439074] invalid opcode: 0000 [#1] PREEMPT SMP NOPTI [ 95.440411] CPU: 7 PID: 0 Comm: swapper/7 Not tainted 6.9.3-dirty #1 [ 95.441558] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 12/12/2018 [ 95.443481] RIP: 0010:skb_panic+0x4d/0x4f [ 95.444404] Code: 4f 70 50 8b 87 c0 00 00 00 50 8b 87 bc 00 00 00 50 ff b7 d0 00 00 00 4c 8b 8f c8 00 00 00 48 c7 c7 68 e8 be 9f e8 63 58 f9 ff <0f> 0b 48 8b 14 24 48 c7 c1 d0 73 65 9f e8 a1 ff ff ff 48 8b 14 24 [ 95.447684] RSP: 0018:ffffa13340274dd0 EFLAGS: 00010246 [ 95.448762] RAX: 0000000000000089 RBX: ffff8fbbc72b02d0 RCX: 000000000000083f [ 95.450148] RDX: 0000000000000000 RSI: 00000000000000f6 RDI: 000000000000083f [ 95.451520] RBP: 000000000000002d R08: 0000000000000000 R09: ffffa13340274c60 [ 95.452886] R10: ffffffffa04ed468 R11: 0000000000000002 R12: 0000000000000000 [ 95.454293] R13: ffff8fbbdab3c2d0 R14: ffff8fbbdbd829e0 R15: ffff8fbbdbd809e0 [ 95.455682] FS: 0000000000000000(0000) GS:ffff8fbeefd80000(0000) knlGS:0000000000000000 [ 95.457178] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 95.458340] CR2: 00007fd0d1f650c8 CR3: 0000000115f28000 CR4: 00000000000406f0 [ 95.459791] Call Trace: [ 95.460515] [ 95.461180] ? __die_body.cold+0x19/0x27 [ 95.462150] ? die+0x2e/0x50 [ 95.462976] ? do_trap+0xca/0x110 [ 95.463973] ? do_error_trap+0x6a/0x90 [ 95.464966] ? skb_panic+0x4d/0x4f [ 95.465901] ? exc_invalid_op+0x50/0x70 [ 95.466849] ? skb_panic+0x4d/0x4f [ 95.467718] ? asm_exc_invalid_op+0x1a/0x20 [ 95.468758] ? skb_panic+0x4d/0x4f [ 95.469655] skb_put.cold+0x10/0x10 [ 95.470573] vmxnet3_rq_rx_complete+0x862/0x11e0 [vmxnet3] [ 95.471853] vmxnet3_poll_rx_only+0x36/0xb0 [vmxnet3] [ 95.473185] __napi_poll+0x2b/0x160 [ 95.474145] net_rx_action+0x2c6/0x3b0 [ 95.475115] handle_softirqs+0xe7/0x2a0 [ 95.476122] __irq_exit_rcu+0x97/0xb0 [ 95.477109] common_interrupt+0x85/0xa0 [ 95.478102] [ 95.478846] [ 95.479603] asm_common_interrupt+0x26/0x40 [ 95.480657] RIP: 0010:pv_native_safe_halt+0xf/0x20 [ 95.481801] Code: 22 d7 e9 54 87 01 00 0f 1f 40 00 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 f3 0f 1e fa eb 07 0f 00 2d 93 ba 3b 00 fb f4 2c 87 01 00 66 66 2e 0f 1f 84 00 00 00 00 00 90 90 90 90 90 90 [ 95.485563] RSP: 0018:ffffa133400ffe58 EFLAGS: 00000246 [ 95.486882] RAX: 0000000000004000 RBX: ffff8fbbc1d14064 RCX: 0000000000000000 [ 95.488477] RDX: ffff8fbeefd80000 RSI: ffff8fbbc1d14000 RDI: 0000000000000001 [ 95.490067] RBP: ffff8fbbc1d14064 R08: ffffffffa0652260 R09: 00000000000010d3 [ 95.491683] R10: 0000000000000018 R11: ffff8fbeefdb4764 R12: ffffffffa0652260 [ 95.493389] R13: ffffffffa06522e0 R14: 0000000000000001 R15: 0000000000000000 [ 95.495035] acpi_safe_halt+0x14/0x20 [ 95.496127] acpi_idle_do_entry+0x2f/0x50 [ 95.497221] acpi_idle_enter+0x7f/0xd0 [ 95.498272] cpuidle_enter_state+0x81/0x420 [ 95.499375] cpuidle_enter+0x2d/0x40 [ 95.500400] do_idle+0x1e5/0x240 [ 95.501385] cpu_startup_entry+0x29/0x30 [ 95.502422] start_secondary+0x11c/0x140 [ 95.503454] common_startup_64+0x13e/0x141 [ 95.504466] [ 95.505197] Modules linked in: nft_fib_inet nft_fib_ipv4 nft_fib_ipv6 nft_fib nft_reject_inet nf_reject_ipv4 nf_reject_ipv6 nft_reject nft_ct nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 rfkill ip_set nf_tables vsock_loopback vmw_vsock_virtio_transport_common qrtr vmw_vsock_vmci_transport vsock sunrpc binfmt_misc pktcdvd 
vmw_balloon pcspkr vmw_vmci i2c_piix4 joydev loop dm_multipath nfnetlink zram crct10dif_pclmul crc32_pclmul vmwgfx crc32c_intel polyval_clmulni polyval_generic ghash_clmulni_intel sha512_ssse3 sha256_ssse3 vmxnet3 sha1_ssse3 drm_ttm_helper vmw_pvscsi ttm ata_generic pata_acpi serio_raw scsi_dh_rdac scsi_dh_emc scsi_dh_alua ip6_tables ip_tables fuse [ 95.516536] ---[ end trace 0000000000000000 ]--- Fixes: 6f4833383e85 ("net: vmxnet3: Fix NULL pointer dereference in vmxnet3_rq_rx_complete()") Signed-off-by: Matthias Stocker Reviewed-by: Subbaraya Sundeep Reviewed-by: Ronak Doshi Link: https://lore.kernel.org/r/20240531103711.101961-1-mstocker@barracuda.com Signed-off-by: Jakub Kicinski --- drivers/net/vmxnet3/vmxnet3_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 89ca6e75fcc6b..63822d454c00c 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2034,8 +2034,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter) rq->data_ring.base, rq->data_ring.basePA); rq->data_ring.base = NULL; - rq->data_ring.desc_size = 0; } + rq->data_ring.desc_size = 0; } } From 2fe40483ec257de2a0d819ef88e3e76c7e261319 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 31 May 2024 13:26:32 +0000 Subject: [PATCH 222/537] ipv6: ioam: block BH from ioam6_output() As explained in commit 1378817486d6 ("tipc: block BH before using dst_cache"), net/core/dst_cache.c helpers need to be called with BH disabled. Disabling preemption in ioam6_output() is not good enough, because ioam6_output() is called from process context, lwtunnel_output() only uses rcu_read_lock(). We might be interrupted by a softirq, re-enter ioam6_output() and corrupt dst_cache data structures. Fix the race by using local_bh_disable() instead of preempt_disable(). Fixes: 8cb3bf8bff3c ("ipv6: ioam: Add support for the ip6ip6 encapsulation") Signed-off-by: Eric Dumazet Cc: Justin Iurman Acked-by: Paolo Abeni Link: https://lore.kernel.org/r/20240531132636.2637995-2-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv6/ioam6_iptunnel.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c index 7563f8c6aa87c..bf7120ecea1eb 100644 --- a/net/ipv6/ioam6_iptunnel.c +++ b/net/ipv6/ioam6_iptunnel.c @@ -351,9 +351,9 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) goto drop; if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) { - preempt_disable(); + local_bh_disable(); dst = dst_cache_get(&ilwt->cache); - preempt_enable(); + local_bh_enable(); if (unlikely(!dst)) { struct ipv6hdr *hdr = ipv6_hdr(skb); @@ -373,9 +373,9 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) goto drop; } - preempt_disable(); + local_bh_disable(); dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr); - preempt_enable(); + local_bh_enable(); } skb_dst_drop(skb); From db0090c6eb12c31246438b7fe2a8f1b833e7a653 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 31 May 2024 13:26:33 +0000 Subject: [PATCH 223/537] net: ipv6: rpl_iptunnel: block BH in rpl_output() and rpl_input() As explained in commit 1378817486d6 ("tipc: block BH before using dst_cache"), net/core/dst_cache.c helpers need to be called with BH disabled. Disabling preemption in rpl_output() is not good enough, because rpl_output() is called from process context, lwtunnel_output() only uses rcu_read_lock(). 
We might be interrupted by a softirq, re-enter rpl_output() and corrupt dst_cache data structures. Fix the race by using local_bh_disable() instead of preempt_disable(). Apply a similar change in rpl_input(). Signed-off-by: Eric Dumazet Cc: Alexander Aring Acked-by: Paolo Abeni Link: https://lore.kernel.org/r/20240531132636.2637995-3-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv6/rpl_iptunnel.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c index a013b92cbb860..2c83b7586422d 100644 --- a/net/ipv6/rpl_iptunnel.c +++ b/net/ipv6/rpl_iptunnel.c @@ -212,9 +212,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb) if (unlikely(err)) goto drop; - preempt_disable(); + local_bh_disable(); dst = dst_cache_get(&rlwt->cache); - preempt_enable(); + local_bh_enable(); if (unlikely(!dst)) { struct ipv6hdr *hdr = ipv6_hdr(skb); @@ -234,9 +234,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb) goto drop; } - preempt_disable(); + local_bh_disable(); dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr); - preempt_enable(); + local_bh_enable(); } skb_dst_drop(skb); @@ -268,23 +268,21 @@ static int rpl_input(struct sk_buff *skb) return err; } - preempt_disable(); + local_bh_disable(); dst = dst_cache_get(&rlwt->cache); - preempt_enable(); if (!dst) { ip6_route_input(skb); dst = skb_dst(skb); if (!dst->error) { - preempt_disable(); dst_cache_set_ip6(&rlwt->cache, dst, &ipv6_hdr(skb)->saddr); - preempt_enable(); } } else { skb_dst_drop(skb); skb_dst_set(skb, dst); } + local_bh_enable(); err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); if (unlikely(err)) From c0b98ac1cc104f48763cdb27b1e9ac25fd81fc90 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 31 May 2024 13:26:34 +0000 Subject: [PATCH 224/537] ipv6: sr: block BH in seg6_output_core() and seg6_input_core() As explained in commit 1378817486d6 ("tipc: block BH before using dst_cache"), net/core/dst_cache.c helpers need to be called with BH disabled. Disabling preemption in seg6_output_core() is not good enough, because seg6_output_core() is called from process context, lwtunnel_output() only uses rcu_read_lock(). We might be interrupted by a softirq, re-enter seg6_output_core() and corrupt dst_cache data structures. Fix the race by using local_bh_disable() instead of preempt_disable(). Apply a similar change in seg6_input_core(). 
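The corrected shape, shared by the ioam6, rpl and seg6 output paths, is simply to bracket every dst_cache access with BH disabled. A condensed sketch (route lookup and error handling omitted; lwt, saddr and lookup_route() are placeholders):

local_bh_disable();
dst = dst_cache_get(&lwt->cache);
local_bh_enable();

if (unlikely(!dst)) {
	dst = lookup_route(net, skb);	/* placeholder for the ip6_route_output() path */

	local_bh_disable();
	dst_cache_set_ip6(&lwt->cache, dst, &saddr);
	local_bh_enable();
}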
Fixes: fa79581ea66c ("ipv6: sr: fix several BUGs when preemption is enabled") Fixes: 6c8702c60b88 ("ipv6: sr: add support for SRH encapsulation and injection with lwtunnels") Signed-off-by: Eric Dumazet Cc: David Lebrun Acked-by: Paolo Abeni Link: https://lore.kernel.org/r/20240531132636.2637995-4-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv6/seg6_iptunnel.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index a75df2ec8db0d..098632adc9b5a 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -464,23 +464,21 @@ static int seg6_input_core(struct net *net, struct sock *sk, slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate); - preempt_disable(); + local_bh_disable(); dst = dst_cache_get(&slwt->cache); - preempt_enable(); if (!dst) { ip6_route_input(skb); dst = skb_dst(skb); if (!dst->error) { - preempt_disable(); dst_cache_set_ip6(&slwt->cache, dst, &ipv6_hdr(skb)->saddr); - preempt_enable(); } } else { skb_dst_drop(skb); skb_dst_set(skb, dst); } + local_bh_enable(); err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); if (unlikely(err)) @@ -536,9 +534,9 @@ static int seg6_output_core(struct net *net, struct sock *sk, slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate); - preempt_disable(); + local_bh_disable(); dst = dst_cache_get(&slwt->cache); - preempt_enable(); + local_bh_enable(); if (unlikely(!dst)) { struct ipv6hdr *hdr = ipv6_hdr(skb); @@ -558,9 +556,9 @@ static int seg6_output_core(struct net *net, struct sock *sk, goto drop; } - preempt_disable(); + local_bh_disable(); dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr); - preempt_enable(); + local_bh_enable(); } skb_dst_drop(skb); From cf28ff8e4c02e1ffa850755288ac954b6ff0db8c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 31 May 2024 13:26:35 +0000 Subject: [PATCH 225/537] ila: block BH in ila_output() As explained in commit 1378817486d6 ("tipc: block BH before using dst_cache"), net/core/dst_cache.c helpers need to be called with BH disabled. ila_output() is called from lwtunnel_output() possibly from process context, and under rcu_read_lock(). We might be interrupted by a softirq, re-enter ila_output() and corrupt dst_cache data structures. Fix the race by using local_bh_disable(). 
Signed-off-by: Eric Dumazet Acked-by: Paolo Abeni Link: https://lore.kernel.org/r/20240531132636.2637995-5-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv6/ila/ila_lwt.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index 0601bad798221..ff7e734e335b0 100644 --- a/net/ipv6/ila/ila_lwt.c +++ b/net/ipv6/ila/ila_lwt.c @@ -58,7 +58,9 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb) return orig_dst->lwtstate->orig_output(net, sk, skb); } + local_bh_disable(); dst = dst_cache_get(&ilwt->dst_cache); + local_bh_enable(); if (unlikely(!dst)) { struct ipv6hdr *ip6h = ipv6_hdr(skb); struct flowi6 fl6; @@ -86,8 +88,11 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb) goto drop; } - if (ilwt->connected) + if (ilwt->connected) { + local_bh_disable(); dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr); + local_bh_enable(); + } } skb_dst_set(skb, dst); From 2fe6fb36c781b50482b1c3323fb526bc07d1af59 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 31 May 2024 13:26:36 +0000 Subject: [PATCH 226/537] net: dst_cache: add two DEBUG_NET warnings After fixing four different bugs involving dst_cache users, it might be worth adding a check about BH being blocked by dst_cache callers. DEBUG_NET_WARN_ON_ONCE(!in_softirq()); It is not fatal, if we missed valid case where no BH deadlock is to be feared, we might change this. Signed-off-by: Eric Dumazet Acked-by: Paolo Abeni Link: https://lore.kernel.org/r/20240531132636.2637995-6-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/core/dst_cache.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c index 6a0482e676d37..70c634b9e7b02 100644 --- a/net/core/dst_cache.c +++ b/net/core/dst_cache.c @@ -27,6 +27,7 @@ struct dst_cache_pcpu { static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache, struct dst_entry *dst, u32 cookie) { + DEBUG_NET_WARN_ON_ONCE(!in_softirq()); dst_release(dst_cache->dst); if (dst) dst_hold(dst); @@ -40,6 +41,7 @@ static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache, { struct dst_entry *dst; + DEBUG_NET_WARN_ON_ONCE(!in_softirq()); dst = idst->dst; if (!dst) goto fail; From c6cab01d7e20a028ffcee1e0a0b782332a16b5e6 Mon Sep 17 00:00:00 2001 From: Jeff Johnson Date: Fri, 31 May 2024 18:35:43 -0700 Subject: [PATCH 227/537] lib/test_rhashtable: add missing MODULE_DESCRIPTION() macro make allmodconfig && make W=1 C=1 reports: WARNING: modpost: missing MODULE_DESCRIPTION() in lib/test_rhashtable.o Add the missing invocation of the MODULE_DESCRIPTION() macro. 
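The macro is ordinary module boilerplate; a minimal module shows where it sits next to MODULE_LICENSE():

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/init.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);

MODULE_DESCRIPTION("Minimal module demonstrating MODULE_DESCRIPTION()");
MODULE_LICENSE("GPL");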
Signed-off-by: Jeff Johnson Link: https://lore.kernel.org/r/20240531-md-lib-test_rhashtable-v1-1-cd6d4138f1b6@quicinc.com Signed-off-by: Jakub Kicinski --- lib/test_rhashtable.c | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 42b585208249c..c63db03ebb9dc 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -811,4 +811,5 @@ static void __exit test_rht_exit(void) module_init(test_rht_init); module_exit(test_rht_exit); +MODULE_DESCRIPTION("Resizable, Scalable, Concurrent Hash Table test module"); MODULE_LICENSE("GPL v2"); From 4aa2dcfbad538adf7becd0034a3754e1bd01b2b5 Mon Sep 17 00:00:00 2001 From: Nikita Zhandarovich Date: Fri, 17 May 2024 07:19:14 -0700 Subject: [PATCH 228/537] HID: core: remove unnecessary WARN_ON() in implement() Syzkaller hit a warning [1] in a call to implement() when trying to write a value into a field of smaller size in an output report. Since implement() already has a warn message printed out with the help of hid_warn() and value in question gets trimmed with: ... value &= m; ... WARN_ON may be considered superfluous. Remove it to suppress future syzkaller triggers. [1] WARNING: CPU: 0 PID: 5084 at drivers/hid/hid-core.c:1451 implement drivers/hid/hid-core.c:1451 [inline] WARNING: CPU: 0 PID: 5084 at drivers/hid/hid-core.c:1451 hid_output_report+0x548/0x760 drivers/hid/hid-core.c:1863 Modules linked in: CPU: 0 PID: 5084 Comm: syz-executor424 Not tainted 6.9.0-rc7-syzkaller-00183-gcf87f46fd34d #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/02/2024 RIP: 0010:implement drivers/hid/hid-core.c:1451 [inline] RIP: 0010:hid_output_report+0x548/0x760 drivers/hid/hid-core.c:1863 ... Call Trace: __usbhid_submit_report drivers/hid/usbhid/hid-core.c:591 [inline] usbhid_submit_report+0x43d/0x9e0 drivers/hid/usbhid/hid-core.c:636 hiddev_ioctl+0x138b/0x1f00 drivers/hid/usbhid/hiddev.c:726 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:904 [inline] __se_sys_ioctl+0xfc/0x170 fs/ioctl.c:890 do_syscall_x64 arch/x86/entry/common.c:52 [inline] do_syscall_64+0xf5/0x240 arch/x86/entry/common.c:83 entry_SYSCALL_64_after_hwframe+0x77/0x7f ... Fixes: 95d1c8951e5b ("HID: simplify implement() a bit") Reported-by: Suggested-by: Alan Stern Signed-off-by: Nikita Zhandarovich Signed-off-by: Jiri Kosina --- drivers/hid/hid-core.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index b1fa0378e8f4a..74efda212c55f 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1448,7 +1448,6 @@ static void implement(const struct hid_device *hid, u8 *report, hid_warn(hid, "%s() called with too large value %d (n: %d)! (%s)\n", __func__, value, n, current->comm); - WARN_ON(1); value &= m; } } From 3ec8ebd8a5b782d56347ae884de880af26f93996 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= Date: Mon, 27 May 2024 16:22:34 +0300 Subject: [PATCH 229/537] EDAC/amd64: Convert PCIBIOS_* return codes to errnos MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gpu_get_node_map() uses pci_read_config_dword() that returns PCIBIOS_* codes. The return code is then returned all the way into the module init function amd64_edac_init() that returns it as is. The module init functions, however, should return normal errnos. Convert PCIBIOS_* returns code using pcibios_err_to_errno() into normal errno before returning it from gpu_get_node_map(). 
For consistency, convert also the other similar cases which return PCIBIOS_* codes even if they do not have any bugs at the moment. Fixes: 4251566ebc1c ("EDAC/amd64: Cache and use GPU node map") Signed-off-by: Ilpo Järvinen Signed-off-by: Borislav Petkov (AMD) Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20240527132236.13875-1-ilpo.jarvinen@linux.intel.com --- drivers/edac/amd64_edac.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 1f3520d768613..a17f3c0cdfa60 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -81,7 +81,7 @@ int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, amd64_warn("%s: error reading F%dx%03x.\n", func, PCI_FUNC(pdev->devfn), offset); - return err; + return pcibios_err_to_errno(err); } int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, @@ -94,7 +94,7 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, amd64_warn("%s: error writing to F%dx%03x.\n", func, PCI_FUNC(pdev->devfn), offset); - return err; + return pcibios_err_to_errno(err); } /* @@ -1025,8 +1025,10 @@ static int gpu_get_node_map(struct amd64_pvt *pvt) } ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp); - if (ret) + if (ret) { + ret = pcibios_err_to_errno(ret); goto out; + } gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp); gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp); From f8367a74aebf88dc8b58a0db6a6c90b4cb8fc9d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= Date: Mon, 27 May 2024 16:22:35 +0300 Subject: [PATCH 230/537] EDAC/igen6: Convert PCIBIOS_* return codes to errnos MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit errcmd_enable_error_reporting() uses pci_{read,write}_config_word() that return PCIBIOS_* codes. The return code is then returned all the way into the probe function igen6_probe() that returns it as is. The probe functions, however, should return normal errnos. Convert PCIBIOS_* returns code using pcibios_err_to_errno() into normal errno before returning it from errcmd_enable_error_reporting(). Fixes: 10590a9d4f23 ("EDAC/igen6: Add EDAC driver for Intel client SoCs using IBECC") Signed-off-by: Ilpo Järvinen Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Qiuxu Zhuo Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20240527132236.13875-2-ilpo.jarvinen@linux.intel.com --- drivers/edac/igen6_edac.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c index cdd8480e73687..dbe9fe5f2ca6c 100644 --- a/drivers/edac/igen6_edac.c +++ b/drivers/edac/igen6_edac.c @@ -800,7 +800,7 @@ static int errcmd_enable_error_reporting(bool enable) rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd); if (rc) - return rc; + return pcibios_err_to_errno(rc); if (enable) errcmd |= ERRCMD_CE | ERRSTS_UE; @@ -809,7 +809,7 @@ static int errcmd_enable_error_reporting(bool enable) rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd); if (rc) - return rc; + return pcibios_err_to_errno(rc); return 0; } From a535d59432370343058755100ee75ab03c0e3f91 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 30 May 2024 16:26:07 -0700 Subject: [PATCH 231/537] net: tls: fix marking packets as decrypted For TLS offload we mark packets with skb->decrypted to make sure they don't escape the host without getting encrypted first. 
The crypto state lives in the socket, so it may get detached by a call to skb_orphan(). As a safety check - the egress path drops all packets with skb->decrypted and no "crypto-safe" socket. The skb marking was added to sendpage only (and not sendmsg), because tls_device injected data into the TCP stack using sendpage. This special case was missed when sendpage got folded into sendmsg. Fixes: c5c37af6ecad ("tcp: Convert do_tcp_sendpages() to use MSG_SPLICE_PAGES") Signed-off-by: Jakub Kicinski Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20240530232607.82686-1-kuba@kernel.org Signed-off-by: Paolo Abeni --- net/ipv4/tcp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 681b54e1f3a64..4d8cc2ebb64c7 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1165,6 +1165,9 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) process_backlog++; +#ifdef CONFIG_SKB_DECRYPTED + skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED); +#endif tcp_skb_entail(sk, skb); copy = size_goal; From c3552ab19aeb8101b751e7c7ad45deab9e1134e1 Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Fri, 24 May 2024 17:15:42 +0200 Subject: [PATCH 232/537] staging: vchiq_debugfs: Fix NPD in vchiq_dump_state The commit 42a2f6664e18 ("staging: vc04_services: Move global g_state to vchiq_state") falsely assumed that the debugfs entry vchiq/state was created with vchiq_instance as data. This causes now a NULL pointer derefence while trying to dump the vchiq state. So fix this by passing vchiq_state as data, because this is the relevant part here. Fixes: 42a2f6664e18 ("staging: vc04_services: Move global g_state to vchiq_state") Signed-off-by: Stefan Wahren Reviewed-by: Umang Jain Link: https://lore.kernel.org/r/20240524151542.19415-1-wahrenst@gmx.net Signed-off-by: Greg Kroah-Hartman --- .../staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 2 +- .../vc04_services/interface/vchiq_arm/vchiq_debugfs.c | 8 ++++---- .../vc04_services/interface/vchiq_arm/vchiq_debugfs.h | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 297af1d80b12b..69daeba974f2d 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -1759,7 +1759,7 @@ static int vchiq_probe(struct platform_device *pdev) if (err) goto failed_platform_init; - vchiq_debugfs_init(); + vchiq_debugfs_init(&mgmt->state); dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n", VCHIQ_VERSION, VCHIQ_VERSION_MIN); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c index 54e7bf029d9a1..1f74d0bb33bae 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c @@ -42,9 +42,9 @@ static int debugfs_trace_show(struct seq_file *f, void *offset) static int vchiq_dump_show(struct seq_file *f, void *offset) { - struct vchiq_instance *instance = f->private; + struct vchiq_state *state = f->private; - vchiq_dump_state(f, instance->state); + vchiq_dump_state(f, state); return 0; } @@ -121,12 +121,12 @@ void vchiq_debugfs_remove_instance(struct vchiq_instance *instance) debugfs_remove_recursive(node->dentry); } -void vchiq_debugfs_init(void) +void vchiq_debugfs_init(struct vchiq_state 
*state) { vchiq_dbg_dir = debugfs_create_dir("vchiq", NULL); vchiq_dbg_clients = debugfs_create_dir("clients", vchiq_dbg_dir); - debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, NULL, + debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, state, &vchiq_dump_fops); } diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h index e9bf055a4ca95..fabffd81b1ec9 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h @@ -10,7 +10,7 @@ struct vchiq_debugfs_node { struct dentry *dentry; }; -void vchiq_debugfs_init(void); +void vchiq_debugfs_init(struct vchiq_state *state); void vchiq_debugfs_deinit(void); From a295ec52c8624883885396fde7b4df1a179627c3 Mon Sep 17 00:00:00 2001 From: "Kun(llfl)" Date: Thu, 9 May 2024 08:42:20 +0800 Subject: [PATCH 233/537] iommu/amd: Fix sysfs leak in iommu init During the iommu initialization, iommu_init_pci() adds sysfs nodes. However, these nodes aren't remove in free_iommu_resources() subsequently. Fixes: 39ab9555c241 ("iommu: Add sysfs bindings for struct iommu_device") Signed-off-by: Kun(llfl) Reviewed-by: Suravee Suthikulpanit Link: https://lore.kernel.org/r/c8e0d11c6ab1ee48299c288009cf9c5dae07b42d.1715215003.git.llfl@linux.alibaba.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/init.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index a18e74878f68a..27e2937270950 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -1626,8 +1626,17 @@ static void __init free_pci_segments(void) } } +static void __init free_sysfs(struct amd_iommu *iommu) +{ + if (iommu->iommu.dev) { + iommu_device_unregister(&iommu->iommu); + iommu_device_sysfs_remove(&iommu->iommu); + } +} + static void __init free_iommu_one(struct amd_iommu *iommu) { + free_sysfs(iommu); free_cwwb_sem(iommu); free_command_buffer(iommu); free_event_buffer(iommu); From cc8d89d0637990c66440a226f443d95340979a04 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 20 May 2024 20:14:44 +0100 Subject: [PATCH 234/537] iommu/dma: Fix domain init Despite carefully rewording the kerneldoc to describe the new direct interaction with dma_range_map, it seems I managed to confuse myself in removing the redundant force_aperture check and ended up making the code not do that at all. This led to dma_range_maps inadvertently being able to set iovad->start_pfn = 0, and all the nonsensical chaos which ensues from there. Restore the correct behaviour of constraining base_pfn to the domain aperture regardless of dma_range_map, and not trying to apply dma_range_map constraints to the basic IOVA domain since they will be properly handled with reserved regions later. 
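The restored checks are plain interval logic: fail if the dma_range_map does not overlap the aperture at all, and clamp the base to the aperture start independently of the map. A stand-alone illustration with arbitrary numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };	/* inclusive bounds */

static bool ranges_overlap(struct range a, struct range b)
{
	return a.start <= b.end && b.start <= a.end;
}

static uint64_t clamp_to_start(uint64_t base, uint64_t aperture_start)
{
	return base > aperture_start ? base : aperture_start;
}

int main(void)
{
	struct range aperture = { 0x1000, 0xffffffffULL };
	struct range map      = { 0x0,    0x7fffffffULL };

	if (!ranges_overlap(map, aperture)) {
		fprintf(stderr, "specified DMA range outside IOMMU capability\n");
		return 1;
	}
	printf("usable base: %#llx\n",
	       (unsigned long long)clamp_to_start(map.start, aperture.start));
	return 0;
}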
Reported-by: Jon Hunter Reported-by: Jerry Snitselaar Fixes: ad4750b07d34 ("iommu/dma: Make limit checks self-contained") Signed-off-by: Robin Murphy Tested-by: Jerry Snitselaar Reviewed-by: Jerry Snitselaar Link: https://lore.kernel.org/r/721fa6baebb0924aa40db0b8fb86bcb4538434af.1716232484.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- drivers/iommu/dma-iommu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index f731e4b2a4172..43520e7275cc1 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -686,15 +686,15 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev /* Check the domain allows at least some access to the device... */ if (map) { - dma_addr_t base = dma_range_map_min(map); - if (base > domain->geometry.aperture_end || + if (dma_range_map_min(map) > domain->geometry.aperture_end || dma_range_map_max(map) < domain->geometry.aperture_start) { pr_warn("specified DMA range outside IOMMU capability\n"); return -EFAULT; } - /* ...then finally give it a kicking to make sure it fits */ - base_pfn = max(base, domain->geometry.aperture_start) >> order; } + /* ...then finally give it a kicking to make sure it fits */ + base_pfn = max_t(unsigned long, base_pfn, + domain->geometry.aperture_start >> order); /* start_pfn is always nonzero for an already-initialised domain */ mutex_lock(&cookie->mutex); From 89e8a2366e3bce584b6c01549d5019c5cda1205e Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 28 May 2024 12:25:28 +0800 Subject: [PATCH 235/537] iommu: Return right value in iommu_sva_bind_device() iommu_sva_bind_device() should return either a sva bond handle or an ERR_PTR value in error cases. Existing drivers (idxd and uacce) only check the return value with IS_ERR(). This could potentially lead to a kernel NULL pointer dereference issue if the function returns NULL instead of an error pointer. In reality, this doesn't cause any problems because iommu_sva_bind_device() only returns NULL when the kernel is not configured with CONFIG_IOMMU_SVA. In this case, iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) will return an error, and the device drivers won't call iommu_sva_bind_device() at all. Fixes: 26b25a2b98e4 ("iommu: Bind process address spaces to devices") Signed-off-by: Lu Baolu Reviewed-by: Jean-Philippe Brucker Reviewed-by: Kevin Tian Reviewed-by: Vasant Hegde Link: https://lore.kernel.org/r/20240528042528.71396-1-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- include/linux/iommu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 7bc8dff7cf6d7..17b3f36ad843e 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -1533,7 +1533,7 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, static inline struct iommu_sva * iommu_sva_bind_device(struct device *dev, struct mm_struct *mm) { - return NULL; + return ERR_PTR(-ENODEV); } static inline void iommu_sva_unbind_device(struct iommu_sva *handle) From 998a0a362b0b4cf501161c3319e6994d37c45a8c Mon Sep 17 00:00:00 2001 From: Vasant Hegde Date: Wed, 29 May 2024 11:39:00 +0000 Subject: [PATCH 236/537] iommu/amd: Fix workqueue name Workqueue name length is crossing WQ_NAME_LEN limit. Fix it by changing name format. New format : "iopf_queue/amdvi-" kernel warning: [ 11.146912] workqueue: name exceeds WQ_NAME_LEN. 
Truncating to: iopf_queue/amdiommu-0xc002-iopf Reported-by: Borislav Petkov Fixes: 61928bab9d26 ("iommu/amd: Define per-IOMMU iopf_queue") Signed-off-by: Vasant Hegde Acked-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20240529113900.5798-1-vasant.hegde@amd.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/ppr.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c index 091423bb8aac8..31d98a2a11a6e 100644 --- a/drivers/iommu/amd/ppr.c +++ b/drivers/iommu/amd/ppr.c @@ -222,8 +222,7 @@ int amd_iommu_iopf_init(struct amd_iommu *iommu) if (iommu->iopf_queue) return ret; - snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), - "amdiommu-%#x-iopfq", + snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), "amdvi-%#x", PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid)); iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name); From 48dc345a23b984c457d1c5878168d026c500618f Mon Sep 17 00:00:00 2001 From: Vasant Hegde Date: Thu, 30 May 2024 07:11:18 +0000 Subject: [PATCH 237/537] iommu/amd: Check EFR[EPHSup] bit before enabling PPR Check for EFR[EPHSup] bit before enabling PPR. This bit must be set to enable PPR. Reported-by: Borislav Petkov Fixes: c4cb23111103 ("iommu/amd: Add support for enable/disable IOPF") Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218900 Tested-by: Borislav Petkov Tested-by: Jean-Christophe Guillain Signed-off-by: Vasant Hegde Reviewed-by: Suravee Suthikulpanit Link: https://lore.kernel.org/r/20240530071118.10297-1-vasant.hegde@amd.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/amd_iommu.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 2fde1302a5843..2d5945c982bde 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -129,7 +129,8 @@ static inline int check_feature_gpt_level(void) static inline bool amd_iommu_gt_ppr_supported(void) { return (check_feature(FEATURE_GT) && - check_feature(FEATURE_PPR)); + check_feature(FEATURE_PPR) && + check_feature(FEATURE_EPHSUP)); } static inline u64 iommu_virt_to_phys(void *vaddr) From 526606b0a1998b0791b42c199d53550c3ba724b5 Mon Sep 17 00:00:00 2001 From: Vasant Hegde Date: Thu, 30 May 2024 08:48:01 +0000 Subject: [PATCH 238/537] iommu/amd: Fix Invalid wait context issue With commit c4cb23111103 ("iommu/amd: Add support for enable/disable IOPF") we are hitting below issue. This happens because in IOPF enablement path it holds spin lock with irq disable and then tries to take mutex lock. 
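In lock-nesting terms, the offending shape is a sleeping lock acquired inside a raw spinlock section, which is what the 'Invalid wait context' checker reports. As a sketch (placeholder mutex name, not the actual driver code):

raw_spin_lock_irqsave(&iommu->lock, flags);
	mutex_lock(&iopf_queue_lock);	/* sleeping lock inside a spinlock section */
	...
	mutex_unlock(&iopf_queue_lock);
raw_spin_unlock_irqrestore(&iommu->lock, flags);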
dmesg: ----- [ 0.938739] ============================= [ 0.938740] [ BUG: Invalid wait context ] [ 0.938742] 6.10.0-rc1+ #1 Not tainted [ 0.938745] ----------------------------- [ 0.938746] swapper/0/1 is trying to lock: [ 0.938748] ffffffff8c9f01d8 (&port_lock_key){....}-{3:3}, at: serial8250_console_write+0x78/0x4a0 [ 0.938767] other info that might help us debug this: [ 0.938768] context-{5:5} [ 0.938769] 7 locks held by swapper/0/1: [ 0.938772] #0: ffff888101a91310 (&group->mutex){+.+.}-{4:4}, at: bus_iommu_probe+0x70/0x160 [ 0.938790] #1: ffff888101d1f1b8 (&domain->lock){....}-{3:3}, at: amd_iommu_attach_device+0xa5/0x700 [ 0.938799] #2: ffff888101cc3d18 (&dev_data->lock){....}-{3:3}, at: amd_iommu_attach_device+0xc5/0x700 [ 0.938806] #3: ffff888100052830 (&iommu->lock){....}-{2:2}, at: amd_iommu_iopf_add_device+0x3f/0xa0 [ 0.938813] #4: ffffffff8945a340 (console_lock){+.+.}-{0:0}, at: _printk+0x48/0x50 [ 0.938822] #5: ffffffff8945a390 (console_srcu){....}-{0:0}, at: console_flush_all+0x58/0x4e0 [ 0.938867] #6: ffffffff82459f80 (console_owner){....}-{0:0}, at: console_flush_all+0x1f0/0x4e0 [ 0.938872] stack backtrace: [ 0.938874] CPU: 2 PID: 1 Comm: swapper/0 Not tainted 6.10.0-rc1+ #1 [ 0.938877] Hardware name: HP HP EliteBook 745 G3/807E, BIOS N73 Ver. 01.39 04/16/2019 Fix above issue by re-arranging code in attach device path: - move device PASID/IOPF enablement outside lock in AMD IOMMU driver. This is safe as core layer holds group->mutex lock before calling iommu_ops->attach_dev. Reported-by: Borislav Petkov Reported-by: Mikhail Gavrilov Reported-by: Chris Bainbridge Fixes: c4cb23111103 ("iommu/amd: Add support for enable/disable IOPF") Tested-by: Borislav Petkov Tested-by: Chris Bainbridge Tested-by: Mikhail Gavrilov Signed-off-by: Vasant Hegde Link: https://lore.kernel.org/r/20240530084801.10758-1-vasant.hegde@amd.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/iommu.c | 48 +++++++++++++++++++-------------------- drivers/iommu/amd/ppr.c | 22 ++++-------------- 2 files changed, 28 insertions(+), 42 deletions(-) diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 52d83730a22ad..c2703599bb166 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2032,7 +2032,6 @@ static int do_attach(struct iommu_dev_data *dev_data, struct protection_domain *domain) { struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); - struct pci_dev *pdev; int ret = 0; /* Update data structures */ @@ -2047,30 +2046,13 @@ static int do_attach(struct iommu_dev_data *dev_data, domain->dev_iommu[iommu->index] += 1; domain->dev_cnt += 1; - pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; + /* Setup GCR3 table */ if (pdom_is_sva_capable(domain)) { ret = init_gcr3_table(dev_data, domain); if (ret) return ret; - - if (pdev) { - pdev_enable_caps(pdev); - - /* - * Device can continue to function even if IOPF - * enablement failed. Hence in error path just - * disable device PRI support. 
- */ - if (amd_iommu_iopf_add_device(iommu, dev_data)) - pdev_disable_cap_pri(pdev); - } - } else if (pdev) { - pdev_enable_cap_ats(pdev); } - /* Update device table */ - amd_iommu_dev_update_dte(dev_data, true); - return ret; } @@ -2163,6 +2145,11 @@ static void detach_device(struct device *dev) do_detach(dev_data); +out: + spin_unlock(&dev_data->lock); + + spin_unlock_irqrestore(&domain->lock, flags); + /* Remove IOPF handler */ if (ppr) amd_iommu_iopf_remove_device(iommu, dev_data); @@ -2170,10 +2157,6 @@ static void detach_device(struct device *dev) if (dev_is_pci(dev)) pdev_disable_caps(to_pci_dev(dev)); -out: - spin_unlock(&dev_data->lock); - - spin_unlock_irqrestore(&domain->lock, flags); } static struct iommu_device *amd_iommu_probe_device(struct device *dev) @@ -2485,6 +2468,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); struct protection_domain *domain = to_pdomain(dom); struct amd_iommu *iommu = get_amd_iommu_from_dev(dev); + struct pci_dev *pdev; int ret; /* @@ -2517,7 +2501,23 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, } #endif - iommu_completion_wait(iommu); + pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; + if (pdev && pdom_is_sva_capable(domain)) { + pdev_enable_caps(pdev); + + /* + * Device can continue to function even if IOPF + * enablement failed. Hence in error path just + * disable device PRI support. + */ + if (amd_iommu_iopf_add_device(iommu, dev_data)) + pdev_disable_cap_pri(pdev); + } else if (pdev) { + pdev_enable_cap_ats(pdev); + } + + /* Update device table */ + amd_iommu_dev_update_dte(dev_data, true); return ret; } diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c index 31d98a2a11a6e..7c67d69f0b8ca 100644 --- a/drivers/iommu/amd/ppr.c +++ b/drivers/iommu/amd/ppr.c @@ -248,40 +248,26 @@ void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt, int amd_iommu_iopf_add_device(struct amd_iommu *iommu, struct iommu_dev_data *dev_data) { - unsigned long flags; int ret = 0; if (!dev_data->pri_enabled) return ret; - raw_spin_lock_irqsave(&iommu->lock, flags); - - if (!iommu->iopf_queue) { - ret = -EINVAL; - goto out_unlock; - } + if (!iommu->iopf_queue) + return -EINVAL; ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev); if (ret) - goto out_unlock; + return ret; dev_data->ppr = true; - -out_unlock: - raw_spin_unlock_irqrestore(&iommu->lock, flags); - return ret; + return 0; } /* Its assumed that caller has verified that device was added to iopf queue */ void amd_iommu_iopf_remove_device(struct amd_iommu *iommu, struct iommu_dev_data *dev_data) { - unsigned long flags; - - raw_spin_lock_irqsave(&iommu->lock, flags); - iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev); dev_data->ppr = false; - - raw_spin_unlock_irqrestore(&iommu->lock, flags); } From b19ab7ee2c4c1ec5f27c18413c3ab63907f7d55c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= Date: Tue, 14 May 2024 17:04:29 +0300 Subject: [PATCH 239/537] tty: n_tty: Fix buffer offsets when lookahead is used MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When lookahead has "consumed" some characters (la_count > 0), n_tty_receive_buf_standard() and n_tty_receive_buf_closing() for characters beyond the la_count are given wrong cp/fp offsets which leads to duplicating and losing some characters. 
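The arithmetic behind the fix is simple: once the first pass has handled the la_count lookahead bytes, both the character pointer and the flag pointer must be advanced by la_count and the remaining count reduced by the same amount, otherwise the second pass re-processes bytes the lookahead already consumed and drops an equal number at the tail. A minimal userspace sketch of the corrected two-pass split; process_chunk() and the sample buffer are invented for illustration, only the pointer/count adjustments mirror the fix:

/*
 * Illustrative userspace sketch (not kernel code): process a receive
 * buffer in two passes when the first la_count bytes were already
 * scanned by lookahead.
 */
#include <stdio.h>
#include <stddef.h>

static void process_chunk(const char *cp, size_t count, int lookahead_done)
{
	printf("%s pass: \"%.*s\"\n",
	       lookahead_done ? "lookahead" : "normal", (int)count, cp);
}

int main(void)
{
	const char buf[] = "abcdefgh";
	size_t count = sizeof(buf) - 1;     /* 8 payload bytes */
	size_t la_count = 3;                /* bytes already scanned by lookahead */
	const char *cp = buf;

	if (la_count > 0) {
		process_chunk(cp, la_count, 1); /* first pass: bytes 0..la_count-1 */
		cp += la_count;                 /* advance past the lookahead bytes */
		count -= la_count;              /* and shrink the remaining count */
	}
	if (count > 0)
		process_chunk(cp, count, 0);    /* second pass: only the remaining bytes */

	return 0;
}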
If la_count > 0, correct buffer pointers and make count consistent too (the latter is not strictly necessary to fix the issue but seems more logical to adjust all variables immediately to keep state consistent). Reported-by: Vadym Krevs Fixes: 6bb6fa6908eb ("tty: Implement lookahead to process XON/XOFF timely") Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218834 Tested-by: Vadym Krevs Cc: stable@vger.kernel.org Signed-off-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240514140429.12087-1-ilpo.jarvinen@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/n_tty.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index f252d0b5a434e..5e9ca4376d686 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1619,15 +1619,25 @@ static void __receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, else if (ldata->raw || (L_EXTPROC(tty) && !preops)) n_tty_receive_buf_raw(tty, cp, fp, count); else if (tty->closing && !L_EXTPROC(tty)) { - if (la_count > 0) + if (la_count > 0) { n_tty_receive_buf_closing(tty, cp, fp, la_count, true); - if (count > la_count) - n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false); + cp += la_count; + if (fp) + fp += la_count; + count -= la_count; + } + if (count > 0) + n_tty_receive_buf_closing(tty, cp, fp, count, false); } else { - if (la_count > 0) + if (la_count > 0) { n_tty_receive_buf_standard(tty, cp, fp, la_count, true); - if (count > la_count) - n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false); + cp += la_count; + if (fp) + fp += la_count; + count -= la_count; + } + if (count > 0) + n_tty_receive_buf_standard(tty, cp, fp, count, false); flush_echoes(tty); if (tty->ops->flush_chars) From 87d80bfbd577912462061b1a45c0ed9c7fcb872f Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 14 May 2024 22:05:53 +0300 Subject: [PATCH 240/537] serial: 8250_dw: Don't use struct dw8250_data outside of 8250_dw The container of the struct dw8250_port_data is private to the actual driver. In particular, 8250_lpss and 8250_dw use different data types that are assigned to the UART port private_data. Hence, it must not be used outside the specific driver. Currently the only cpr_val is required by the common code, make it be available via struct dw8250_port_data. This fixes the UART breakage on Intel Galileo boards. Fixes: 593dea000bc1 ("serial: 8250: dw: Allow to use a fallback CPR value if not synthesized") Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20240514190730.2787071-2-andriy.shevchenko@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_dw.c | 9 +++++++-- drivers/tty/serial/8250/8250_dwlib.c | 3 +-- drivers/tty/serial/8250/8250_dwlib.h | 3 ++- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index ba9f4dc4e71da..667e16897bde7 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -55,6 +55,7 @@ #define DW_UART_QUIRK_SKIP_SET_RATE BIT(2) #define DW_UART_QUIRK_IS_DMA_FC BIT(3) #define DW_UART_QUIRK_APMC0D08 BIT(4) +#define DW_UART_QUIRK_CPR_VALUE BIT(5) static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb) { @@ -432,6 +433,10 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p) static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data) { unsigned int quirks = data->pdata ? 
data->pdata->quirks : 0; + u32 cpr_value = data->pdata ? data->pdata->cpr_value : 0; + + if (quirks & DW_UART_QUIRK_CPR_VALUE) + data->data.cpr_value = cpr_value; #ifdef CONFIG_64BIT if (quirks & DW_UART_QUIRK_OCTEON) { @@ -714,8 +719,8 @@ static const struct dw8250_platform_data dw8250_armada_38x_data = { static const struct dw8250_platform_data dw8250_renesas_rzn1_data = { .usr_reg = DW_UART_USR, - .cpr_val = 0x00012f32, - .quirks = DW_UART_QUIRK_IS_DMA_FC, + .cpr_value = 0x00012f32, + .quirks = DW_UART_QUIRK_CPR_VALUE | DW_UART_QUIRK_IS_DMA_FC, }; static const struct dw8250_platform_data dw8250_starfive_jh7100_data = { diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c index 3e33ddf7bc800..5a2520943dfd5 100644 --- a/drivers/tty/serial/8250/8250_dwlib.c +++ b/drivers/tty/serial/8250/8250_dwlib.c @@ -242,7 +242,6 @@ static const struct serial_rs485 dw8250_rs485_supported = { void dw8250_setup_port(struct uart_port *p) { struct dw8250_port_data *pd = p->private_data; - struct dw8250_data *data = to_dw8250_data(pd); struct uart_8250_port *up = up_to_u8250p(p); u32 reg, old_dlf; @@ -278,7 +277,7 @@ void dw8250_setup_port(struct uart_port *p) reg = dw8250_readl_ext(p, DW_UART_CPR); if (!reg) { - reg = data->pdata->cpr_val; + reg = pd->cpr_value; dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg); } if (!reg) diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h index f13e91f2cace9..794a9014cdac1 100644 --- a/drivers/tty/serial/8250/8250_dwlib.h +++ b/drivers/tty/serial/8250/8250_dwlib.h @@ -19,6 +19,7 @@ struct dw8250_port_data { struct uart_8250_dma dma; /* Hardware configuration */ + u32 cpr_value; u8 dlf_size; /* RS485 variables */ @@ -27,7 +28,7 @@ struct dw8250_port_data { struct dw8250_platform_data { u8 usr_reg; - u32 cpr_val; + u32 cpr_value; unsigned int quirks; }; From 2c94512055f362dd789e0f87b8566feeddec83c9 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 14 May 2024 22:05:54 +0300 Subject: [PATCH 241/537] serial: 8250_dw: Revert "Move definitions to the shared header" This reverts commit d9666dfb314e1ffd6eb9c3c4243fe3e094c047a7. The container of the struct dw8250_port_data is private to the actual driver. In particular, 8250_lpss and 8250_dw use different data types that are assigned to the UART port private_data. Hence, it must not be used outside the specific driver. Fix the mistake made in the past by moving the respective definitions to the specific driver. 
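The reason this split is safe is the usual container_of() ownership rule: shared library code may only dereference the embedded struct dw8250_port_data it is handed, while converting that pointer back to the enclosing structure is legal only inside the driver that allocated it. A standalone sketch of the pattern follows, using simplified stand-in types rather than the real driver structures:

/*
 * Standalone sketch of the container_of pattern behind this revert:
 * shared code only ever sees the embedded "port data", while the
 * driver that allocated the outer structure may convert back to it.
 * The struct and function names are simplified stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct port_data {              /* shared with library code */
	unsigned int cpr_value;
};

struct driver_data {            /* private to one driver */
	struct port_data data;
	int quirks;
};

/* Library helper: must rely on the shared struct only. */
static void setup_port(struct port_data *pd)
{
	printf("CPR fallback: %#x\n", pd->cpr_value);
}

int main(void)
{
	struct driver_data drv = { .data = { .cpr_value = 0x12f32 }, .quirks = 1 };

	setup_port(&drv.data);          /* fine: shared view only */

	/* Only the owning driver may go back to its private container: */
	struct driver_data *back = container_of(&drv.data, struct driver_data, data);
	printf("quirks: %d\n", back->quirks);
	return 0;
}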
Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20240514190730.2787071-3-andriy.shevchenko@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_dw.c | 27 +++++++++++++++++++++++ drivers/tty/serial/8250/8250_dwlib.h | 32 ---------------------------- 2 files changed, 27 insertions(+), 32 deletions(-) diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 667e16897bde7..fb809e32c6ae2 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -57,6 +57,33 @@ #define DW_UART_QUIRK_APMC0D08 BIT(4) #define DW_UART_QUIRK_CPR_VALUE BIT(5) +struct dw8250_platform_data { + u8 usr_reg; + u32 cpr_value; + unsigned int quirks; +}; + +struct dw8250_data { + struct dw8250_port_data data; + const struct dw8250_platform_data *pdata; + + int msr_mask_on; + int msr_mask_off; + struct clk *clk; + struct clk *pclk; + struct notifier_block clk_notifier; + struct work_struct clk_work; + struct reset_control *rst; + + unsigned int skip_autocfg:1; + unsigned int uart_16550_compatible:1; +}; + +static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data) +{ + return container_of(data, struct dw8250_data, data); +} + static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb) { return container_of(nb, struct dw8250_data, clk_notifier); diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h index 794a9014cdac1..7dd2a8e7b7808 100644 --- a/drivers/tty/serial/8250/8250_dwlib.h +++ b/drivers/tty/serial/8250/8250_dwlib.h @@ -2,15 +2,10 @@ /* Synopsys DesignWare 8250 library header file. */ #include -#include #include -#include #include "8250.h" -struct clk; -struct reset_control; - struct dw8250_port_data { /* Port properties */ int line; @@ -26,36 +21,9 @@ struct dw8250_port_data { bool hw_rs485_support; }; -struct dw8250_platform_data { - u8 usr_reg; - u32 cpr_value; - unsigned int quirks; -}; - -struct dw8250_data { - struct dw8250_port_data data; - const struct dw8250_platform_data *pdata; - - int msr_mask_on; - int msr_mask_off; - struct clk *clk; - struct clk *pclk; - struct notifier_block clk_notifier; - struct work_struct clk_work; - struct reset_control *rst; - - unsigned int skip_autocfg:1; - unsigned int uart_16550_compatible:1; -}; - void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, const struct ktermios *old); void dw8250_setup_port(struct uart_port *p); -static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data) -{ - return container_of(data, struct dw8250_data, data); -} - static inline u32 dw8250_readl_ext(struct uart_port *p, int offset) { if (p->iotype == UPIO_MEM32BE) From 5208e7ced520a813b4f4774451fbac4e517e78b2 Mon Sep 17 00:00:00 2001 From: Doug Brown Date: Sun, 19 May 2024 12:19:30 -0700 Subject: [PATCH 242/537] serial: 8250_pxa: Configure tx_loadsz to match FIFO IRQ level The FIFO is 64 bytes, but the FCR is configured to fire the TX interrupt when the FIFO is half empty (bit 3 = 0). Thus, we should only write 32 bytes when a TX interrupt occurs. This fixes a problem observed on the PXA168 that dropped a bunch of TX bytes during large transmissions. 
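As a sanity check of the numbers: with the FCR trigger set to "half empty", only fifosize / 2 bytes are guaranteed to be free when the TX interrupt fires, which is where the value 32 comes from for a 64-byte FIFO. A tiny illustrative calculation; tx_room_guaranteed() is an invented helper for this sketch, not a kernel API:

/*
 * Back-of-the-envelope check for the FIFO numbers in this commit: with a
 * TX interrupt at "half empty", only fifosize/2 bytes are guaranteed
 * free, so that is the safe per-interrupt load size.
 */
#include <stdio.h>

static unsigned int tx_room_guaranteed(unsigned int fifosize,
				       unsigned int irq_trigger_fraction)
{
	return fifosize / irq_trigger_fraction;
}

int main(void)
{
	unsigned int fifosize = 64;                           /* PXA UART TX FIFO depth */
	unsigned int room = tx_room_guaranteed(fifosize, 2);  /* IRQ at half empty */

	printf("fifosize=%u, safe tx_loadsz=%u\n", fifosize, room);
	/* Writing the full 64 bytes here could overrun by up to 32 bytes. */
	return 0;
}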
Fixes: ab28f51c77cd ("serial: rewrite pxa2xx-uart to use 8250_core") Signed-off-by: Doug Brown Link: https://lore.kernel.org/r/20240519191929.122202-1-doug@schmorgal.com Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_pxa.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c index f1a51b00b1b9d..ba96fa913e7f9 100644 --- a/drivers/tty/serial/8250/8250_pxa.c +++ b/drivers/tty/serial/8250/8250_pxa.c @@ -125,6 +125,7 @@ static int serial_pxa_probe(struct platform_device *pdev) uart.port.iotype = UPIO_MEM32; uart.port.regshift = 2; uart.port.fifosize = 64; + uart.tx_loadsz = 32; uart.dl_write = serial_pxa_dl_write; ret = serial8250_register_8250_port(&uart); From ca84cd379b45e9b1775b9e026f069a3a886b409d Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Fri, 31 May 2024 08:09:18 -0700 Subject: [PATCH 243/537] serial: port: Don't block system suspend even if bytes are left to xmit Recently, suspend testing on sc7180-trogdor based devices has started to sometimes fail with messages like this: port a88000.serial:0.0: PM: calling pm_runtime_force_suspend+0x0/0xf8 @ 28934, parent: a88000.serial:0 port a88000.serial:0.0: PM: dpm_run_callback(): pm_runtime_force_suspend+0x0/0xf8 returns -16 port a88000.serial:0.0: PM: pm_runtime_force_suspend+0x0/0xf8 returned -16 after 33 usecs port a88000.serial:0.0: PM: failed to suspend: error -16 I could reproduce these problems by logging in via an agetty on the debug serial port (which was _not_ used for kernel console) and running: cat /var/log/messages ...and then (via an SSH session) forcing a few suspend/resume cycles. Tracing through the code and doing some printf()-based debugging shows that the -16 (-EBUSY) comes from the recently added serial_port_runtime_suspend(). The idea of the serial_port_runtime_suspend() function is to prevent the port from being _runtime_ suspended if it still has bytes left to transmit. Having bytes left to transmit isn't a reason to block _system_ suspend, though. If a serdev device in the kernel needs to block system suspend it should block its own suspend and it can use serdev_device_wait_until_sent() to ensure bytes are sent. The DEFINE_RUNTIME_DEV_PM_OPS() used by the serial_port code means that the system suspend function will be pm_runtime_force_suspend(). In pm_runtime_force_suspend() we can see that before calling the runtime suspend function we'll call pm_runtime_disable(). This should be a reliable way to detect that we're called from system suspend and that we shouldn't look for busyness. Fixes: 43066e32227e ("serial: port: Don't suspend if the port is still busy") Cc: stable@vger.kernel.org Reviewed-by: Tony Lindgren Signed-off-by: Douglas Anderson Link: https://lore.kernel.org/r/20240531080914.v3.1.I2395e66cf70c6e67d774c56943825c289b9c13e4@changeid Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/serial_port.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c index 91a338d3cb343..d35f1d24156c2 100644 --- a/drivers/tty/serial/serial_port.c +++ b/drivers/tty/serial/serial_port.c @@ -64,6 +64,13 @@ static int serial_port_runtime_suspend(struct device *dev) if (port->flags & UPF_DEAD) return 0; + /* + * Nothing to do on pm_runtime_force_suspend(), see + * DEFINE_RUNTIME_DEV_PM_OPS. 
+ */ + if (!pm_runtime_enabled(dev)) + return 0; + uart_port_lock_irqsave(port, &flags); if (!port_dev->tx_enabled) { uart_port_unlock_irqrestore(port, flags); From 4e534ff4b69c6960a165cab2c851b48f0a0da945 Mon Sep 17 00:00:00 2001 From: Hugo Villeneuve Date: Mon, 3 Jun 2024 11:26:00 -0400 Subject: [PATCH 244/537] serial: sc16is7xx: rename Kconfig CONFIG_SERIAL_SC16IS7XX_CORE Commit d49216438139 ("serial: sc16is7xx: split into core and I2C/SPI parts (core)") renamed SERIAL_SC16IS7XX_CORE by SERIAL_SC16IS7XX. This means that some configs should have been updated when I submitted the original patch, but unfortunately they were not. Geert mentioned for example: arch/mips/configs/cu1??0-neo_defconfig Rename SERIAL_SC16IS7XX to SERIAL_SC16IS7XX_CORE so that existing configs will still work correctly. Suggested-by: Geert Uytterhoeven Fixes: d49216438139 ("serial: sc16is7xx: split into core and I2C/SPI parts (core)") Signed-off-by: Hugo Villeneuve Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20240603152601.3689319-2-hugo@hugovil.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/Kconfig | 2 +- drivers/tty/serial/Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 4fdd7857ef4df..2a0da35c0ef8f 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1023,7 +1023,7 @@ config SERIAL_SCCNXP_CONSOLE help Support for console on SCCNXP serial ports. -config SERIAL_SC16IS7XX_CORE +config SERIAL_SC16IS7XX tristate "NXP SC16IS7xx UART support" select SERIAL_CORE select SERIAL_SC16IS7XX_SPI if SPI_MASTER diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index faa45f2b8bb05..6ff74f0a9530c 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -75,7 +75,7 @@ obj-$(CONFIG_SERIAL_SA1100) += sa1100.o obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o -obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o +obj-$(CONFIG_SERIAL_SC16IS7XX) += sc16is7xx.o obj-$(CONFIG_SERIAL_SC16IS7XX_SPI) += sc16is7xx_spi.o obj-$(CONFIG_SERIAL_SC16IS7XX_I2C) += sc16is7xx_i2c.o obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o From 7a2e8e30ad89f498b206977396d028e10174390a Mon Sep 17 00:00:00 2001 From: Hugo Villeneuve Date: Mon, 3 Jun 2024 11:26:01 -0400 Subject: [PATCH 245/537] serial: sc16is7xx: re-add Kconfig SPI or I2C dependency Commit d49216438139 ("serial: sc16is7xx: split into core and I2C/SPI parts (core)") removed Kconfig SPI_MASTER or I2C dependency for SERIAL_SC16IS7XX (core). This removal was done because I inadvertently misinterpreted some review comments. Because of that, the driver question now pops up if both I2C and SPI_MASTER are disabled. Re-add Kconfig SPI_MASTER or I2C dependency to fix the problem. 
Suggested-by: Geert Uytterhoeven Fixes: d49216438139 ("serial: sc16is7xx: split into core and I2C/SPI parts (core)") Signed-off-by: Hugo Villeneuve Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20240603152601.3689319-3-hugo@hugovil.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 2a0da35c0ef8f..28e4beeabf8f3 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1025,6 +1025,7 @@ config SERIAL_SCCNXP_CONSOLE config SERIAL_SC16IS7XX tristate "NXP SC16IS7xx UART support" + depends on SPI_MASTER || I2C select SERIAL_CORE select SERIAL_SC16IS7XX_SPI if SPI_MASTER select SERIAL_SC16IS7XX_I2C if I2C From ae01e52da244af5d650378ada1bfd2d946dc1b45 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Wed, 29 May 2024 00:05:53 +0900 Subject: [PATCH 246/537] serial: drop debugging WARN_ON_ONCE() from uart_write() syzbot is reporting lockdep warning upon int disc = 7; ioctl(open("/dev/ttyS3", O_RDONLY), TIOCSETD, &disc); sequence. Do like what commit 5f1149d2f4bf ("serial: drop debugging WARN_ON_ONCE() from uart_put_char()") does. Reported-by: syzbot+f78380e4eae53c64125c@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=f78380e4eae53c64125c Signed-off-by: Tetsuo Handa Acked-by: Jiri Slaby Link: https://lore.kernel.org/r/d775ae2d-a2ac-439e-8e2b-134749f60f30@I-love.SAKURA.ne.jp Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/serial_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 2c1a0254d3f4a..0c4d609766633 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -622,7 +622,7 @@ static ssize_t uart_write(struct tty_struct *tty, const u8 *buf, size_t count) return -EL3HLT; port = uart_port_lock(state, flags); - if (WARN_ON_ONCE(!state->port.xmit_buf)) { + if (!state->port.xmit_buf) { uart_port_unlock(port, flags); return 0; } From 718d4a63c0a62d16af1d0425d515d7e76f35681e Mon Sep 17 00:00:00 2001 From: Peter Chen Date: Fri, 17 May 2024 10:36:48 +0800 Subject: [PATCH 247/537] Revert "usb: chipidea: move ci_ulpi_init after the phy initialization" This reverts commit 22ffd399e6e7aa18ae0314278ed0b7f05f8ab679. People report this commit causes the driver defer probed, and never back to work[1][2]. 
[1] https://lore.kernel.org/lkml/20240407011913.GA168730@nchen-desktop/T/#mc2b93bc11a8b01ec7cd0d0bf6b0b03951d9ef751 [2] https://lore.kernel.org/lkml/20240407011913.GA168730@nchen-desktop/T/#me87d9a2a76c07619d83b3879ea14780da89fbbbf Cc: Michael Grzeschik Cc: Marek Szyprowski Cc: Wouter Franken Signed-off-by: Peter Chen Link: https://lore.kernel.org/r/20240517023648.3459188-1-peter.chen@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/chipidea/core.c | 8 ++++---- drivers/usb/chipidea/ulpi.c | 5 +++++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index bada13f704b62..835bf2428dc6e 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -1084,6 +1084,10 @@ static int ci_hdrc_probe(struct platform_device *pdev) return -ENODEV; } + ret = ci_ulpi_init(ci); + if (ret) + return ret; + if (ci->platdata->phy) { ci->phy = ci->platdata->phy; } else if (ci->platdata->usb_phy) { @@ -1138,10 +1142,6 @@ static int ci_hdrc_probe(struct platform_device *pdev) goto ulpi_exit; } - ret = ci_ulpi_init(ci); - if (ret) - return ret; - ci->hw_bank.phys = res->start; ci->irq = platform_get_irq(pdev, 0); diff --git a/drivers/usb/chipidea/ulpi.c b/drivers/usb/chipidea/ulpi.c index 89fb51e2c3ded..dfec07e8ae1d2 100644 --- a/drivers/usb/chipidea/ulpi.c +++ b/drivers/usb/chipidea/ulpi.c @@ -68,6 +68,11 @@ int ci_ulpi_init(struct ci_hdrc *ci) if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI) return 0; + /* + * Set PORTSC correctly so we can read/write ULPI registers for + * identification purposes + */ + hw_phymode_configure(ci); ci->ulpi_ops.read = ci_ulpi_read; ci->ulpi_ops.write = ci_ulpi_write; From 415ce0ea55c5a3afea501a773e002be9ed7149f5 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 3 Jun 2024 13:56:53 -0600 Subject: [PATCH 248/537] io_uring/napi: fix timeout calculation Not quite sure what __io_napi_adjust_timeout() was attemping to do, it's adjusting both the NAPI timeout and the general overall timeout, and calculating a value that is never used. The overall timeout is a super set of the NAPI timeout, and doesn't need adjusting. The only thing we really need to care about is that the NAPI timeout doesn't exceed the overall timeout. If a user asked for a timeout of eg 5 usec and NAPI timeout is 10 usec, then we should not spin for 10 usec. While in there, sanitize the time checking a bit. If we have a negative value in the passed in timeout, discard it. Round up the value as well, so we don't end up with a NAPI timeout for the majority of the wait, with only a tiny sleep value at the end. Hence the only case we need to care about is if the NAPI timeout is larger than the overall timeout. If it is, cap the NAPI timeout at what the overall timeout is. 
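In other words, the only adjustment that matters is a clamp: convert the requested wait to whole microseconds (rounding up), ignore non-positive waits, and never let the NAPI busy-poll budget exceed the overall wait. A standalone sketch of that clamp under those assumptions; napi_cap_busy_poll() is an invented name for illustration, not an io_uring function:

/*
 * Standalone sketch of the clamp this patch implements: the NAPI
 * busy-poll budget (in usec) must never exceed the overall wait
 * timeout the caller asked for.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t napi_cap_busy_poll(uint32_t napi_to_us, int64_t wait_ns)
{
	if (wait_ns <= 0)                       /* no or negative wait: keep as-is */
		return napi_to_us;

	/* Round the wait up to whole microseconds before comparing. */
	uint64_t wait_us = ((uint64_t)wait_ns + 999) / 1000;

	return wait_us < napi_to_us ? (uint32_t)wait_us : napi_to_us;
}

int main(void)
{
	/* User waits 5 usec overall, NAPI configured for 10 usec: spin only 5. */
	printf("capped to %u usec\n", (unsigned)napi_cap_busy_poll(10, 5000));
	/* Overall wait is larger: the 10 usec NAPI budget is left untouched. */
	printf("capped to %u usec\n", (unsigned)napi_cap_busy_poll(10, 50000));
	return 0;
}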
Cc: stable@vger.kernel.org Fixes: 8d0c12a80cde ("io-uring: add napi busy poll support") Reported-by: Lewis Baker Signed-off-by: Jens Axboe --- io_uring/napi.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/io_uring/napi.c b/io_uring/napi.c index 883a1a6659075..8c18ede595c41 100644 --- a/io_uring/napi.c +++ b/io_uring/napi.c @@ -261,12 +261,14 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg) } /* - * __io_napi_adjust_timeout() - Add napi id to the busy poll list + * __io_napi_adjust_timeout() - adjust busy loop timeout * @ctx: pointer to io-uring context structure * @iowq: pointer to io wait queue * @ts: pointer to timespec or NULL * * Adjust the busy loop timeout according to timespec and busy poll timeout. + * If the specified NAPI timeout is bigger than the wait timeout, then adjust + * the NAPI timeout accordingly. */ void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq, struct timespec64 *ts) @@ -274,16 +276,16 @@ void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iow unsigned int poll_to = READ_ONCE(ctx->napi_busy_poll_to); if (ts) { - struct timespec64 poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to); - - if (timespec64_compare(ts, &poll_to_ts) > 0) { - *ts = timespec64_sub(*ts, poll_to_ts); - } else { - u64 to = timespec64_to_ns(ts); - - do_div(to, 1000); - ts->tv_sec = 0; - ts->tv_nsec = 0; + struct timespec64 poll_to_ts; + + poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to); + if (timespec64_compare(ts, &poll_to_ts) < 0) { + s64 poll_to_ns = timespec64_to_ns(ts); + if (poll_to_ns > 0) { + u64 val = poll_to_ns + 999; + do_div(val, (s64) 1000); + poll_to = val; + } } } From fc3568f142cc5fe071d83be02d1851717d1fbf66 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 3 Jun 2024 12:00:07 +0200 Subject: [PATCH 249/537] usb: typec: ucsi: glink: increase max ports for x1e80100 The new X Elite (x1e80100) platform has three ports so increase the maximum so that all ports can be registered. Signed-off-by: Johan Hovold Reviewed-by: Dmitry Baryshkov Reviewed-by: Heikki Krogerus Reviewed-by: Abel Vesa Link: https://lore.kernel.org/r/20240603100007.10236-1-johan+linaro@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/typec/ucsi/ucsi_glink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c index f7546bb488c32..985a880e86da4 100644 --- a/drivers/usb/typec/ucsi/ucsi_glink.c +++ b/drivers/usb/typec/ucsi/ucsi_glink.c @@ -14,7 +14,7 @@ #include #include "ucsi.h" -#define PMIC_GLINK_MAX_PORTS 2 +#define PMIC_GLINK_MAX_PORTS 3 #define UCSI_BUF_SIZE 48 From 8475ffcfb381a77075562207ce08552414a80326 Mon Sep 17 00:00:00 2001 From: John Ernberg Date: Fri, 17 May 2024 11:43:52 +0000 Subject: [PATCH 250/537] USB: xen-hcd: Traverse host/ when CONFIG_USB_XEN_HCD is selected If no other USB HCDs are selected when compiling a small pure virutal machine, the Xen HCD driver cannot be built. Fix it by traversing down host/ if CONFIG_USB_XEN_HCD is selected. 
Fixes: 494ed3997d75 ("usb: Introduce Xen pvUSB frontend (xen hcd)") Cc: stable@vger.kernel.org # v5.17+ Signed-off-by: John Ernberg Link: https://lore.kernel.org/r/20240517114345.1190755-1-john.ernberg@actia.se Signed-off-by: Greg Kroah-Hartman --- drivers/usb/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index 3a9a0dd4be706..949eca0adebea 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -35,6 +35,7 @@ obj-$(CONFIG_USB_R8A66597_HCD) += host/ obj-$(CONFIG_USB_FSL_USB2) += host/ obj-$(CONFIG_USB_FOTG210_HCD) += host/ obj-$(CONFIG_USB_MAX3421_HCD) += host/ +obj-$(CONFIG_USB_XEN_HCD) += host/ obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ From e7e921918d905544500ca7a95889f898121ba886 Mon Sep 17 00:00:00 2001 From: Amit Sunil Dhamne Date: Tue, 14 May 2024 15:01:31 -0700 Subject: [PATCH 251/537] usb: typec: tcpm: fix use-after-free case in tcpm_register_source_caps There could be a potential use-after-free case in tcpm_register_source_caps(). This could happen when: * new (say invalid) source caps are advertised * the existing source caps are unregistered * tcpm_register_source_caps() returns with an error as usb_power_delivery_register_capabilities() fails This causes port->partner_source_caps to hold on to the now freed source caps. Reset port->partner_source_caps value to NULL after unregistering existing source caps. Fixes: 230ecdf71a64 ("usb: typec: tcpm: unregister existing source caps before re-registration") Cc: stable@vger.kernel.org Signed-off-by: Amit Sunil Dhamne Reviewed-by: Ondrej Jirman Reviewed-by: Heikki Krogerus Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20240514220134.2143181-1-amitsd@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/typec/tcpm/tcpm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 8a1af08f71b64..be4127ef84e99 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -3014,8 +3014,10 @@ static int tcpm_register_source_caps(struct tcpm_port *port) memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps); caps.role = TYPEC_SOURCE; - if (cap) + if (cap) { usb_power_delivery_unregister_capabilities(cap); + port->partner_source_caps = NULL; + } cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps); if (IS_ERR(cap)) From fc8fb9eea94d8f476e15f3a4a7addeb16b3b99d6 Mon Sep 17 00:00:00 2001 From: Kyle Tso Date: Mon, 20 May 2024 23:48:58 +0800 Subject: [PATCH 252/537] usb: typec: tcpm: Ignore received Hard Reset in TOGGLING state Similar to what fixed in Commit a6fe37f428c1 ("usb: typec: tcpm: Skip hard reset when in error recovery"), the handling of the received Hard Reset has to be skipped during TOGGLING state. 
[ 4086.021288] VBUS off [ 4086.021295] pending state change SNK_READY -> SNK_UNATTACHED @ 650 ms [rev2 NONE_AMS] [ 4086.022113] VBUS VSAFE0V [ 4086.022117] state change SNK_READY -> SNK_UNATTACHED [rev2 NONE_AMS] [ 4086.022447] VBUS off [ 4086.022450] state change SNK_UNATTACHED -> SNK_UNATTACHED [rev2 NONE_AMS] [ 4086.023060] VBUS VSAFE0V [ 4086.023064] state change SNK_UNATTACHED -> SNK_UNATTACHED [rev2 NONE_AMS] [ 4086.023070] disable BIST MODE TESTDATA [ 4086.023766] disable vbus discharge ret:0 [ 4086.023911] Setting usb_comm capable false [ 4086.028874] Setting voltage/current limit 0 mV 0 mA [ 4086.028888] polarity 0 [ 4086.030305] Requesting mux state 0, usb-role 0, orientation 0 [ 4086.033539] Start toggling [ 4086.038496] state change SNK_UNATTACHED -> TOGGLING [rev2 NONE_AMS] // This Hard Reset is unexpected [ 4086.038499] Received hard reset [ 4086.038501] state change TOGGLING -> HARD_RESET_START [rev2 HARD_RESET] Fixes: f0690a25a140 ("staging: typec: USB Type-C Port Manager (tcpm)") Cc: stable@vger.kernel.org Signed-off-by: Kyle Tso Reviewed-by: Heikki Krogerus Link: https://lore.kernel.org/r/20240520154858.1072347-1-kyletso@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/typec/tcpm/tcpm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index be4127ef84e99..5d4da962acc87 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -6174,6 +6174,7 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port) port->tcpc->set_bist_data(port->tcpc, false); switch (port->state) { + case TOGGLING: case ERROR_RECOVERY: case PORT_RESET: case PORT_RESET_WAIT_OFF: From e4228cfd092351c2d9b1a3048b2070287291ccbb Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" Date: Thu, 23 May 2024 14:44:59 -0500 Subject: [PATCH 253/537] dt-bindings: usb: realtek,rts5411: Add missing "additionalProperties" on child nodes All nodes need an explicit additionalProperties or unevaluatedProperties unless a $ref has one that's false. As that is not the case with usb-device.yaml, "additionalProperties" is needed here. Fixes: c44d9dab31d6 ("dt-bindings: usb: Add downstream facing ports to realtek binding") Signed-off-by: "Rob Herring (Arm)" Reviewed-by: Stephen Boyd Link: https://lore.kernel.org/r/20240523194500.2958192-1-robh@kernel.org Signed-off-by: Greg Kroah-Hartman --- Documentation/devicetree/bindings/usb/realtek,rts5411.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml index 0874fc21f66fb..6577a61cc0753 100644 --- a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml +++ b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml @@ -65,6 +65,7 @@ patternProperties: description: The hard wired USB devices type: object $ref: /schemas/usb/usb-device.yaml + additionalProperties: true required: - peer-hub From f85d39dd7ed89ffdd622bc1de247ffba8d961504 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Mon, 27 May 2024 19:35:38 +0200 Subject: [PATCH 254/537] kcov, usb: disable interrupts in kcov_remote_start_usb_softirq After commit 8fea0c8fda30 ("usb: core: hcd: Convert from tasklet to BH workqueue"), usb_giveback_urb_bh() runs in the BH workqueue with interrupts enabled. Thus, the remote coverage collection section in usb_giveback_urb_bh()-> __usb_hcd_giveback_urb() might be interrupted, and the interrupt handler might invoke __usb_hcd_giveback_urb() again. 
This breaks KCOV, as it does not support nested remote coverage collection sections within the same context (neither in task nor in softirq). Update kcov_remote_start/stop_usb_softirq() to disable interrupts for the duration of the coverage collection section to avoid nested sections in the softirq context (in addition to such in the task context, which are already handled). Reported-by: Tetsuo Handa Closes: https://lore.kernel.org/linux-usb/0f4d1964-7397-485b-bc48-11c01e2fcbca@I-love.SAKURA.ne.jp/ Closes: https://syzkaller.appspot.com/bug?extid=0438378d6f157baae1a2 Suggested-by: Alan Stern Fixes: 8fea0c8fda30 ("usb: core: hcd: Convert from tasklet to BH workqueue") Cc: stable@vger.kernel.org Acked-by: Dmitry Vyukov Signed-off-by: Andrey Konovalov Link: https://lore.kernel.org/r/20240527173538.4989-1-andrey.konovalov@linux.dev Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/hcd.c | 12 ++++++----- include/linux/kcov.h | 47 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 45 insertions(+), 14 deletions(-) diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index e3366f4d82b96..1ff7d901fedea 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1623,6 +1623,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb) struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus); struct usb_anchor *anchor = urb->anchor; int status = urb->unlinked; + unsigned long flags; urb->hcpriv = NULL; if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) && @@ -1640,13 +1641,14 @@ static void __usb_hcd_giveback_urb(struct urb *urb) /* pass ownership to the completion handler */ urb->status = status; /* - * This function can be called in task context inside another remote - * coverage collection section, but kcov doesn't support that kind of - * recursion yet. Only collect coverage in softirq context for now. + * Only collect coverage in the softirq context and disable interrupts + * to avoid scenarios with nested remote coverage collection sections + * that KCOV does not support. + * See the comment next to kcov_remote_start_usb_softirq() for details. */ - kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum); + flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum); urb->complete(urb); - kcov_remote_stop_softirq(); + kcov_remote_stop_softirq(flags); usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); diff --git a/include/linux/kcov.h b/include/linux/kcov.h index b851ba415e03f..1068a7318d897 100644 --- a/include/linux/kcov.h +++ b/include/linux/kcov.h @@ -55,21 +55,47 @@ static inline void kcov_remote_start_usb(u64 id) /* * The softirq flavor of kcov_remote_*() functions is introduced as a temporary - * work around for kcov's lack of nested remote coverage sections support in - * task context. Adding support for nested sections is tracked in: - * https://bugzilla.kernel.org/show_bug.cgi?id=210337 + * workaround for KCOV's lack of nested remote coverage sections support. + * + * Adding support is tracked in https://bugzilla.kernel.org/show_bug.cgi?id=210337. + * + * kcov_remote_start_usb_softirq(): + * + * 1. Only collects coverage when called in the softirq context. This allows + * avoiding nested remote coverage collection sections in the task context. + * For example, USB/IP calls usb_hcd_giveback_urb() in the task context + * within an existing remote coverage collection section. Thus, KCOV should + * not attempt to start collecting coverage within the coverage collection + * section in __usb_hcd_giveback_urb() in this case. + * + * 2. 
Disables interrupts for the duration of the coverage collection section. + * This allows avoiding nested remote coverage collection sections in the + * softirq context (a softirq might occur during the execution of a work in + * the BH workqueue, which runs with in_serving_softirq() > 0). + * For example, usb_giveback_urb_bh() runs in the BH workqueue with + * interrupts enabled, so __usb_hcd_giveback_urb() might be interrupted in + * the middle of its remote coverage collection section, and the interrupt + * handler might invoke __usb_hcd_giveback_urb() again. */ -static inline void kcov_remote_start_usb_softirq(u64 id) +static inline unsigned long kcov_remote_start_usb_softirq(u64 id) { - if (in_serving_softirq()) + unsigned long flags = 0; + + if (in_serving_softirq()) { + local_irq_save(flags); kcov_remote_start_usb(id); + } + + return flags; } -static inline void kcov_remote_stop_softirq(void) +static inline void kcov_remote_stop_softirq(unsigned long flags) { - if (in_serving_softirq()) + if (in_serving_softirq()) { kcov_remote_stop(); + local_irq_restore(flags); + } } #ifdef CONFIG_64BIT @@ -103,8 +129,11 @@ static inline u64 kcov_common_handle(void) } static inline void kcov_remote_start_common(u64 id) {} static inline void kcov_remote_start_usb(u64 id) {} -static inline void kcov_remote_start_usb_softirq(u64 id) {} -static inline void kcov_remote_stop_softirq(void) {} +static inline unsigned long kcov_remote_start_usb_softirq(u64 id) +{ + return 0; +} +static inline void kcov_remote_stop_softirq(unsigned long flags) {} #endif /* CONFIG_KCOV */ #endif /* _LINUX_KCOV_H */ From 8bdf8a42bca4f47646fd105a387ab6926948c7f1 Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Fri, 31 May 2024 13:46:52 +0300 Subject: [PATCH 255/537] usb: typec: ucsi: Ack also failed Get Error commands It is possible that also the GET_ERROR command fails. If that happens, the command completion still needs to be acknowledged. Otherwise the interface will be stuck until it's reset. Reported-by: Ammy Yi Fixes: bdc62f2bae8f ("usb: typec: ucsi: Simplified registration and I/O API") Cc: stable@vger.kernel.org Signed-off-by: Heikki Krogerus Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20240531104653.1303519-1-heikki.krogerus@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/typec/ucsi/ucsi.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index cb52e7b0a2c5c..2cc7aedd490f1 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -153,8 +153,13 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) } if (cci & UCSI_CCI_ERROR) { - if (cmd == UCSI_GET_ERROR_STATUS) + if (cmd == UCSI_GET_ERROR_STATUS) { + ret = ucsi_acknowledge(ucsi, false); + if (ret) + return ret; + return -EIO; + } return ucsi_read_error(ucsi); } From 16637fea001ab3c8df528a8995b3211906165a30 Mon Sep 17 00:00:00 2001 From: Shichao Lai Date: Sun, 26 May 2024 09:27:45 +0800 Subject: [PATCH 256/537] usb-storage: alauda: Check whether the media is initialized The member "uzonesize" of struct alauda_info will remain 0 if alauda_init_media() fails, potentially causing divide errors in alauda_read_data() and alauda_write_lba(). - Add a member "media_initialized" to struct alauda_info. - Change a condition in alauda_check_media() to ensure the first initialization. - Add an error check for the return value of alauda_init_media(). 
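The pattern at the heart of the change listed above is guarding every consumer of a value that only becomes valid after a successful initialization, instead of assuming the init ran. An illustrative, self-contained sketch with simplified stand-in names, not the real usb-storage structures:

/*
 * Illustrative sketch of the guard added here: a geometry value that is
 * only valid after a successful media init must never be used as a
 * divisor beforehand.
 */
#include <stdbool.h>
#include <stdio.h>

struct media_info {
	unsigned int uzonesize;     /* stays 0 until init succeeds */
	bool media_initialized;
};

static int init_media(struct media_info *info, bool ok)
{
	if (!ok)
		return -1;          /* leave uzonesize at 0 on failure */
	info->uzonesize = 2048;
	return 0;
}

static int read_data(const struct media_info *info, unsigned int address)
{
	if (!info->media_initialized || !info->uzonesize)
		return -1;          /* would otherwise divide by zero below */
	printf("zone %u\n", address / info->uzonesize);
	return 0;
}

int main(void)
{
	struct media_info info = { 0 };

	/* Media change detected, or never initialized: (re)try init first. */
	if (!info.media_initialized && init_media(&info, true) == 0)
		info.media_initialized = true;

	return read_data(&info, 123456);
}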
Fixes: e80b0fade09e ("[PATCH] USB Storage: add alauda support") Reported-by: xingwei lee Reported-by: yue sun Reviewed-by: Alan Stern Signed-off-by: Shichao Lai Link: https://lore.kernel.org/r/20240526012745.2852061-1-shichaorai@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/alauda.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c index 115f05a6201a1..40d34cc28344a 100644 --- a/drivers/usb/storage/alauda.c +++ b/drivers/usb/storage/alauda.c @@ -105,6 +105,8 @@ struct alauda_info { unsigned char sense_key; unsigned long sense_asc; /* additional sense code */ unsigned long sense_ascq; /* additional sense code qualifier */ + + bool media_initialized; }; #define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) ) @@ -476,11 +478,12 @@ static int alauda_check_media(struct us_data *us) } /* Check for media change */ - if (status[0] & 0x08) { + if (status[0] & 0x08 || !info->media_initialized) { usb_stor_dbg(us, "Media change detected\n"); alauda_free_maps(&MEDIA_INFO(us)); - alauda_init_media(us); - + rc = alauda_init_media(us); + if (rc == USB_STOR_TRANSPORT_GOOD) + info->media_initialized = true; info->sense_key = UNIT_ATTENTION; info->sense_asc = 0x28; info->sense_ascq = 0x00; From 91215f70ea8541e9011c0b48f8b59b9e0ce6953b Mon Sep 17 00:00:00 2001 From: Su Hui Date: Tue, 4 Jun 2024 20:12:43 +0800 Subject: [PATCH 257/537] io_uring/io-wq: avoid garbage value of 'match' in io_wq_enqueue() Clang static checker (scan-build) warning: o_uring/io-wq.c:line 1051, column 3 The expression is an uninitialized value. The computed value will also be garbage. 'match.nr_pending' is used in io_acct_cancel_pending_work(), but it is not fully initialized. Change the order of assignment for 'match' to fix this problem. Fixes: 42abc95f05bf ("io-wq: decouple work_list protection from the big wqe->lock") Signed-off-by: Su Hui Link: https://lore.kernel.org/r/20240604121242.2661244-1-suhui@nfschina.com Signed-off-by: Jens Axboe --- io_uring/io-wq.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index d1c47a9d92158..7d3316fe9bfc4 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -927,7 +927,11 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) { struct io_wq_acct *acct = io_work_get_acct(wq, work); unsigned long work_flags = work->flags; - struct io_cb_cancel_data match; + struct io_cb_cancel_data match = { + .fn = io_wq_work_match_item, + .data = work, + .cancel_all = false, + }; bool do_create; /* @@ -965,10 +969,6 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work) raw_spin_unlock(&wq->lock); /* fatal condition, failed to create the first worker */ - match.fn = io_wq_work_match_item, - match.data = work, - match.cancel_all = false, - io_acct_cancel_pending_work(wq, acct, &match); } } From 73254a297c2dd094abec7c9efee32455ae875bdf Mon Sep 17 00:00:00 2001 From: Hagar Hemdan Date: Tue, 4 Jun 2024 13:05:27 +0000 Subject: [PATCH 258/537] io_uring: fix possible deadlock in io_register_iowq_max_workers() The io_register_iowq_max_workers() function calls io_put_sq_data(), which acquires the sqd->lock without releasing the uring_lock. Similar to the commit 009ad9f0c6ee ("io_uring: drop ctx->uring_lock before acquiring sqd->lock"), this can lead to a potential deadlock situation. To resolve this issue, the uring_lock is released before calling io_put_sq_data(), and then it is re-acquired after the function call. 
This change ensures that the locks are acquired in the correct order, preventing the possibility of a deadlock. Suggested-by: Maximilian Heyne Signed-off-by: Hagar Hemdan Link: https://lore.kernel.org/r/20240604130527.3597-1-hagarhem@amazon.com Signed-off-by: Jens Axboe --- io_uring/register.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/io_uring/register.c b/io_uring/register.c index ef8c908346a4e..c0010a66a6f2c 100644 --- a/io_uring/register.c +++ b/io_uring/register.c @@ -355,8 +355,10 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, } if (sqd) { + mutex_unlock(&ctx->uring_lock); mutex_unlock(&sqd->lock); io_put_sq_data(sqd); + mutex_lock(&ctx->uring_lock); } if (copy_to_user(arg, new_count, sizeof(new_count))) @@ -380,8 +382,10 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, return 0; err: if (sqd) { + mutex_unlock(&ctx->uring_lock); mutex_unlock(&sqd->lock); io_put_sq_data(sqd); + mutex_lock(&ctx->uring_lock); } return ret; } From 971187350602d03c4a27c0783ff412502b95720a Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 4 Jul 2023 14:17:19 +0100 Subject: [PATCH 259/537] driver core: remove devm_device_add_groups() There is no more in-kernel users of this function, and no driver should ever be using it, so remove it from the kernel. Acked-by: Dmitry Torokhov Acked-by: "Rafael J. Wysocki" Link: https://lore.kernel.org/r/20230704131715.44454-8-gregkh@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- drivers/base/core.c | 45 ------------------------------------------ include/linux/device.h | 2 -- 2 files changed, 47 deletions(-) diff --git a/drivers/base/core.c b/drivers/base/core.c index 131d96c6090be..2e776fcc31b69 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2845,15 +2845,6 @@ static void devm_attr_group_remove(struct device *dev, void *res) sysfs_remove_group(&dev->kobj, group); } -static void devm_attr_groups_remove(struct device *dev, void *res) -{ - union device_attr_group_devres *devres = res; - const struct attribute_group **groups = devres->groups; - - dev_dbg(dev, "%s: removing groups %p\n", __func__, groups); - sysfs_remove_groups(&dev->kobj, groups); -} - /** * devm_device_add_group - given a device, create a managed attribute group * @dev: The device to create the group for @@ -2886,42 +2877,6 @@ int devm_device_add_group(struct device *dev, const struct attribute_group *grp) } EXPORT_SYMBOL_GPL(devm_device_add_group); -/** - * devm_device_add_groups - create a bunch of managed attribute groups - * @dev: The device to create the group for - * @groups: The attribute groups to create, NULL terminated - * - * This function creates a bunch of managed attribute groups. If an error - * occurs when creating a group, all previously created groups will be - * removed, unwinding everything back to the original state when this - * function was called. It will explicitly warn and error if any of the - * attribute files being created already exist. - * - * Returns 0 on success or error code from sysfs_create_group on failure. 
- */ -int devm_device_add_groups(struct device *dev, - const struct attribute_group **groups) -{ - union device_attr_group_devres *devres; - int error; - - devres = devres_alloc(devm_attr_groups_remove, - sizeof(*devres), GFP_KERNEL); - if (!devres) - return -ENOMEM; - - error = sysfs_create_groups(&dev->kobj, groups); - if (error) { - devres_free(devres); - return error; - } - - devres->groups = groups; - devres_add(dev, devres); - return 0; -} -EXPORT_SYMBOL_GPL(devm_device_add_groups); - static int device_add_attrs(struct device *dev) { const struct class *class = dev->class; diff --git a/include/linux/device.h b/include/linux/device.h index fc3bd7116ab9c..ace039151cb86 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1220,8 +1220,6 @@ static inline void device_remove_group(struct device *dev, return device_remove_groups(dev, groups); } -int __must_check devm_device_add_groups(struct device *dev, - const struct attribute_group **groups); int __must_check devm_device_add_group(struct device *dev, const struct attribute_group *grp); From 44a45be57f85165761fdabf072f9a97aa026ff61 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Thu, 23 May 2024 13:00:00 +0200 Subject: [PATCH 260/537] sysfs: Unbreak the build around sysfs_bin_attr_simple_read() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Günter reports build breakage for m68k "m5208evb_defconfig" plus CONFIG_BLK_DEV_INITRD=y caused by commit 66bc1a173328 ("treewide: Use sysfs_bin_attr_simple_read() helper"). The defconfig disables CONFIG_SYSFS, so sysfs_bin_attr_simple_read() is not compiled into the kernel. But init/initramfs.c references that function in the initializer of a struct bin_attribute. Add an empty static inline to avoid the build breakage. Fixes: 66bc1a173328 ("treewide: Use sysfs_bin_attr_simple_read() helper") Reported-by: Guenter Roeck Closes: https://lore.kernel.org/r/e12b0027-b199-4de7-b83d-668171447ccc@roeck-us.net Signed-off-by: Lukas Wunner Tested-by: Guenter Roeck Reviewed-by: Rafael J. Wysocki Link: https://lore.kernel.org/r/05f4290439a58730738a15b0c99cd8576c4aa0d9.1716461752.git.lukas@wunner.de Signed-off-by: Greg Kroah-Hartman --- include/linux/sysfs.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index a7d725fbf7393..c4e64dc112063 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -750,6 +750,15 @@ static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...) { return 0; } + +static inline ssize_t sysfs_bin_attr_simple_read(struct file *file, + struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, + size_t count) +{ + return 0; +} #endif /* CONFIG_SYSFS */ static inline int __must_check sysfs_create_file(struct kobject *kobj, From 87bb39ed40bdf1596b8820e800226e24eb642677 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 3 Jun 2024 13:28:43 +0100 Subject: [PATCH 261/537] KVM: arm64: Reintroduce __sve_save_state Now that the hypervisor is handling the host sve state in protected mode, it needs to be able to save it. This reverts commit e66425fc9ba3 ("KVM: arm64: Remove unused __sve_save_state"). 
Reviewed-by: Oliver Upton Signed-off-by: Fuad Tabba Link: https://lore.kernel.org/r/20240603122852.3923848-2-tabba@google.com Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_hyp.h | 1 + arch/arm64/kvm/hyp/fpsimd.S | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 3e80464f89531..2ab23589339a0 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -111,6 +111,7 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu); void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); +void __sve_save_state(void *sve_pffr, u32 *fpsr); void __sve_restore_state(void *sve_pffr, u32 *fpsr); u64 __guest_enter(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S index 61e6f3ba7b7d1..e950875e31cee 100644 --- a/arch/arm64/kvm/hyp/fpsimd.S +++ b/arch/arm64/kvm/hyp/fpsimd.S @@ -25,3 +25,9 @@ SYM_FUNC_START(__sve_restore_state) sve_load 0, x1, x2, 3 ret SYM_FUNC_END(__sve_restore_state) + +SYM_FUNC_START(__sve_save_state) + mov x2, #1 + sve_save 0, x1, x2, 3 + ret +SYM_FUNC_END(__sve_save_state) From 45f4ea9bcfe909b3461059990b1e232e55dde809 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 3 Jun 2024 13:28:44 +0100 Subject: [PATCH 262/537] KVM: arm64: Fix prototype for __sve_save_state/__sve_restore_state Since the prototypes for __sve_save_state/__sve_restore_state at hyp were added, the underlying macro has acquired a third parameter for saving/restoring ffr. Fix the prototypes to account for the third parameter, and restore the ffr for the guest since it is saved. Suggested-by: Mark Brown Signed-off-by: Fuad Tabba Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20240603122852.3923848-3-tabba@google.com Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_hyp.h | 4 ++-- arch/arm64/kvm/hyp/include/hyp/switch.h | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 2ab23589339a0..686cce7e4e96c 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -111,8 +111,8 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu); void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); -void __sve_save_state(void *sve_pffr, u32 *fpsr); -void __sve_restore_state(void *sve_pffr, u32 *fpsr); +void __sve_save_state(void *sve_pffr, u32 *fpsr, int save_ffr); +void __sve_restore_state(void *sve_pffr, u32 *fpsr, int restore_ffr); u64 __guest_enter(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index a92566f36022e..d58933ae8fd5c 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -316,7 +316,8 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu) { sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2); __sve_restore_state(vcpu_sve_pffr(vcpu), - &vcpu->arch.ctxt.fp_regs.fpsr); + &vcpu->arch.ctxt.fp_regs.fpsr, + true); write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR); } From 6d8fb3cbf7e06431a607c30c1bc4cd53a62c220a Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 3 Jun 2024 13:28:45 +0100 Subject: [PATCH 263/537] KVM: arm64: Abstract set/clear of CPTR_EL2 bits behind helper The same traps controlled by CPTR_EL2 or CPACR_EL1 need to 
be toggled in different parts of the code, but the exact bits and their polarity differ between these two formats and the mode (vhe/nvhe/hvhe). To reduce the amount of duplicated code and the chance of getting the wrong bit/polarity or missing a field, abstract the set/clear of CPTR_EL2 bits behind a helper. Since (h)VHE is the way of the future, use the CPACR_EL1 format, which is a subset of the VHE CPTR_EL2, as a reference. No functional change intended. Suggested-by: Oliver Upton Reviewed-by: Oliver Upton Signed-off-by: Fuad Tabba Link: https://lore.kernel.org/r/20240603122852.3923848-4-tabba@google.com Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_arm.h | 6 +++ arch/arm64/include/asm/kvm_emulate.h | 62 +++++++++++++++++++++++++ arch/arm64/kvm/hyp/include/hyp/switch.h | 18 ++----- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 6 +-- 4 files changed, 73 insertions(+), 19 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index e01bb5ca13b7c..b2adc2c6c82a5 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -305,6 +305,12 @@ GENMASK(19, 14) | \ BIT(11)) +#define CPTR_VHE_EL2_RES0 (GENMASK(63, 32) | \ + GENMASK(27, 26) | \ + GENMASK(23, 22) | \ + GENMASK(19, 18) | \ + GENMASK(15, 0)) + /* Hyp Debug Configuration Register bits */ #define MDCR_EL2_E2TB_MASK (UL(0x3)) #define MDCR_EL2_E2TB_SHIFT (UL(24)) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 501e3e019c930..2d7a0bdf9d03f 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -557,6 +557,68 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu) vcpu_set_flag((v), e); \ } while (0) +#define __build_check_all_or_none(r, bits) \ + BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits)) + +#define __cpacr_to_cptr_clr(clr, set) \ + ({ \ + u64 cptr = 0; \ + \ + if ((set) & CPACR_ELx_FPEN) \ + cptr |= CPTR_EL2_TFP; \ + if ((set) & CPACR_ELx_ZEN) \ + cptr |= CPTR_EL2_TZ; \ + if ((set) & CPACR_ELx_SMEN) \ + cptr |= CPTR_EL2_TSM; \ + if ((clr) & CPACR_ELx_TTA) \ + cptr |= CPTR_EL2_TTA; \ + if ((clr) & CPTR_EL2_TAM) \ + cptr |= CPTR_EL2_TAM; \ + if ((clr) & CPTR_EL2_TCPAC) \ + cptr |= CPTR_EL2_TCPAC; \ + \ + cptr; \ + }) + +#define __cpacr_to_cptr_set(clr, set) \ + ({ \ + u64 cptr = 0; \ + \ + if ((clr) & CPACR_ELx_FPEN) \ + cptr |= CPTR_EL2_TFP; \ + if ((clr) & CPACR_ELx_ZEN) \ + cptr |= CPTR_EL2_TZ; \ + if ((clr) & CPACR_ELx_SMEN) \ + cptr |= CPTR_EL2_TSM; \ + if ((set) & CPACR_ELx_TTA) \ + cptr |= CPTR_EL2_TTA; \ + if ((set) & CPTR_EL2_TAM) \ + cptr |= CPTR_EL2_TAM; \ + if ((set) & CPTR_EL2_TCPAC) \ + cptr |= CPTR_EL2_TCPAC; \ + \ + cptr; \ + }) + +#define cpacr_clear_set(clr, set) \ + do { \ + BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0); \ + BUILD_BUG_ON((clr) & CPACR_ELx_E0POE); \ + __build_check_all_or_none((clr), CPACR_ELx_FPEN); \ + __build_check_all_or_none((set), CPACR_ELx_FPEN); \ + __build_check_all_or_none((clr), CPACR_ELx_ZEN); \ + __build_check_all_or_none((set), CPACR_ELx_ZEN); \ + __build_check_all_or_none((clr), CPACR_ELx_SMEN); \ + __build_check_all_or_none((set), CPACR_ELx_SMEN); \ + \ + if (has_vhe() || has_hvhe()) \ + sysreg_clear_set(cpacr_el1, clr, set); \ + else \ + sysreg_clear_set(cptr_el2, \ + __cpacr_to_cptr_clr(clr, set), \ + __cpacr_to_cptr_set(clr, set));\ + } while (0) + static __always_inline void kvm_write_cptr_el2(u64 val) { if (has_vhe() || has_hvhe()) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h 
b/arch/arm64/kvm/hyp/include/hyp/switch.h index d58933ae8fd5c..055d2ca7264e0 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -331,7 +331,6 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) { bool sve_guest; u8 esr_ec; - u64 reg; if (!system_supports_fpsimd()) return false; @@ -354,19 +353,10 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) /* Valid trap. Switch the context: */ /* First disable enough traps to allow us to update the registers */ - if (has_vhe() || has_hvhe()) { - reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN; - if (sve_guest) - reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN; - - sysreg_clear_set(cpacr_el1, 0, reg); - } else { - reg = CPTR_EL2_TFP; - if (sve_guest) - reg |= CPTR_EL2_TZ; - - sysreg_clear_set(cptr_el2, reg, 0); - } + if (sve_guest) + cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN); + else + cpacr_clear_set(0, CPACR_ELx_FPEN); isb(); /* Write out the host state if it's in the registers */ diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index d5c48dc98f679..f71394d0e32a7 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -405,11 +405,7 @@ void handle_trap(struct kvm_cpu_context *host_ctxt) handle_host_smc(host_ctxt); break; case ESR_ELx_EC_SVE: - if (has_hvhe()) - sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN | - CPACR_EL1_ZEN_EL0EN)); - else - sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0); + cpacr_clear_set(0, CPACR_ELx_ZEN); isb(); sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2); break; From e511e08a9f496948b13aac50610f2d17335f56c3 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 3 Jun 2024 13:28:46 +0100 Subject: [PATCH 264/537] KVM: arm64: Specialize handling of host fpsimd state on trap In subsequent patches, n/vhe will diverge on saving the host fpsimd/sve state when taking a guest fpsimd/sve trap. Add a specialized helper to handle it. No functional change intended. Reviewed-by: Mark Brown Reviewed-by: Oliver Upton Signed-off-by: Fuad Tabba Link: https://lore.kernel.org/r/20240603122852.3923848-5-tabba@google.com Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/include/hyp/switch.h | 4 +++- arch/arm64/kvm/hyp/nvhe/switch.c | 5 +++++ arch/arm64/kvm/hyp/vhe/switch.c | 5 +++++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 055d2ca7264e0..9b904c858df08 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -321,6 +321,8 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu) write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR); } +static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu); + /* * We trap the first access to the FP/SIMD to save the host context and * restore the guest context lazily. 
@@ -361,7 +363,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) /* Write out the host state if it's in the registers */ if (host_owns_fp_regs()) - __fpsimd_save_state(*host_data_ptr(fpsimd_state)); + kvm_hyp_save_fpsimd_host(vcpu); /* Restore the guest state */ if (sve_guest) diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 6758cd9055706..019f863922fa4 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -182,6 +182,11 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code) kvm_handle_pvm_sysreg(vcpu, exit_code)); } +static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu) +{ + __fpsimd_save_state(*host_data_ptr(fpsimd_state)); +} + static const exit_handler_fn hyp_exit_handlers[] = { [0 ... ESR_ELx_EC_MAX] = NULL, [ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32, diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index d7af5f46f22a3..20073579e9f5b 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -262,6 +262,11 @@ static bool kvm_hyp_handle_eret(struct kvm_vcpu *vcpu, u64 *exit_code) return true; } +static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu) +{ + __fpsimd_save_state(*host_data_ptr(fpsimd_state)); +} + static const exit_handler_fn hyp_exit_handlers[] = { [0 ... ESR_ELx_EC_MAX] = NULL, [ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32, From 66d5b53e20a6e00b7ce3b652a3e2db967f7b33d0 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 3 Jun 2024 13:28:47 +0100 Subject: [PATCH 265/537] KVM: arm64: Allocate memory mapped at hyp for host sve state in pKVM Protected mode needs to maintain (save/restore) the host's sve state, rather than relying on the host kernel to do that. This is to avoid leaking information to the host about guests and the type of operations they are performing. As a first step towards that, allocate memory mapped at hyp, per cpu, for the host sve state. The following patch will use this memory to save/restore the host state. Reviewed-by: Oliver Upton Signed-off-by: Fuad Tabba Link: https://lore.kernel.org/r/20240603122852.3923848-6-tabba@google.com Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 17 ++++++++ arch/arm64/include/asm/kvm_hyp.h | 1 + arch/arm64/include/asm/kvm_pkvm.h | 9 ++++ arch/arm64/kvm/arm.c | 68 +++++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/nvhe/pkvm.c | 2 + arch/arm64/kvm/hyp/nvhe/setup.c | 24 +++++++++++ arch/arm64/kvm/reset.c | 3 ++ 7 files changed, 124 insertions(+) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8170c04fde914..90df7ccec5f4f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -76,6 +76,7 @@ static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; }; DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); extern unsigned int __ro_after_init kvm_sve_max_vl; +extern unsigned int __ro_after_init kvm_host_sve_max_vl; int __init kvm_arm_init_sve(void); u32 __attribute_const__ kvm_target_cpu(void); @@ -521,6 +522,20 @@ struct kvm_cpu_context { u64 *vncr_array; }; +struct cpu_sve_state { + __u64 zcr_el1; + + /* + * Ordering is important since __sve_save_state/__sve_restore_state + * relies on it. + */ + __u32 fpsr; + __u32 fpcr; + + /* Must be SVE_VQ_BYTES (128 bit) aligned. 
*/ + __u8 sve_regs[]; +}; + /* * This structure is instantiated on a per-CPU basis, and contains * data that is: @@ -534,7 +549,9 @@ struct kvm_cpu_context { */ struct kvm_host_data { struct kvm_cpu_context host_ctxt; + struct user_fpsimd_state *fpsimd_state; /* hyp VA */ + struct cpu_sve_state *sve_state; /* hyp VA */ /* Ownership of the FP regs */ enum { diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 686cce7e4e96c..b05bceca33850 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -143,5 +143,6 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val); extern unsigned long kvm_nvhe_sym(__icache_flags); extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits); +extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl); #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h index ad9cfb5c1ff4e..cd56acd9a842c 100644 --- a/arch/arm64/include/asm/kvm_pkvm.h +++ b/arch/arm64/include/asm/kvm_pkvm.h @@ -128,4 +128,13 @@ static inline unsigned long hyp_ffa_proxy_pages(void) return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE); } +static inline size_t pkvm_host_sve_state_size(void) +{ + if (!system_supports_sve()) + return 0; + + return size_add(sizeof(struct cpu_sve_state), + SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl))); +} + #endif /* __ARM64_KVM_PKVM_H__ */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 9996a989b52e8..1acf7415e831b 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1931,6 +1931,11 @@ static unsigned long nvhe_percpu_order(void) return size ? get_order(size) : 0; } +static size_t pkvm_host_sve_state_order(void) +{ + return get_order(pkvm_host_sve_state_size()); +} + /* A lookup table holding the hypervisor VA for each vector slot */ static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS]; @@ -2310,12 +2315,20 @@ static void __init teardown_subsystems(void) static void __init teardown_hyp_mode(void) { + bool free_sve = system_supports_sve() && is_protected_kvm_enabled(); int cpu; free_hyp_pgds(); for_each_possible_cpu(cpu) { free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order()); + + if (free_sve) { + struct cpu_sve_state *sve_state; + + sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; + free_pages((unsigned long) sve_state, pkvm_host_sve_state_order()); + } } } @@ -2398,6 +2411,50 @@ static int __init kvm_hyp_init_protection(u32 hyp_va_bits) return 0; } +static int init_pkvm_host_sve_state(void) +{ + int cpu; + + if (!system_supports_sve()) + return 0; + + /* Allocate pages for host sve state in protected mode. */ + for_each_possible_cpu(cpu) { + struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order()); + + if (!page) + return -ENOMEM; + + per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page); + } + + /* + * Don't map the pages in hyp since these are only used in protected + * mode, which will (re)create its own mapping when initialized. + */ + + return 0; +} + +/* + * Finalizes the initialization of hyp mode, once everything else is initialized + * and the initialziation process cannot fail. 
+ */ +static void finalize_init_hyp_mode(void) +{ + int cpu; + + if (!is_protected_kvm_enabled() || !system_supports_sve()) + return; + + for_each_possible_cpu(cpu) { + struct cpu_sve_state *sve_state; + + sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; + per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = kern_hyp_va(sve_state); + } +} + static void pkvm_hyp_init_ptrauth(void) { struct kvm_cpu_context *hyp_ctxt; @@ -2566,6 +2623,10 @@ static int __init init_hyp_mode(void) goto out_err; } + err = init_pkvm_host_sve_state(); + if (err) + goto out_err; + err = kvm_hyp_init_protection(hyp_va_bits); if (err) { kvm_err("Failed to init hyp memory protection\n"); @@ -2730,6 +2791,13 @@ static __init int kvm_arm_init(void) if (err) goto out_subs; + /* + * This should be called after initialization is done and failure isn't + * possible anymore. + */ + if (!in_hyp_mode) + finalize_init_hyp_mode(); + kvm_arm_initialised = true; return 0; diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 16aa4875ddb8c..25e9a94f6d764 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -18,6 +18,8 @@ unsigned long __icache_flags; /* Used by kvm_get_vttbr(). */ unsigned int kvm_arm_vmid_bits; +unsigned int kvm_host_sve_max_vl; + /* * Set trap register values based on features in ID_AA64PFR0. */ diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index 859f22f754d37..3fae424795982 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -67,6 +67,28 @@ static int divide_memory_pool(void *virt, unsigned long size) return 0; } +static int pkvm_create_host_sve_mappings(void) +{ + void *start, *end; + int ret, i; + + if (!system_supports_sve()) + return 0; + + for (i = 0; i < hyp_nr_cpus; i++) { + struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i); + struct cpu_sve_state *sve_state = host_data->sve_state; + + start = kern_hyp_va(sve_state); + end = start + PAGE_ALIGN(pkvm_host_sve_state_size()); + ret = pkvm_create_mappings(start, end, PAGE_HYP); + if (ret) + return ret; + } + + return 0; +} + static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size, unsigned long *per_cpu_base, u32 hyp_va_bits) @@ -125,6 +147,8 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size, return ret; } + pkvm_create_host_sve_mappings(); + /* * Map the host sections RO in the hypervisor, but transfer the * ownership from the host to the hypervisor itself to make sure they diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 1b7b58cb121f9..3fc8ca164dbe4 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -32,6 +32,7 @@ /* Maximum phys_shift supported for any VM on this host */ static u32 __ro_after_init kvm_ipa_limit; +unsigned int __ro_after_init kvm_host_sve_max_vl; /* * ARMv8 Reset Values @@ -51,6 +52,8 @@ int __init kvm_arm_init_sve(void) { if (system_supports_sve()) { kvm_sve_max_vl = sve_max_virtualisable_vl(); + kvm_host_sve_max_vl = sve_max_vl(); + kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl; /* * The get_sve_reg()/set_sve_reg() ioctl interface will need From b5b9955617bc0b41546f2fa7c3dbcc048b43dc82 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 3 Jun 2024 13:28:48 +0100 Subject: [PATCH 266/537] KVM: arm64: Eagerly restore host fpsimd/sve state in pKVM When running in protected mode we don't want to leak protected guest state to the host, including whether a guest has used fpsimd/sve. 
Therefore, eagerly restore the host state on guest exit when running in protected mode, which happens only if the guest has used fpsimd/sve. Reviewed-by: Oliver Upton Signed-off-by: Fuad Tabba Link: https://lore.kernel.org/r/20240603122852.3923848-7-tabba@google.com Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/include/hyp/switch.h | 13 ++++- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 67 +++++++++++++++++++++++-- arch/arm64/kvm/hyp/nvhe/pkvm.c | 2 + arch/arm64/kvm/hyp/nvhe/switch.c | 16 +++++- 4 files changed, 93 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 9b904c858df08..0c4de44534b7a 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -321,6 +321,17 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu) write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR); } +static inline void __hyp_sve_save_host(void) +{ + struct cpu_sve_state *sve_state = *host_data_ptr(sve_state); + + sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR); + write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2); + __sve_save_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl), + &sve_state->fpsr, + true); +} + static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu); /* @@ -355,7 +366,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) /* Valid trap. Switch the context: */ /* First disable enough traps to allow us to update the registers */ - if (sve_guest) + if (sve_guest || (is_protected_kvm_enabled() && system_supports_sve())) cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN); else cpacr_clear_set(0, CPACR_ELx_FPEN); diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index f71394d0e32a7..bd93b8a9e1726 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -23,20 +23,80 @@ DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params); void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt); +static void __hyp_sve_save_guest(struct kvm_vcpu *vcpu) +{ + __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR); + /* + * On saving/restoring guest sve state, always use the maximum VL for + * the guest. The layout of the data when saving the sve state depends + * on the VL, so use a consistent (i.e., the maximum) guest VL. + */ + sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2); + __sve_save_state(vcpu_sve_pffr(vcpu), &vcpu->arch.ctxt.fp_regs.fpsr, true); + write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2); +} + +static void __hyp_sve_restore_host(void) +{ + struct cpu_sve_state *sve_state = *host_data_ptr(sve_state); + + /* + * On saving/restoring host sve state, always use the maximum VL for + * the host. The layout of the data when saving the sve state depends + * on the VL, so use a consistent (i.e., the maximum) host VL. + * + * Setting ZCR_EL2 to ZCR_ELx_LEN_MASK sets the effective length + * supported by the system (or limited at EL3). 
+ */ + write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2); + __sve_restore_state(sve_state->sve_regs + sve_ffr_offset(kvm_host_sve_max_vl), + &sve_state->fpsr, + true); + write_sysreg_el1(sve_state->zcr_el1, SYS_ZCR); +} + +static void fpsimd_sve_flush(void) +{ + *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED; +} + +static void fpsimd_sve_sync(struct kvm_vcpu *vcpu) +{ + if (!guest_owns_fp_regs()) + return; + + cpacr_clear_set(0, CPACR_ELx_FPEN | CPACR_ELx_ZEN); + isb(); + + if (vcpu_has_sve(vcpu)) + __hyp_sve_save_guest(vcpu); + else + __fpsimd_save_state(&vcpu->arch.ctxt.fp_regs); + + if (system_supports_sve()) + __hyp_sve_restore_host(); + else + __fpsimd_restore_state(*host_data_ptr(fpsimd_state)); + + *host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED; +} + static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu) { struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu; + fpsimd_sve_flush(); + hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt; hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state); - hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl; + /* Limit guest vector length to the maximum supported by the host. */ + hyp_vcpu->vcpu.arch.sve_max_vl = min(host_vcpu->arch.sve_max_vl, kvm_host_sve_max_vl); hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu; hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2; hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2; - hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2; hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags; @@ -54,10 +114,11 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu) struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3; unsigned int i; + fpsimd_sve_sync(&hyp_vcpu->vcpu); + host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt; host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2; - host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2; host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault; diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 25e9a94f6d764..feb27b4ce4592 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -588,6 +588,8 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu, if (ret) unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu)); + hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu); + return ret; } diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 019f863922fa4..bef74de7065b3 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -184,7 +184,21 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code) static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu) { - __fpsimd_save_state(*host_data_ptr(fpsimd_state)); + /* + * Non-protected kvm relies on the host restoring its sve state. + * Protected kvm restores the host's sve state as not to reveal that + * fpsimd was used by a guest nor leak upper sve bits. + */ + if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) { + __hyp_sve_save_host(); + + /* Re-enable SVE traps if not supported for the guest vcpu. 
*/ + if (!vcpu_has_sve(vcpu)) + cpacr_clear_set(CPACR_ELx_ZEN, 0); + + } else { + __fpsimd_save_state(*host_data_ptr(fpsimd_state)); + } } static const exit_handler_fn hyp_exit_handlers[] = { From 1696fc2174dbab12228ea9ec4c213d6aeea348f8 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 3 Jun 2024 13:28:49 +0100 Subject: [PATCH 267/537] KVM: arm64: Consolidate initializing the host data's fpsimd_state/sve in pKVM Now that we have introduced finalize_init_hyp_mode(), lets consolidate the initializing of the host_data fpsimd_state and sve state. Reviewed-by: Oliver Upton Signed-off-by: Fuad Tabba Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20240603122852.3923848-8-tabba@google.com Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 10 ++++++++-- arch/arm64/kvm/arm.c | 20 ++++++++++++++------ arch/arm64/kvm/hyp/include/nvhe/pkvm.h | 1 - arch/arm64/kvm/hyp/nvhe/pkvm.c | 11 ----------- arch/arm64/kvm/hyp/nvhe/setup.c | 1 - 5 files changed, 22 insertions(+), 21 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 90df7ccec5f4f..36b8e97bf49ec 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -550,8 +550,14 @@ struct cpu_sve_state { struct kvm_host_data { struct kvm_cpu_context host_ctxt; - struct user_fpsimd_state *fpsimd_state; /* hyp VA */ - struct cpu_sve_state *sve_state; /* hyp VA */ + /* + * All pointers in this union are hyp VA. + * sve_state is only used in pKVM and if system_supports_sve(). + */ + union { + struct user_fpsimd_state *fpsimd_state; + struct cpu_sve_state *sve_state; + }; /* Ownership of the FP regs */ enum { diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 1acf7415e831b..59716789fe0f3 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2444,14 +2444,22 @@ static void finalize_init_hyp_mode(void) { int cpu; - if (!is_protected_kvm_enabled() || !system_supports_sve()) - return; + if (system_supports_sve() && is_protected_kvm_enabled()) { + for_each_possible_cpu(cpu) { + struct cpu_sve_state *sve_state; - for_each_possible_cpu(cpu) { - struct cpu_sve_state *sve_state; + sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; + per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = + kern_hyp_va(sve_state); + } + } else { + for_each_possible_cpu(cpu) { + struct user_fpsimd_state *fpsimd_state; - sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; - per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = kern_hyp_va(sve_state); + fpsimd_state = &per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->host_ctxt.fp_regs; + per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->fpsimd_state = + kern_hyp_va(fpsimd_state); + } } } diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h index 22f374e9f5329..24a9a8330d190 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h +++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h @@ -59,7 +59,6 @@ static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu) } void pkvm_hyp_vm_table_init(void *tbl); -void pkvm_host_fpsimd_state_init(void); int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva, unsigned long pgd_hva); diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index feb27b4ce4592..ea67fcbf83761 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -249,17 +249,6 @@ void pkvm_hyp_vm_table_init(void *tbl) vm_table = tbl; } -void pkvm_host_fpsimd_state_init(void) -{ - unsigned long i; - - for 
(i = 0; i < hyp_nr_cpus; i++) { - struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i); - - host_data->fpsimd_state = &host_data->host_ctxt.fp_regs; - } -} - /* * Return the hyp vm structure corresponding to the handle. */ diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index 3fae424795982..f4350ba07b0b0 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -324,7 +324,6 @@ void __noreturn __pkvm_init_finalise(void) goto out; pkvm_hyp_vm_table_init(vm_table_base); - pkvm_host_fpsimd_state_init(); out: /* * We tail-called to here from handle___pkvm_init() and will not return, From a69283ae1db8dd416870d931caa9e2d3d2c1cd8b Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 3 Jun 2024 13:28:50 +0100 Subject: [PATCH 268/537] KVM: arm64: Refactor CPACR trap bit setting/clearing to use ELx format When setting/clearing CPACR bits for EL0 and EL1, use the ELx format of the bits, which covers both. This makes the code clearer, and reduces the chances of accidentally missing a bit. No functional change intended. Reviewed-by: Oliver Upton Signed-off-by: Fuad Tabba Link: https://lore.kernel.org/r/20240603122852.3923848-9-tabba@google.com Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/el2_setup.h | 6 +++--- arch/arm64/include/asm/kvm_emulate.h | 9 ++++----- arch/arm64/kvm/fpsimd.c | 4 +--- arch/arm64/kvm/hyp/nvhe/pkvm.c | 2 +- arch/arm64/kvm/hyp/nvhe/switch.c | 5 ++--- arch/arm64/kvm/hyp/vhe/switch.c | 7 +++---- 6 files changed, 14 insertions(+), 19 deletions(-) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index e4546b29dd0cb..fd87c4b8f9840 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -146,7 +146,7 @@ /* Coprocessor traps */ .macro __init_el2_cptr __check_hvhe .LnVHE_\@, x1 - mov x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN) + mov x0, #CPACR_ELx_FPEN msr cpacr_el1, x0 b .Lskip_set_cptr_\@ .LnVHE_\@: @@ -277,7 +277,7 @@ // (h)VHE case mrs x0, cpacr_el1 // Disable SVE traps - orr x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) + orr x0, x0, #CPACR_ELx_ZEN msr cpacr_el1, x0 b .Lskip_set_cptr_\@ @@ -298,7 +298,7 @@ // (h)VHE case mrs x0, cpacr_el1 // Disable SME traps - orr x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN) + orr x0, x0, #CPACR_ELx_SMEN msr cpacr_el1, x0 b .Lskip_set_cptr_sme_\@ diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 2d7a0bdf9d03f..21650e7924d45 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -632,17 +632,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu) u64 val; if (has_vhe()) { - val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN | - CPACR_EL1_ZEN_EL1EN); + val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN); if (cpus_have_final_cap(ARM64_SME)) val |= CPACR_EL1_SMEN_EL1EN; } else if (has_hvhe()) { - val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN); + val = CPACR_ELx_FPEN; if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs()) - val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN; + val |= CPACR_ELx_ZEN; if (cpus_have_final_cap(ARM64_SME)) - val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN; + val |= CPACR_ELx_SMEN; } else { val = CPTR_NVHE_EL2_RES1; diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 1807d3a79a8af..eb21f29d91fc5 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -161,9 +161,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) if 
(has_vhe() && system_supports_sme()) { /* Also restore EL0 state seen on entry */ if (vcpu_get_flag(vcpu, HOST_SME_ENABLED)) - sysreg_clear_set(CPACR_EL1, 0, - CPACR_EL1_SMEN_EL0EN | - CPACR_EL1_SMEN_EL1EN); + sysreg_clear_set(CPACR_EL1, 0, CPACR_ELx_SMEN); else sysreg_clear_set(CPACR_EL1, CPACR_EL1_SMEN_EL0EN, diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index ea67fcbf83761..95cf185742517 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -65,7 +65,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) /* Trap SVE */ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) { if (has_hvhe()) - cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN; + cptr_clear |= CPACR_ELx_ZEN; else cptr_set |= CPTR_EL2_TZ; } diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index bef74de7065b3..6af179c6356d6 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -48,15 +48,14 @@ static void __activate_traps(struct kvm_vcpu *vcpu) val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA; if (cpus_have_final_cap(ARM64_SME)) { if (has_hvhe()) - val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN); + val &= ~CPACR_ELx_SMEN; else val |= CPTR_EL2_TSM; } if (!guest_owns_fp_regs()) { if (has_hvhe()) - val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN | - CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN); + val &= ~(CPACR_ELx_FPEN | CPACR_ELx_ZEN); else val |= CPTR_EL2_TFP | CPTR_EL2_TZ; diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 20073579e9f5b..8fbb6a2e0559d 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -93,8 +93,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu) val = read_sysreg(cpacr_el1); val |= CPACR_ELx_TTA; - val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN | - CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN); + val &= ~(CPACR_ELx_ZEN | CPACR_ELx_SMEN); /* * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to @@ -109,9 +108,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu) if (guest_owns_fp_regs()) { if (vcpu_has_sve(vcpu)) - val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN; + val |= CPACR_ELx_ZEN; } else { - val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN); + val &= ~CPACR_ELx_FPEN; __activate_traps_fpsimd32(vcpu); } From afb91f5f8ad7af172d993a34fde1947892408f53 Mon Sep 17 00:00:00 2001 From: Fuad Tabba Date: Mon, 3 Jun 2024 13:28:51 +0100 Subject: [PATCH 269/537] KVM: arm64: Ensure that SME controls are disabled in protected mode KVM (and pKVM) do not support SME guests. Therefore KVM ensures that the host's SME state is flushed and that SME controls for enabling access to ZA storage and for streaming are disabled. pKVM needs to protect against a buggy/malicious host. Ensure that it wouldn't run a guest when protected mode is enabled should any of the SME controls be enabled. 
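At register level, "any of the SME controls" reduces to SVCR reading non-zero, since SVCR only defines the streaming-mode and ZA-storage bits. A minimal sketch of that decode, assuming the SVCR_SM_MASK/SVCR_ZA_MASK field macros from the arm64 sysreg headers (the early-return value is illustrative; the actual checks are in the hunks below):

	u64 svcr = read_sysreg_s(SYS_SVCR);
	bool streaming = svcr & SVCR_SM_MASK;   /* PSTATE.SM: streaming SVE mode */
	bool za_live   = svcr & SVCR_ZA_MASK;   /* PSTATE.ZA: ZA storage enabled */

	if (streaming || za_live)
		return -EINVAL;                 /* refuse to run the guest, as the hunks below do */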
Signed-off-by: Fuad Tabba Link: https://lore.kernel.org/r/20240603122852.3923848-10-tabba@google.com Signed-off-by: Marc Zyngier --- arch/arm64/kvm/fpsimd.c | 7 +++++++ arch/arm64/kvm/hyp/nvhe/hyp-main.c | 11 +++++++++++ 2 files changed, 18 insertions(+) diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index eb21f29d91fc5..521b32868d0d2 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -90,6 +90,13 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) fpsimd_save_and_flush_cpu_state(); } } + + /* + * If normal guests gain SME support, maintain this behavior for pKVM + * guests, which don't support SME. + */ + WARN_ON(is_protected_kvm_enabled() && system_supports_sme() && + read_sysreg_s(SYS_SVCR)); } /* diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index bd93b8a9e1726..f43d845f3c4ec 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -140,6 +140,17 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt) struct pkvm_hyp_vcpu *hyp_vcpu; struct kvm *host_kvm; + /* + * KVM (and pKVM) doesn't support SME guests for now, and + * ensures that SME features aren't enabled in pstate when + * loading a vcpu. Therefore, if SME features enabled the host + * is misbehaving. + */ + if (unlikely(system_supports_sme() && read_sysreg_s(SYS_SVCR))) { + ret = -EINVAL; + goto out; + } + host_kvm = kern_hyp_va(host_vcpu->kvm); hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle, host_vcpu->vcpu_idx); From ce3af2ee95170b7d9e15fff6e500d67deab1e7b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Fri, 24 May 2024 15:05:39 +0200 Subject: [PATCH 270/537] HID: logitech-dj: Fix memory leak in logi_dj_recv_switch_to_dj_mode() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix a memory leak on logi_dj_recv_send_report() error path. Fixes: 6f20d3261265 ("HID: logitech-dj: Fix error handling in logi_dj_recv_switch_to_dj_mode()") Signed-off-by: José Expósito Signed-off-by: Jiri Kosina --- drivers/hid/hid-logitech-dj.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 3c3c497b6b911..37958edec55f5 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -1284,8 +1284,10 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, */ msleep(50); - if (retval) + if (retval) { + kfree(dj_report); return retval; + } } /* From 655a8a7684b897721f87c59798fd04d8b79f1b69 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 26 May 2024 17:05:59 +0200 Subject: [PATCH 271/537] HID: nintendo: Fix an error handling path in nintendo_hid_probe() joycon_leds_create() has a ida_alloc() call. So if an error occurs after it, a corresponding ida_free() call is needed, as already done in the .remove function. This is not 100% perfect, because if ida_alloc() fails, then 'ctlr->player_id' will forced to be U32_MAX, and an error will be logged when ida_free() is called. Considering that this can't happen in real life, no special handling is done to handle it. 
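The fix itself is the usual stacked-label unwind: each failure jumps to the label that releases everything acquired so far, and the labels run in reverse order of acquisition. A condensed sketch of the pattern with placeholder names (not the actual driver code; acquire_b() stands in for the ida_alloc() done inside joycon_leds_create()):

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();              /* e.g. ida_alloc() for the LED player id */
	if (ret)
		goto err_a;

	ret = acquire_c();
	if (ret)
		goto err_b;             /* anything after b must also undo b */

	return 0;

err_b:
	release_b();                    /* here: ida_free() on the player id */
err_a:
	release_a();
	return ret;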
Fixes: 5307de63d71d ("HID: nintendo: use ida for LED player id") Signed-off-by: Christophe JAILLET Reviewed-by: Silvan Jegen Signed-off-by: Jiri Kosina --- drivers/hid/hid-nintendo.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index b4a97803eca3e..3062daf68d312 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -2725,13 +2725,13 @@ static int nintendo_hid_probe(struct hid_device *hdev, ret = joycon_power_supply_create(ctlr); if (ret) { hid_err(hdev, "Failed to create power_supply; ret=%d\n", ret); - goto err_close; + goto err_ida; } ret = joycon_input_create(ctlr); if (ret) { hid_err(hdev, "Failed to create input device; ret=%d\n", ret); - goto err_close; + goto err_ida; } ctlr->ctlr_state = JOYCON_CTLR_STATE_READ; @@ -2739,6 +2739,8 @@ static int nintendo_hid_probe(struct hid_device *hdev, hid_dbg(hdev, "probe - success\n"); return 0; +err_ida: + ida_free(&nintendo_player_id_allocator, ctlr->player_id); err_close: hid_hw_close(hdev); err_stop: From 9e438fe31e96b75c2ec599f6a9239950e6712358 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 3 Jun 2024 09:41:03 +0200 Subject: [PATCH 272/537] HID: intel-ish-hid: fix endian-conversion The newly added file causes a ton of sparse warnings about the incorrect use of __le32 and similar types: drivers/hid/intel-ish-hid/ishtp/loader.h:41:23: error: invalid bitfield specifier for type restricted __le32. drivers/hid/intel-ish-hid/ishtp/loader.h:42:27: error: invalid bitfield specifier for type restricted __le32. drivers/hid/intel-ish-hid/ishtp/loader.h:43:24: error: invalid bitfield specifier for type restricted __le32. drivers/hid/intel-ish-hid/ishtp/loader.h:44:24: error: invalid bitfield specifier for type restricted __le32. drivers/hid/intel-ish-hid/ishtp/loader.h:45:22: error: invalid bitfield specifier for type restricted __le32. drivers/hid/intel-ish-hid/ishtp/loader.c:172:33: warning: restricted __le32 degrades to integer drivers/hid/intel-ish-hid/ishtp/loader.c:178:50: warning: incorrect type in assignment (different base types) drivers/hid/intel-ish-hid/ishtp/loader.c:178:50: expected restricted __le32 [usertype] length drivers/hid/intel-ish-hid/ishtp/loader.c:178:50: got unsigned long drivers/hid/intel-ish-hid/ishtp/loader.c:179:50: warning: incorrect type in assignment (different base types) drivers/hid/intel-ish-hid/ishtp/loader.c:179:50: expected restricted __le32 [usertype] fw_off drivers/hid/intel-ish-hid/ishtp/loader.c:179:50: got unsigned int [usertype] offset drivers/hid/intel-ish-hid/ishtp/loader.c:180:17: warning: cast from restricted __le32 drivers/hid/intel-ish-hid/ishtp/loader.c:183:24: warning: invalid assignment: += drivers/hid/intel-ish-hid/ishtp/loader.c:183:24: left side has type unsigned int drivers/hid/intel-ish-hid/ishtp/loader.c:183:24: right side has type restricted __le32 Add the necessary conversions and use temporary variables where appropriate to avoid converting back. 
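The core of the change: bitfields cannot be declared on __le32, so the message header stays a plain __le32 on the wire and is converted once into a native-endian union before its bitfields are read. A minimal sketch of that idiom, using the union loader_msg_header layout from the loader.h hunk in this patch (the function and parameter names are illustrative only):

	/* Illustrative only: validate a response that arrives as a raw __le32 header. */
	static int check_resp_header(const __le32 *resp)
	{
		union loader_msg_header hdr;

		hdr.val32 = le32_to_cpup(resp);  /* convert once, then read bitfields */

		if (!hdr.is_response)
			return -EBADMSG;
		if (hdr.status)
			return -EIO;

		return 0;
	}

	/* Outbound fields go the other way: build natively, store as little-endian. */
	query.header = cpu_to_le32(query_hdr.val32);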
Fixes: 579a267e4617 ("HID: intel-ish-hid: Implement loading firmware from host feature") Signed-off-by: Arnd Bergmann Reviewed-by: Zhang Lixu Tested-by: Zhang Lixu Signed-off-by: Jiri Kosina --- drivers/hid/intel-ish-hid/ishtp/loader.c | 69 +++++++++++++----------- drivers/hid/intel-ish-hid/ishtp/loader.h | 33 +++++++----- 2 files changed, 57 insertions(+), 45 deletions(-) diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.c b/drivers/hid/intel-ish-hid/ishtp/loader.c index 2785b04a2f5a0..fcca070bdecbe 100644 --- a/drivers/hid/intel-ish-hid/ishtp/loader.c +++ b/drivers/hid/intel-ish-hid/ishtp/loader.c @@ -84,8 +84,8 @@ static int loader_write_message(struct ishtp_device *dev, void *buf, int len) static int loader_xfer_cmd(struct ishtp_device *dev, void *req, int req_len, void *resp, int resp_len) { - struct loader_msg_header *req_hdr = req; - struct loader_msg_header *resp_hdr = resp; + union loader_msg_header req_hdr; + union loader_msg_header resp_hdr; struct device *devc = dev->devc; int rv; @@ -93,34 +93,37 @@ static int loader_xfer_cmd(struct ishtp_device *dev, void *req, int req_len, dev->fw_loader_rx_size = resp_len; rv = loader_write_message(dev, req, req_len); + req_hdr.val32 = le32_to_cpup(req); + if (rv < 0) { - dev_err(devc, "write cmd %u failed:%d\n", req_hdr->command, rv); + dev_err(devc, "write cmd %u failed:%d\n", req_hdr.command, rv); return rv; } /* Wait the ACK */ wait_event_interruptible_timeout(dev->wait_loader_recvd_msg, dev->fw_loader_received, ISHTP_LOADER_TIMEOUT); + resp_hdr.val32 = le32_to_cpup(resp); dev->fw_loader_rx_size = 0; dev->fw_loader_rx_buf = NULL; if (!dev->fw_loader_received) { - dev_err(devc, "wait response of cmd %u timeout\n", req_hdr->command); + dev_err(devc, "wait response of cmd %u timeout\n", req_hdr.command); return -ETIMEDOUT; } - if (!resp_hdr->is_response) { - dev_err(devc, "not a response for %u\n", req_hdr->command); + if (!resp_hdr.is_response) { + dev_err(devc, "not a response for %u\n", req_hdr.command); return -EBADMSG; } - if (req_hdr->command != resp_hdr->command) { - dev_err(devc, "unexpected cmd response %u:%u\n", req_hdr->command, - resp_hdr->command); + if (req_hdr.command != resp_hdr.command) { + dev_err(devc, "unexpected cmd response %u:%u\n", req_hdr.command, + resp_hdr.command); return -EBADMSG; } - if (resp_hdr->status) { - dev_err(devc, "cmd %u failed %u\n", req_hdr->command, resp_hdr->status); + if (resp_hdr.status) { + dev_err(devc, "cmd %u failed %u\n", req_hdr.command, resp_hdr.status); return -EIO; } @@ -157,30 +160,33 @@ static void release_dma_bufs(struct ishtp_device *dev, * @fragment: The ISHTP firmware fragment descriptor * @dma_bufs: The array of DMA fragment buffers * @fragment_size: The size of a single DMA fragment + * @fragment_count: Number of fragments * * Return: 0 on success, negative error code on failure */ static int prepare_dma_bufs(struct ishtp_device *dev, const struct firmware *ish_fw, struct loader_xfer_dma_fragment *fragment, - void **dma_bufs, u32 fragment_size) + void **dma_bufs, u32 fragment_size, u32 fragment_count) { dma_addr_t dma_addr; u32 offset = 0; + u32 length; int i; - for (i = 0; i < fragment->fragment_cnt && offset < ish_fw->size; i++) { + for (i = 0; i < fragment_count && offset < ish_fw->size; i++) { dma_bufs[i] = dma_alloc_coherent(dev->devc, fragment_size, &dma_addr, GFP_KERNEL); if (!dma_bufs[i]) return -ENOMEM; fragment->fragment_tbl[i].ddr_adrs = cpu_to_le64(dma_addr); - fragment->fragment_tbl[i].length = clamp(ish_fw->size - offset, 0, fragment_size); - 
fragment->fragment_tbl[i].fw_off = offset; - memcpy(dma_bufs[i], ish_fw->data + offset, fragment->fragment_tbl[i].length); + length = clamp(ish_fw->size - offset, 0, fragment_size); + fragment->fragment_tbl[i].length = cpu_to_le32(length); + fragment->fragment_tbl[i].fw_off = cpu_to_le32(offset); + memcpy(dma_bufs[i], ish_fw->data + offset, length); clflush_cache_range(dma_bufs[i], fragment_size); - offset += fragment->fragment_tbl[i].length; + offset += length; } return 0; @@ -208,17 +214,17 @@ void ishtp_loader_work(struct work_struct *work) { DEFINE_RAW_FLEX(struct loader_xfer_dma_fragment, fragment, fragment_tbl, FRAGMENT_MAX_NUM); struct ishtp_device *dev = container_of(work, struct ishtp_device, work_fw_loader); - struct loader_xfer_query query = { - .header.command = LOADER_CMD_XFER_QUERY, - }; - struct loader_start start = { - .header.command = LOADER_CMD_START, - }; + union loader_msg_header query_hdr = { .command = LOADER_CMD_XFER_QUERY, }; + union loader_msg_header start_hdr = { .command = LOADER_CMD_START, }; + union loader_msg_header fragment_hdr = { .command = LOADER_CMD_XFER_FRAGMENT, }; + struct loader_xfer_query query = { .header = cpu_to_le32(query_hdr.val32), }; + struct loader_start start = { .header = cpu_to_le32(start_hdr.val32), }; union loader_recv_message recv_msg; char *filename = dev->driver_data->fw_filename; const struct firmware *ish_fw; void *dma_bufs[FRAGMENT_MAX_NUM] = {}; u32 fragment_size; + u32 fragment_count; int retry = ISHTP_LOADER_RETRY_TIMES; int rv; @@ -228,23 +234,24 @@ void ishtp_loader_work(struct work_struct *work) return; } - fragment->fragment.header.command = LOADER_CMD_XFER_FRAGMENT; - fragment->fragment.xfer_mode = LOADER_XFER_MODE_DMA; - fragment->fragment.is_last = 1; - fragment->fragment.size = ish_fw->size; + fragment->fragment.header = cpu_to_le32(fragment_hdr.val32); + fragment->fragment.xfer_mode = cpu_to_le32(LOADER_XFER_MODE_DMA); + fragment->fragment.is_last = cpu_to_le32(1); + fragment->fragment.size = cpu_to_le32(ish_fw->size); /* Calculate the size of a single DMA fragment */ fragment_size = PFN_ALIGN(DIV_ROUND_UP(ish_fw->size, FRAGMENT_MAX_NUM)); /* Calculate the count of DMA fragments */ - fragment->fragment_cnt = DIV_ROUND_UP(ish_fw->size, fragment_size); + fragment_count = DIV_ROUND_UP(ish_fw->size, fragment_size); + fragment->fragment_cnt = cpu_to_le32(fragment_count); - rv = prepare_dma_bufs(dev, ish_fw, fragment, dma_bufs, fragment_size); + rv = prepare_dma_bufs(dev, ish_fw, fragment, dma_bufs, fragment_size, fragment_count); if (rv) { dev_err(dev->devc, "prepare DMA buffer failed.\n"); goto out; } do { - query.image_size = ish_fw->size; + query.image_size = cpu_to_le32(ish_fw->size); rv = loader_xfer_cmd(dev, &query, sizeof(query), recv_msg.raw_data, sizeof(struct loader_xfer_query_ack)); if (rv) @@ -257,7 +264,7 @@ void ishtp_loader_work(struct work_struct *work) recv_msg.query_ack.version_build); rv = loader_xfer_cmd(dev, fragment, - struct_size(fragment, fragment_tbl, fragment->fragment_cnt), + struct_size(fragment, fragment_tbl, fragment_count), recv_msg.raw_data, sizeof(struct loader_xfer_fragment_ack)); if (rv) continue; /* try again if failed */ diff --git a/drivers/hid/intel-ish-hid/ishtp/loader.h b/drivers/hid/intel-ish-hid/ishtp/loader.h index 7aa45ebc3f7be..308b96085a4db 100644 --- a/drivers/hid/intel-ish-hid/ishtp/loader.h +++ b/drivers/hid/intel-ish-hid/ishtp/loader.h @@ -30,19 +30,23 @@ struct work_struct; #define LOADER_XFER_MODE_DMA BIT(0) /** - * struct loader_msg_header - ISHTP firmware loader message 
header + * union loader_msg_header - ISHTP firmware loader message header * @command: Command type * @is_response: Indicates if the message is a response * @has_next: Indicates if there is a next message * @reserved: Reserved for future use * @status: Status of the message - */ -struct loader_msg_header { - __le32 command:7; - __le32 is_response:1; - __le32 has_next:1; - __le32 reserved:15; - __le32 status:8; + * @val32: entire header as a 32-bit value + */ +union loader_msg_header { + struct { + __u32 command:7; + __u32 is_response:1; + __u32 has_next:1; + __u32 reserved:15; + __u32 status:8; + }; + __u32 val32; }; /** @@ -51,7 +55,7 @@ struct loader_msg_header { * @image_size: Size of the image */ struct loader_xfer_query { - struct loader_msg_header header; + __le32 header; __le32 image_size; }; @@ -103,7 +107,7 @@ struct loader_capability { * @capability: Loader capability */ struct loader_xfer_query_ack { - struct loader_msg_header header; + __le32 header; __le16 version_major; __le16 version_minor; __le16 version_hotfix; @@ -122,7 +126,7 @@ struct loader_xfer_query_ack { * @is_last: Is last */ struct loader_xfer_fragment { - struct loader_msg_header header; + __le32 header; __le32 xfer_mode; __le32 offset; __le32 size; @@ -134,7 +138,7 @@ struct loader_xfer_fragment { * @header: Header of the message */ struct loader_xfer_fragment_ack { - struct loader_msg_header header; + __le32 header; }; /** @@ -170,7 +174,7 @@ struct loader_xfer_dma_fragment { * @header: Header of the message */ struct loader_start { - struct loader_msg_header header; + __le32 header; }; /** @@ -178,10 +182,11 @@ struct loader_start { * @header: Header of the message */ struct loader_start_ack { - struct loader_msg_header header; + __le32 header; }; union loader_recv_message { + __le32 header; struct loader_xfer_query_ack query_ack; struct loader_xfer_fragment_ack fragment_ack; struct loader_start_ack start_ack; From 9c8f05cf1d7abd1bfb53cebe691bf3acb7baee99 Mon Sep 17 00:00:00 2001 From: Jeff Johnson Date: Mon, 3 Jun 2024 23:00:59 -0700 Subject: [PATCH 273/537] HID: logitech-hidpp: add missing MODULE_DESCRIPTION() macro make allmodconfig && make W=1 C=1 reports: WARNING: modpost: missing MODULE_DESCRIPTION() in drivers/hid/hid-logitech-hidpp.o Add the missing invocation of the MODULE_DESCRIPTION() macro. Signed-off-by: Jeff Johnson Signed-off-by: Jiri Kosina --- drivers/hid/hid-logitech-hidpp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index b81d5bcc76a77..400d70e6dbe2d 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -27,6 +27,7 @@ #include "usbhid/usbhid.h" #include "hid-ids.h" +MODULE_DESCRIPTION("Support for Logitech devices relying on the HID++ specification"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Benjamin Tissoires "); MODULE_AUTHOR("Nestor Lopez Casado "); From 0698ff57bf327d9a5735a898f78161b8dada160b Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2024 13:54:08 +0200 Subject: [PATCH 274/537] drm/xe/pf: Update the LMTT when freeing VF GT config MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The LMTT must be updated whenever we change the VF LMEM configuration. We missed that step when freeing the whole VF GT config, which could result in stale PTE in LMTT or LMTT PT object leaks. Fix that. 
Fixes: ac6598aed1b3 ("drm/xe/pf: Add support to configure SR-IOV VFs") Signed-off-by: Michal Wajdeczko Reviewed-by: Piotr Piórkowski Link: https://patchwork.freedesktop.org/patch/msgid/20240527115408.1064-1-michal.wajdeczko@intel.com (cherry picked from commit c063cce7df3a765539e2a2d75ab943f334446cce) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 79116ad586204..476d613333a98 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -1749,6 +1749,7 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid) if (!xe_gt_is_media_type(gt)) { pf_release_vf_config_ggtt(gt, config); pf_release_vf_config_lmem(gt, config); + pf_update_vf_lmtt(gt_to_xe(gt), vfid); } pf_release_config_ctxs(gt, config); pf_release_config_dbs(gt, config); From 1db5322b7e6b58e1b304ce69a50e9dca798ca95b Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Thu, 30 May 2024 12:14:15 +0300 Subject: [PATCH 275/537] mei: demote client disconnect warning on suspend to debug Change level for the "not connected" client message in the write callback from error to debug. The MEI driver currently disconnects all clients upon system suspend. This behavior is by design and user-space applications with open connections before the suspend are expected to handle errors upon resume, by reopening their handles, reconnecting, and retrying their operations. However, the current driver implementation logs an error message every time a write operation is attempted on a disconnected client. Since this is a normal and expected flow after system resume logging this as an error can be misleading. Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20240530091415.725247-1-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 79e6f3c1341fe..40c3fe26f76df 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -329,7 +329,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, } if (!mei_cl_is_connected(cl)) { - cl_err(dev, cl, "is not connected"); + cl_dbg(dev, cl, "is not connected"); rets = -ENODEV; goto out; } From 283cb234ef95d94c61f59e1cd070cd9499b51292 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Tue, 4 Jun 2024 12:07:28 +0300 Subject: [PATCH 276/537] mei: me: release irq in mei_me_pci_resume error path The mei_me_pci_resume doesn't release irq on the error path, in case mei_start() fails. 
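For context, the IRQ being leaked is the one the resume handler itself requests just before restarting the device, so the error path has to pair that request with a free_irq(). A hedged sketch of the resume flow after the fix (the request_threaded_irq() call and handler names are reconstructed from the surrounding driver code, not from this diff):

	err = request_threaded_irq(pdev->irq, mei_me_irq_quick_handler,
				   mei_me_irq_thread_handler,
				   IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err)
		return err;

	err = mei_restart(dev);
	if (err) {
		free_irq(pdev->irq, dev);   /* undo the request above */
		return err;
	}

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;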
Cc: Fixes: 33ec08263147 ("mei: revamp mei reset state machine") Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20240604090728.1027307-1-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/pci-me.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 7f59dd38c32f5..6589635f8ba32 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -385,8 +385,10 @@ static int mei_me_pci_resume(struct device *device) } err = mei_restart(dev); - if (err) + if (err) { + free_irq(pdev->irq, dev); return err; + } /* Start timer if stopped in suspend */ schedule_delayed_work(&dev->timer_work, HZ); From 9b5e045029d8bded4c6979874ed3abc347c1415c Mon Sep 17 00:00:00 2001 From: Wentong Wu Date: Mon, 27 May 2024 20:38:35 +0800 Subject: [PATCH 277/537] mei: vsc: Don't stop/restart mei device during system suspend/resume The dynamically created mei client device (mei csi) is used as one V4L2 sub device of the whole video pipeline, and the V4L2 connection graph is built by software node. The mei_stop() and mei_restart() will delete the old mei csi client device and create a new mei client device, which will cause the software node information saved in old mei csi device lost and the whole video pipeline will be broken. Removing mei_stop()/mei_restart() during system suspend/resume can fix the issue above and won't impact hardware actual power saving logic. Fixes: f6085a96c973 ("mei: vsc: Unregister interrupt handler for system suspend") Cc: stable@vger.kernel.org # for 6.8+ Reported-by: Hao Yao Signed-off-by: Wentong Wu Reviewed-by: Sakari Ailus Tested-by: Jason Chen Tested-by: Sakari Ailus Acked-by: Tomas Winkler Link: https://lore.kernel.org/r/20240527123835.522384-1-wentong.wu@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/platform-vsc.c | 39 +++++++++++++-------------------- 1 file changed, 15 insertions(+), 24 deletions(-) diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c index b543e6b9f3cfd..1ec65d87488a3 100644 --- a/drivers/misc/mei/platform-vsc.c +++ b/drivers/misc/mei/platform-vsc.c @@ -399,41 +399,32 @@ static void mei_vsc_remove(struct platform_device *pdev) static int mei_vsc_suspend(struct device *dev) { - struct mei_device *mei_dev = dev_get_drvdata(dev); - struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + struct mei_device *mei_dev; + int ret = 0; - mei_stop(mei_dev); + mei_dev = dev_get_drvdata(dev); + if (!mei_dev) + return -ENODEV; - mei_disable_interrupts(mei_dev); + mutex_lock(&mei_dev->device_lock); - vsc_tp_free_irq(hw->tp); + if (!mei_write_is_idle(mei_dev)) + ret = -EAGAIN; - return 0; + mutex_unlock(&mei_dev->device_lock); + + return ret; } static int mei_vsc_resume(struct device *dev) { - struct mei_device *mei_dev = dev_get_drvdata(dev); - struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); - int ret; - - ret = vsc_tp_request_irq(hw->tp); - if (ret) - return ret; - - ret = mei_restart(mei_dev); - if (ret) - goto err_free; + struct mei_device *mei_dev; - /* start timer if stopped in suspend */ - schedule_delayed_work(&mei_dev->timer_work, HZ); + mei_dev = dev_get_drvdata(dev); + if (!mei_dev) + return -ENODEV; return 0; - -err_free: - vsc_tp_free_irq(hw->tp); - - return ret; } static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume); From af076156ec6d70332f1555754e99d4a3771ec297 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 3 Jun 2024 22:50:50 +0200 Subject: [PATCH 278/537] 
mei: vsc: Fix wrong invocation of ACPI SID method When using an initializer for a union only one of the union members must be initialized. The initializer for the acpi_object union variable passed as argument to the SID ACPI method was initializing both the type and the integer members of the union. Unfortunately rather then complaining about this gcc simply ignores the first initializer and only used the second integer.value = 1 initializer. Leaving type set to 0 which leads to the argument being skipped by acpi acpi_ns_evaluate() resulting in: ACPI Warning: \_SB.PC00.SPI1.SPFD.CVFD.SID: Insufficient arguments - Caller passed 0, method requires 1 (20240322/nsarguments-232) Fix this by initializing only the integer struct part of the union and initializing both members of the integer struct. Signed-off-by: Hans de Goede Reviewed-by: Sakari Ailus Fixes: 566f5ca97680 ("mei: Add transport driver for IVSC device") Reviewed-by: Wentong Wu Link: https://lore.kernel.org/r/20240603205050.505389-1-hdegoede@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/vsc-fw-loader.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c index ffa4ccd96a104..596a9d695dfc1 100644 --- a/drivers/misc/mei/vsc-fw-loader.c +++ b/drivers/misc/mei/vsc-fw-loader.c @@ -252,7 +252,7 @@ static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader, { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER }; union acpi_object obj = { - .type = ACPI_TYPE_INTEGER, + .integer.type = ACPI_TYPE_INTEGER, .integer.value = 1, }; struct acpi_object_list arg_list = { From 73fedc31fed38cb6039fd8a7efea1774143b68b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Mon, 13 May 2024 09:52:06 +0200 Subject: [PATCH 279/537] parport: amiga: Mark driver struct with __refdata to prevent section mismatch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As described in the added code comment, a reference to .exit.text is ok for drivers registered via module_platform_driver_probe(). Make this explicit to prevent the following section mismatch warning WARNING: modpost: drivers/parport/parport_amiga: section mismatch in reference: amiga_parallel_driver+0x8 (section: .data) -> amiga_parallel_remove (section: .exit.text) that triggers on an allmodconfig W=1 build. Signed-off-by: Uwe Kleine-König Link: https://lore.kernel.org/r/20240513075206.2337310-2-u.kleine-koenig@pengutronix.de Signed-off-by: Greg Kroah-Hartman --- drivers/parport/parport_amiga.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c index e6dc857aac3fe..e06c7b2aac5c4 100644 --- a/drivers/parport/parport_amiga.c +++ b/drivers/parport/parport_amiga.c @@ -229,7 +229,13 @@ static void __exit amiga_parallel_remove(struct platform_device *pdev) parport_put_port(port); } -static struct platform_driver amiga_parallel_driver = { +/* + * amiga_parallel_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound at + * runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. 
+ */ +static struct platform_driver amiga_parallel_driver __refdata = { .remove_new = __exit_p(amiga_parallel_remove), .driver = { .name = "amiga-parallel", From 086c6cbcc563c81d55257f9b27e14faf1d0963d3 Mon Sep 17 00:00:00 2001 From: Yongzhi Liu Date: Thu, 23 May 2024 20:14:33 +0800 Subject: [PATCH 280/537] misc: microchip: pci1xxxx: fix double free in the error handling of gp_aux_bus_probe() When auxiliary_device_add() returns error and then calls auxiliary_device_uninit(), callback function gp_auxiliary_device_release() calls ida_free() and kfree(aux_device_wrapper) to free memory. We should't call them again in the error handling path. Fix this by skipping the redundant cleanup functions. Fixes: 393fc2f5948f ("misc: microchip: pci1xxxx: load auxiliary bus driver for the PIO function in the multi-function endpoint of pci1xxxx device.") Signed-off-by: Yongzhi Liu Link: https://lore.kernel.org/r/20240523121434.21855-3-hyperlyzcs@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c index 32af2b14ff344..de75d89ef53e8 100644 --- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c +++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c @@ -111,6 +111,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id err_aux_dev_add_1: auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev); + goto err_aux_dev_add_0; err_aux_dev_init_1: ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id); @@ -120,6 +121,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id err_aux_dev_add_0: auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev); + goto err_ret; err_aux_dev_init_0: ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id); @@ -127,6 +129,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id err_ida_alloc_0: kfree(aux_bus->aux_device_wrapper[0]); +err_ret: return retval; } From 77427e3d5c353e3dd98c7c0af322f8d9e3131ace Mon Sep 17 00:00:00 2001 From: Yongzhi Liu Date: Thu, 23 May 2024 20:14:34 +0800 Subject: [PATCH 281/537] misc: microchip: pci1xxxx: Fix a memory leak in the error handling of gp_aux_bus_probe() There is a memory leak (forget to free allocated buffers) in a memory allocation failure path. Fix it to jump to the correct error handling code. 
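Both pci1xxxx fixes hinge on the same ownership rule: before auxiliary_device_init() succeeds, the probe must free the ida id and the wrapper by hand; once it succeeds, cleanup must go only through auxiliary_device_uninit(), whose release callback (gp_auxiliary_device_release(), per the previous patch) does the ida_free() and kfree(). A hedged sketch of that unwind shape for a single wrapper (structure and field names approximate; the real probe chains two wrappers through shared labels):

	/* Sketch only: names approximate the pci1xxxx gp driver. */
	static int setup_one_wrapper(struct aux_device_wrapper **out)
	{
		struct aux_device_wrapper *w;
		int ret;

		w = kzalloc(sizeof(*w), GFP_KERNEL);
		if (!w)
			return -ENOMEM;

		ret = ida_alloc(&gp_client_ida, GFP_KERNEL);
		if (ret < 0)
			goto free_wrapper;              /* nothing initialized: manual cleanup */
		w->aux_dev.id = ret;

		ret = auxiliary_device_init(&w->aux_dev);
		if (ret)
			goto free_id;                   /* init failed: still manual cleanup */

		ret = auxiliary_device_add(&w->aux_dev);
		if (ret)
			goto uninit;                    /* release callback now owns id + memory */

		*out = w;
		return 0;

	uninit:
		auxiliary_device_uninit(&w->aux_dev);   /* must NOT also ida_free()/kfree() */
		return ret;
	free_id:
		ida_free(&gp_client_ida, w->aux_dev.id);
	free_wrapper:
		kfree(w);
		return ret;
	}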
Fixes: 393fc2f5948f ("misc: microchip: pci1xxxx: load auxiliary bus driver for the PIO function in the multi-function endpoint of pci1xxxx device.") Signed-off-by: Yongzhi Liu Reviewed-by: Kumaravel Thiagarajan Link: https://lore.kernel.org/r/20240523121434.21855-4-hyperlyzcs@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c index de75d89ef53e8..34c9be437432a 100644 --- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c +++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c @@ -69,8 +69,10 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]), GFP_KERNEL); - if (!aux_bus->aux_device_wrapper[1]) - return -ENOMEM; + if (!aux_bus->aux_device_wrapper[1]) { + retval = -ENOMEM; + goto err_aux_dev_add_0; + } retval = ida_alloc(&gp_client_ida, GFP_KERNEL); if (retval < 0) From 7c55b78818cfb732680c4a72ab270cc2d2ee3d0f Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 14 May 2024 12:06:34 +0200 Subject: [PATCH 282/537] jfs: xattr: fix buffer overflow for invalid xattr When an xattr size is not what is expected, it is printed out to the kernel log in hex format as a form of debugging. But when that xattr size is bigger than the expected size, printing it out can cause an access off the end of the buffer. Fix this all up by properly restricting the size of the debug hex dump in the kernel log. Reported-by: syzbot+9dfe490c8176301c1d06@syzkaller.appspotmail.com Cc: Dave Kleikamp Link: https://lore.kernel.org/r/2024051433-slider-cloning-98f9@gregkh Signed-off-by: Greg Kroah-Hartman --- fs/jfs/xattr.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index 0fb7afac298e1..9987055293b35 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c @@ -557,9 +557,11 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size) size_check: if (EALIST_SIZE(ea_buf->xattr) != ea_size) { + int size = min_t(int, EALIST_SIZE(ea_buf->xattr), ea_size); + printk(KERN_ERR "ea_get: invalid extended attribute\n"); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, - ea_buf->xattr, ea_size, 1); + ea_buf->xattr, size, 1); ea_release(inode, ea_buf); rc = -EIO; goto clean_up; From 616501eccb58615f8f352a29239ea6c6fc5e6546 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Mon, 27 May 2024 09:00:06 +0100 Subject: [PATCH 283/537] clkdev: don't fail clkdev_alloc() if over-sized Don't fail clkdev_alloc() if the strings are over-sized. In this case, the entry will not match during lookup, so its useless. However, since code fails if we return NULL leading to boot failure, return a dummy entry with the connection and device IDs set to "bad". Leave the warning so these problems can be found, and the useless wasteful clkdev registrations removed. Reported-by: Ron Economos Reported-by: Guenter Roeck Fixes: 8d532528ff6a ("clkdev: report over-sized strings when creating clkdev entries") Closes: https://lore.kernel.org/linux-clk/7eda7621-0dde-4153-89e4-172e4c095d01@roeck-us.net. 
Link: https://lore.kernel.org/r/28114882-f8d7-21bf-4536-a186e8d7a22a@w6rz.net Tested-by: Ron Economos Signed-off-by: Russell King (Oracle) --- drivers/clk/clkdev.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 56a12f1da472d..476fb9b7adb5e 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -204,8 +204,15 @@ vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt, pr_err("%pV:%s: %s ID is greater than %zu\n", &vaf, con_id, failure, max_size); va_end(ap_copy); - kfree(cla); - return NULL; + + /* + * Don't fail in this case, but as the entry won't ever match just + * fill it with something that also won't match. + */ + strscpy(cla->con_id, "bad", sizeof(cla->con_id)); + strscpy(cla->dev_id, "bad", sizeof(cla->dev_id)); + + return &cla->cl; } static struct clk_lookup * From c0a40097f0bc81deafc15f9195d1fb54595cd6d0 Mon Sep 17 00:00:00 2001 From: Dirk Behme Date: Mon, 13 May 2024 07:06:34 +0200 Subject: [PATCH 284/537] drivers: core: synchronize really_probe() and dev_uevent() Synchronize the dev->driver usage in really_probe() and dev_uevent(). These can run in different threads, which can result in the following race condition for dev->driver uninitialization: Thread #1: ========== really_probe() { ... probe_failed: ... device_unbind_cleanup(dev) { ... dev->driver = NULL; // <= Failed probe sets dev->driver to NULL ... } ... } Thread #2: ========== dev_uevent() { ... if (dev->driver) // If dev->driver is NULLed from really_probe() from here on, // after above check, the system crashes add_uevent_var(env, "DRIVER=%s", dev->driver->name); ... } really_probe() already holds the lock, so nothing needs to be done there. dev_uevent() is often called with the lock held, too, but not always. This implies that we can't add any locking in dev_uevent() itself. So fix this race by adding the lock to the non-protected path. This is the path where the above race is observed: dev_uevent+0x235/0x380 uevent_show+0x10c/0x1f0 <= Add lock here dev_attr_show+0x3a/0xa0 sysfs_kf_seq_show+0x17c/0x250 kernfs_seq_show+0x7c/0x90 seq_read_iter+0x2d7/0x940 kernfs_fop_read_iter+0xc6/0x310 vfs_read+0x5bc/0x6b0 ksys_read+0xeb/0x1b0 __x64_sys_read+0x42/0x50 x64_sys_call+0x27ad/0x2d30 do_syscall_64+0xcd/0x1d0 entry_SYSCALL_64_after_hwframe+0x77/0x7f Similar cases are reported by syzkaller in https://syzkaller.appspot.com/bug?extid=ffa8143439596313a85a. But those reports are regarding the *initialization* of dev->driver: dev->driver = drv; As this switches dev->driver to non-NULL, these reports can be considered false positives (which should be "fixed" by this commit as well, though). The same issue was already reported, and a fix attempted, back in 2015 in https://lore.kernel.org/lkml/1421259054-2574-1-git-send-email-a.sangwan@samsung.com/ 
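For clarity, here is a minimal, hedged sketch of the pattern the fix applies (the real uevent_show() hunk follows in the diff below; example_uevent() is an invented name): take the device lock before dereferencing dev->driver, so a concurrent probe failure cannot clear the pointer between the check and the use.

```
#include <linux/device.h>
#include <linux/kobject.h>

/* Minimal sketch: serialize against really_probe()/device_unbind_cleanup()
 * before looking at dev->driver. */
static int example_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	int ret = 0;

	device_lock(dev);
	if (dev->driver)	/* cannot be cleared while the lock is held */
		ret = add_uevent_var(env, "DRIVER=%s", dev->driver->name);
	device_unlock(dev);

	return ret;
}
```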
Fixes: 239378f16aa1 ("Driver core: add uevent vars for devices of a class") Cc: stable Cc: syzbot+ffa8143439596313a85a@syzkaller.appspotmail.com Cc: Ashish Sangwan Cc: Namjae Jeon Signed-off-by: Dirk Behme Link: https://lore.kernel.org/r/20240513050634.3964461-1-dirk.behme@de.bosch.com Signed-off-by: Greg Kroah-Hartman --- drivers/base/core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/base/core.c b/drivers/base/core.c index 2e776fcc31b69..2b4c0624b7043 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2739,8 +2739,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, if (!env) return -ENOMEM; + /* Synchronize with really_probe() */ + device_lock(dev); /* let the kset specific function add its keys */ retval = kset->uevent_ops->uevent(&dev->kobj, env); + device_unlock(dev); if (retval) goto out; From c9d52fb313d3719d69a040f4ca78a3e2e95fba21 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 30 May 2024 18:04:24 -0700 Subject: [PATCH 285/537] PCI: Revert the cfg_access_lock lockdep mechanism While the experiment did reveal that there are additional places that are missing the lock during secondary bus reset, one of the places that needs to take cfg_access_lock (pci_bus_lock()) is not prepared for lockdep annotation. Specifically, pci_bus_lock() takes pci_dev_lock() recursively and is currently dependent on the fact that the device_lock() is marked lockdep_set_novalidate_class(&dev->mutex). Otherwise, without that annotation, pci_bus_lock() would need to use something like a new pci_dev_lock_nested() helper, a scheme to track a PCI device's depth in the topology, and a hope that the depth of a PCI tree never exceeds the max value for a lockdep subclass. The alternative to ripping out the lockdep coverage would be to deploy a dynamic lock key for every PCI device. Unfortunately, there is evidence that increasing the number of keys that lockdep needs to track to one per PCI device is prohibitively expensive for something like the cfg_access_lock. The main motivation for adding the annotation in the first place was to catch unlocked secondary bus resets, not necessarily to catch lock ordering problems between cfg_access_lock and other locks. Solve that narrower problem with follow-on patches, and just do the targeted revert for now. 
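Purely to illustrate why that alternative is unattractive (this helper does not exist in the tree; it is only a hypothetical sketch), a depth-based annotation would look roughly like the code below, and lockdep only distinguishes MAX_LOCKDEP_SUBCLASSES (8) subclasses, which a deep PCI topology can exceed.

```
#include <linux/mutex.h>
#include <linux/pci.h>

/* Hypothetical sketch, not from the patch: annotate the device lock with the
 * device's depth in the PCI topology so lockdep could validate the recursive
 * locking done by pci_bus_lock(). The subclass must stay below
 * MAX_LOCKDEP_SUBCLASSES (8), which is exactly the limitation noted above. */
static void pci_dev_lock_nested(struct pci_dev *dev, unsigned int depth)
{
	mutex_lock_nested(&dev->dev.mutex, depth);
	pci_cfg_access_lock(dev);
}
```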
Link: https://lore.kernel.org/r/171711746402.1628941.14575335981264103013.stgit@dwillia2-xfh.jf.intel.com Fixes: 7e89efc6e9e4 ("PCI: Lock upstream bridge for pci_reset_function()") Reported-by: Imre Deak Closes: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_134186v1/shard-dg2-1/igt@device_reset@unbind-reset-rebind.html Signed-off-by: Dan Williams Signed-off-by: Bjorn Helgaas Tested-by: Hans de Goede Tested-by: Kalle Valo Reviewed-by: Dave Jiang Cc: Jani Saarinen --- drivers/pci/access.c | 4 ---- drivers/pci/pci.c | 1 - drivers/pci/probe.c | 3 --- include/linux/lockdep.h | 5 ----- include/linux/pci.h | 2 -- 5 files changed, 15 deletions(-) diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 30f031de9cfe8..b123da16b63ba 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -289,8 +289,6 @@ void pci_cfg_access_lock(struct pci_dev *dev) { might_sleep(); - lock_map_acquire(&dev->cfg_access_lock); - raw_spin_lock_irq(&pci_lock); if (dev->block_cfg_access) pci_wait_cfg(dev); @@ -345,8 +343,6 @@ void pci_cfg_access_unlock(struct pci_dev *dev) raw_spin_unlock_irqrestore(&pci_lock, flags); wake_up_all(&pci_cfg_wait); - - lock_map_release(&dev->cfg_access_lock); } EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 59e0949fb079d..35fb1f17a589c 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4883,7 +4883,6 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev) */ int pci_bridge_secondary_bus_reset(struct pci_dev *dev) { - lock_map_assert_held(&dev->cfg_access_lock); pcibios_reset_secondary_bus(dev); return pci_bridge_wait_for_secondary_bus(dev, "bus reset"); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 8e696e547565c..5fbabb4e3425f 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -2546,9 +2546,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) dev->dev.dma_mask = &dev->dma_mask; dev->dev.dma_parms = &dev->dma_parms; dev->dev.coherent_dma_mask = 0xffffffffull; - lockdep_register_key(&dev->cfg_access_key); - lockdep_init_map(&dev->cfg_access_lock, dev_name(&dev->dev), - &dev->cfg_access_key, 0); dma_set_max_seg_size(&dev->dev, 65536); dma_set_seg_boundary(&dev->dev, 0xffffffff); diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 5e51b0de4c4b5..08b0d1d9d78b7 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -297,9 +297,6 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); .wait_type_inner = _wait_type, \ .lock_type = LD_LOCK_WAIT_OVERRIDE, } -#define lock_map_assert_held(l) \ - lockdep_assert(lock_is_held(l) != LOCK_STATE_NOT_HELD) - #else /* !CONFIG_LOCKDEP */ static inline void lockdep_init_task(struct task_struct *task) @@ -391,8 +388,6 @@ extern int lockdep_is_held(const void *); #define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \ struct lockdep_map __maybe_unused _name = {} -#define lock_map_assert_held(l) do { (void)(l); } while (0) - #endif /* !LOCKDEP */ #ifdef CONFIG_PROVE_LOCKING diff --git a/include/linux/pci.h b/include/linux/pci.h index fb004fd4e8890..cafc5ab1cbcb4 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -413,8 +413,6 @@ struct pci_dev { struct resource driver_exclusive_resource; /* driver exclusive resource ranges */ bool match_driver; /* Skip attaching driver */ - struct lock_class_key cfg_access_key; - struct lockdep_map cfg_access_lock; unsigned int transparent:1; /* Subtractive decode bridge */ unsigned int io_window:1; /* Bridge has I/O window */ From 
dc6abbbde4b099e936cd5428e196d86a5e119aae Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 3 Jun 2024 15:25:23 -0300 Subject: [PATCH 286/537] tools headers arm64: Sync arm64's cputype.h with the kernel sources To get the changes in: 0ce85db6c2141b7f ("arm64: cputype: Add Neoverse-V3 definitions") 02a0a04676fa7796 ("arm64: cputype: Add Cortex-X4 definitions") f4d9d9dcc70b96b5 ("arm64: Add Neoverse-V2 part") That makes this perf source code to be rebuilt: CC /tmp/build/perf-tools/util/arm-spe.o The changes in the above patch add MIDR_NEOVERSE_V[23] and MIDR_NEOVERSE_V1 is used in arm-spe.c, so probably we need to add those and perhaps MIDR_CORTEX_X4 to that array? Or maybe we need to leave this for later when this is all tested on those machines? static const struct midr_range neoverse_spe[] = { MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), {}, }; Mark Rutland recommended about arm-spe.c: "I would not touch this for now -- someone would have to go audit the TRMs to check that those other cores have the same encoding, and I think it'd be better to do that as a follow-up." That addresses this perf build warning: Warning: Kernel ABI header differences: diff -u tools/arch/arm64/include/asm/cputype.h arch/arm64/include/asm/cputype.h Acked-by: Mark Rutland Cc: Adrian Hunter Cc: Besar Wicaksono Cc: Ian Rogers Cc: Jiri Olsa Cc: Kan Liang Cc: Namhyung Kim Cc: Will Deacon Link: https://lore.kernel.org/lkml/Zl8cYk0Tai2fs7aM@x1 Signed-off-by: Arnaldo Carvalho de Melo --- tools/arch/arm64/include/asm/cputype.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h index 52f076afeb960..7b32b99023a21 100644 --- a/tools/arch/arm64/include/asm/cputype.h +++ b/tools/arch/arm64/include/asm/cputype.h @@ -86,6 +86,9 @@ #define ARM_CPU_PART_CORTEX_X2 0xD48 #define ARM_CPU_PART_NEOVERSE_N2 0xD49 #define ARM_CPU_PART_CORTEX_A78C 0xD4B +#define ARM_CPU_PART_NEOVERSE_V2 0xD4F +#define ARM_CPU_PART_CORTEX_X4 0xD82 +#define ARM_CPU_PART_NEOVERSE_V3 0xD84 #define APM_CPU_PART_XGENE 0x000 #define APM_CPU_VAR_POTENZA 0x00 @@ -159,6 +162,9 @@ #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) +#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2) +#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4) +#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) From 0ea00e249ca992adee54dc71a526ee70ef109e40 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 29 May 2024 15:23:25 +0300 Subject: [PATCH 287/537] tpm_tis: Do *not* flush uninitialized work tpm_tis_core_init() may fail before tpm_tis_probe_irq_single() is called, in which case tpm_tis_remove() unconditionally calling flush_work() is triggering a warning for .func still being NULL. 
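The reasoning can be shown with a small hedged sketch (the example_* names are invented, and it assumes the containing structure was zero-allocated, e.g. with kzalloc()): INIT_WORK() is what fills in work->func, so a NULL func means the work item was never initialized and must not be flushed.

```
#include <linux/workqueue.h>

struct example_priv {
	struct work_struct free_irq_work;
};

static void example_irq_work(struct work_struct *work)
{
	/* would release the interrupt here */
}

/* Only runs when the interrupt path was actually probed. */
static void example_setup_irq(struct example_priv *priv, bool irq_probed)
{
	if (irq_probed)
		INIT_WORK(&priv->free_irq_work, example_irq_work);	/* sets .func */
}

static void example_remove(struct example_priv *priv)
{
	/* Assuming priv was zero-allocated, a NULL .func means INIT_WORK()
	 * never ran, and flushing such a work item would trigger a warning. */
	if (priv->free_irq_work.func)
		flush_work(&priv->free_irq_work);
}
```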
Cc: stable@vger.kernel.org # v6.5+ Fixes: 481c2d14627d ("tpm,tpm_tis: Disable interrupts after 1000 unhandled IRQs") Signed-off-by: Jan Beulich Reviewed-by: Jarkko Sakkinen Signed-off-by: Jarkko Sakkinen --- drivers/char/tpm/tpm_tis_core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 176cd8dbf1db2..fdef214b9f6bf 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -1020,7 +1020,8 @@ void tpm_tis_remove(struct tpm_chip *chip) interrupt = 0; tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt); - flush_work(&priv->free_irq_work); + if (priv->free_irq_work.func) + flush_work(&priv->free_irq_work); tpm_tis_clkrun_enable(chip, false); From f071d02ecad4cfbf3ab41807c90bd1fef1cbfd3f Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Wed, 5 Jun 2024 01:28:42 +0300 Subject: [PATCH 288/537] tpm: Switch to new Intel CPU model defines New CPU #defines encode vendor and family as well as model. Link: https://lore.kernel.org/all/20240520224620.9480-4-tony.luck@intel.com/ Signed-off-by: Tony Luck Reviewed-by: Jarkko Sakkinen Signed-off-by: Jarkko Sakkinen --- drivers/char/tpm/tpm.h | 2 +- drivers/char/tpm/tpm_tis_core.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 6b8b9956ba694..7bb87fa5f7a12 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -28,7 +28,7 @@ #include #ifdef CONFIG_X86 -#include +#include #endif #define TPM_MINOR 224 /* officially assigned */ diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index 13e99cf65efe4..690ad8e9b7319 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -210,7 +210,7 @@ static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len, static inline bool is_bsw(void) { #ifdef CONFIG_X86 - return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0); + return (boot_cpu_data.x86_vfm == INTEL_ATOM_AIRMONT) ? 1 : 0; #else return false; #endif From d0d1df8ba18abc57f28fb3bc053b2bf319367f2c Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 4 Jun 2024 17:00:24 +0200 Subject: [PATCH 289/537] bpf: Set run context for rawtp test_run callback syzbot reported crash when rawtp program executed through the test_run interface calls bpf_get_attach_cookie helper or any other helper that touches task->bpf_ctx pointer. Setting the run context (task->bpf_ctx pointer) for test_run callback. 
Fixes: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value") Reported-by: syzbot+3ab78ff125b7979e45f9@syzkaller.appspotmail.com Signed-off-by: Jiri Olsa Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Closes: https://syzkaller.appspot.com/bug?extid=3ab78ff125b7979e45f9 Link: https://lore.kernel.org/bpf/20240604150024.359247-1-jolsa@kernel.org --- net/bpf/test_run.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index f6aad4ed2ab2f..36ae54f57bf57 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -727,10 +727,16 @@ static void __bpf_prog_test_run_raw_tp(void *data) { struct bpf_raw_tp_test_run_info *info = data; + struct bpf_trace_run_ctx run_ctx = {}; + struct bpf_run_ctx *old_run_ctx; + + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); rcu_read_lock(); info->retval = bpf_prog_run(info->prog, info->ctx); rcu_read_unlock(); + + bpf_reset_run_ctx(old_run_ctx); } int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, From 7fcf26b315bbb728036da0862de6b335da83dff2 Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 4 Jun 2024 14:29:25 +0200 Subject: [PATCH 290/537] Revert "xsk: Support redirect to any socket bound to the same umem" This reverts commit 2863d665ea41282379f108e4da6c8a2366ba66db. This patch introduced a potential kernel crash when multiple napi instances redirect to the same AF_XDP socket. By removing the queue_index check, it is possible for multiple napi instances to access the Rx ring at the same time, which will result in a corrupted ring state which can lead to a crash when flushing the rings in __xsk_flush(). This can happen when the linked list of sockets to flush gets corrupted by concurrent accesses. A quick and small fix is not possible, so let us revert this for now. Reported-by: Yuval El-Hanany Signed-off-by: Magnus Karlsson Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/xdp-newbies/8100DBDC-0B7C-49DB-9995-6027F6E63147@radware.com Link: https://lore.kernel.org/bpf/20240604122927.29080-2-magnus.karlsson@gmail.com --- net/xdp/xsk.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 727aa20be4bde..7d1c0986f9bb3 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -313,13 +313,10 @@ static bool xsk_is_bound(struct xdp_sock *xs) static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) { - struct net_device *dev = xdp->rxq->dev; - u32 qid = xdp->rxq->queue_index; - if (!xsk_is_bound(xs)) return -ENXIO; - if (!dev->_rx[qid].pool || xs->umem != dev->_rx[qid].pool->umem) + if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) return -EINVAL; if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) { From 03e38d315f3c5258270ad50f2ae784b6372e87c3 Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 4 Jun 2024 14:29:26 +0200 Subject: [PATCH 291/537] Revert "xsk: Document ability to redirect to any socket bound to the same umem" This reverts commit 968595a93669b6b4f6d1fcf80cf2d97956b6868f. 
Reported-by: Yuval El-Hanany Signed-off-by: Magnus Karlsson Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/xdp-newbies/8100DBDC-0B7C-49DB-9995-6027F6E63147@radware.com Link: https://lore.kernel.org/bpf/20240604122927.29080-3-magnus.karlsson@gmail.com --- Documentation/networking/af_xdp.rst | 33 ++++++++++++----------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst index 72da7057e4cf9..dceeb0d763aa2 100644 --- a/Documentation/networking/af_xdp.rst +++ b/Documentation/networking/af_xdp.rst @@ -329,24 +329,23 @@ XDP_SHARED_UMEM option and provide the initial socket's fd in the sxdp_shared_umem_fd field as you registered the UMEM on that socket. These two sockets will now share one and the same UMEM. -In this case, it is possible to use the NIC's packet steering -capabilities to steer the packets to the right queue. This is not -possible in the previous example as there is only one queue shared -among sockets, so the NIC cannot do this steering as it can only steer -between queues. - -In libxdp (or libbpf prior to version 1.0), you need to use the -xsk_socket__create_shared() API as it takes a reference to a FILL ring -and a COMPLETION ring that will be created for you and bound to the -shared UMEM. You can use this function for all the sockets you create, -or you can use it for the second and following ones and use -xsk_socket__create() for the first one. Both methods yield the same -result. +There is no need to supply an XDP program like the one in the previous +case where sockets were bound to the same queue id and +device. Instead, use the NIC's packet steering capabilities to steer +the packets to the right queue. In the previous example, there is only +one queue shared among sockets, so the NIC cannot do this steering. It +can only steer between queues. + +In libbpf, you need to use the xsk_socket__create_shared() API as it +takes a reference to a FILL ring and a COMPLETION ring that will be +created for you and bound to the shared UMEM. You can use this +function for all the sockets you create, or you can use it for the +second and following ones and use xsk_socket__create() for the first +one. Both methods yield the same result. Note that a UMEM can be shared between sockets on the same queue id and device, as well as between queues on the same device and between -devices at the same time. It is also possible to redirect to any -socket as long as it is bound to the same umem with XDP_SHARED_UMEM. +devices at the same time. XDP_USE_NEED_WAKEUP bind flag ----------------------------- @@ -823,10 +822,6 @@ A: The short answer is no, that is not supported at the moment. The switch, or other distribution mechanism, in your NIC to direct traffic to the correct queue id and socket. - Note that if you are using the XDP_SHARED_UMEM option, it is - possible to switch traffic between any socket bound to the same - umem. - Q: My packets are sometimes corrupted. What is wrong? A: Care has to be taken not to feed the same buffer in the UMEM into From 99280413a5b785f22d91e8a8a66dc38f4a214495 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 4 Jun 2024 17:45:20 +0200 Subject: [PATCH 292/537] efi: Add missing __nocfi annotations to runtime wrappers The EFI runtime wrappers are a sandbox for calling into EFI runtime services, which are invoked using indirect calls. When running with kCFI enabled, the compiler will require the target of any indirect call to be type annotated. 
Given that the EFI runtime services prototypes and calling convention are governed by the EFI spec, not the Linux kernel, adding such type annotations for firmware routines is infeasible, and so the compiler must be informed that prototype validation should be omitted. Add the __nocfi annotation at the appropriate places in the EFI runtime wrapper code to achieve this. Note that this currently only affects 32-bit ARM, given that other architectures that support both kCFI and EFI use an asm wrapper to call EFI runtime services, and this hides the indirect call from the compiler. Fixes: 1a4fec49efe5 ("ARM: 9392/2: Support CLANG CFI") Reviewed-by: Linus Walleij Tested-by: Nathan Chancellor Signed-off-by: Ard Biesheuvel --- drivers/firmware/efi/runtime-wrappers.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index 5d56bc40a79d7..708b777857d34 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c @@ -213,7 +213,7 @@ extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); * Calls the appropriate efi_runtime_service() with the appropriate * arguments. */ -static void efi_call_rts(struct work_struct *work) +static void __nocfi efi_call_rts(struct work_struct *work) { const union efi_rts_args *args = efi_rts_work.args; efi_status_t status = EFI_NOT_FOUND; @@ -435,7 +435,7 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, return status; } -static efi_status_t +static efi_status_t __nocfi virt_efi_set_variable_nb(efi_char16_t *name, efi_guid_t *vendor, u32 attr, unsigned long data_size, void *data) { @@ -469,7 +469,7 @@ static efi_status_t virt_efi_query_variable_info(u32 attr, return status; } -static efi_status_t +static efi_status_t __nocfi virt_efi_query_variable_info_nb(u32 attr, u64 *storage_space, u64 *remaining_space, u64 *max_variable_size) { @@ -499,10 +499,9 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count) return status; } -static void virt_efi_reset_system(int reset_type, - efi_status_t status, - unsigned long data_size, - efi_char16_t *data) +static void __nocfi +virt_efi_reset_system(int reset_type, efi_status_t status, + unsigned long data_size, efi_char16_t *data) { if (down_trylock(&efi_runtime_lock)) { pr_warn("failed to invoke the reset_system() runtime service:\n" From 8b0f7410942cdc420c4557eda02bfcdf60ccec17 Mon Sep 17 00:00:00 2001 From: Subbaraya Sundeep Date: Wed, 29 May 2024 20:59:44 +0530 Subject: [PATCH 293/537] octeontx2-af: Always allocate PF entries from low prioriy zone PF mcam entries has to be at low priority always so that VF can install longest prefix match rules at higher priority. This was taken care currently but when priority allocation wrt reference entry is requested then entries are allocated from mid-zone instead of low priority zone. Fix this and always allocate entries from low priority zone for PFs. Fixes: 7df5b4b260dd ("octeontx2-af: Allocate low priority entries for PF") Signed-off-by: Subbaraya Sundeep Reviewed-by: Jacob Keller Signed-off-by: David S. 
Miller --- .../ethernet/marvell/octeontx2/af/rvu_npc.c | 33 ++++++++++++------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index e8b73b9d75e31..97722ce8c4cb3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -2519,7 +2519,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, * - when available free entries are less. * Lower priority ones out of avaialble free entries are always * chosen when 'high vs low' question arises. + * + * For a VF base MCAM match rule is set by its PF. And all the + * further MCAM rules installed by VF on its own are + * concatenated with the base rule set by its PF. Hence PF entries + * should be at lower priority compared to VF entries. Otherwise + * base rule is hit always and rules installed by VF will be of + * no use. Hence if the request is from PF then allocate low + * priority entries. */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) + goto lprio_alloc; /* Get the search range for priority allocation request */ if (req->priority) { @@ -2528,17 +2538,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, goto alloc; } - /* For a VF base MCAM match rule is set by its PF. And all the - * further MCAM rules installed by VF on its own are - * concatenated with the base rule set by its PF. Hence PF entries - * should be at lower priority compared to VF entries. Otherwise - * base rule is hit always and rules installed by VF will be of - * no use. Hence if the request is from PF and NOT a priority - * allocation request then allocate low priority entries. - */ - if (!(pcifunc & RVU_PFVF_FUNC_MASK)) - goto lprio_alloc; - /* Find out the search range for non-priority allocation request * * Get MCAM free entry count in middle zone. @@ -2568,6 +2567,18 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, reverse = true; start = 0; end = mcam->bmap_entries; + /* Ensure PF requests are always at bottom and if PF requests + * for higher/lower priority entry wrt reference entry then + * honour that criteria and start search for entries from bottom + * and not in mid zone. + */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK) && + req->priority == NPC_MCAM_HIGHER_PRIO) + end = req->ref_entry; + + if (!(pcifunc & RVU_PFVF_FUNC_MASK) && + req->priority == NPC_MCAM_LOWER_PRIO) + start = req->ref_entry; } alloc: From fb0aa0781a5f457e3864da68af52c3b1f4f7fd8f Mon Sep 17 00:00:00 2001 From: Wen Gu Date: Fri, 31 May 2024 16:54:17 +0800 Subject: [PATCH 294/537] net/smc: avoid overwriting when adjusting sock bufsizes When copying smc settings to clcsock, avoid setting clcsock's sk_sndbuf to sysctl_tcp_wmem[1], since this may overwrite the value set by tcp_sndbuf_expand() in TCP connection establishment. And the other setting sk_{snd|rcv}buf to sysctl value in smc_adjust_sock_bufsizes() can also be omitted since the initialization of smc sock and clcsock has set sk_{snd|rcv}buf to smc.sysctl_{w|r}mem or ipv4_sysctl_tcp_{w|r}mem[1]. Fixes: 30c3c4a4497c ("net/smc: Use correct buffer sizes when switching between TCP and SMC") Link: https://lore.kernel.org/r/5eaf3858-e7fd-4db8-83e8-3d7a3e0e9ae2@linux.alibaba.com Signed-off-by: Wen Gu Reviewed-by: Wenjia Zhang Reviewed-by: Gerd Bayer , too. Signed-off-by: David S. 
Miller --- net/smc/af_smc.c | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index e50a286fd0fb7..c5f98c6b25613 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -459,29 +459,11 @@ static int smc_bind(struct socket *sock, struct sockaddr *uaddr, static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk, unsigned long mask) { - struct net *nnet = sock_net(nsk); - nsk->sk_userlocks = osk->sk_userlocks; - if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) { + if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) nsk->sk_sndbuf = osk->sk_sndbuf; - } else { - if (mask == SK_FLAGS_SMC_TO_CLC) - WRITE_ONCE(nsk->sk_sndbuf, - READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1])); - else - WRITE_ONCE(nsk->sk_sndbuf, - 2 * READ_ONCE(nnet->smc.sysctl_wmem)); - } - if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) { + if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) nsk->sk_rcvbuf = osk->sk_rcvbuf; - } else { - if (mask == SK_FLAGS_SMC_TO_CLC) - WRITE_ONCE(nsk->sk_rcvbuf, - READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1])); - else - WRITE_ONCE(nsk->sk_rcvbuf, - 2 * READ_ONCE(nnet->smc.sysctl_rmem)); - } } static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk, From 0a8d3f2e3e8d8aea8af017e14227b91d5989b696 Mon Sep 17 00:00:00 2001 From: Tristram Ha Date: Thu, 30 May 2024 18:38:01 -0700 Subject: [PATCH 295/537] net: phy: Micrel KSZ8061: fix errata solution not taking effect problem KSZ8061 needs to write to a MMD register at driver initialization to fix an errata. This worked in 5.0 kernel but not in newer kernels. The issue is the main phylib code no longer resets PHY at the very beginning. Calling phy resuming code later will reset the chip if it is already powered down at the beginning. This wipes out the MMD register write. Solution is to implement a phy resume function for KSZ8061 to take care of this problem. Fixes: 232ba3a51cc2 ("net: phy: Micrel KSZ8061: link failure after cable connect") Signed-off-by: Tristram Ha Signed-off-by: David S. Miller --- drivers/net/phy/micrel.c | 42 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 8c20cf9375306..5aada7cf3da72 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -866,6 +866,17 @@ static int ksz8061_config_init(struct phy_device *phydev) { int ret; + /* Chip can be powered down by the bootstrap code. */ + ret = phy_read(phydev, MII_BMCR); + if (ret < 0) + return ret; + if (ret & BMCR_PDOWN) { + ret = phy_write(phydev, MII_BMCR, ret & ~BMCR_PDOWN); + if (ret < 0) + return ret; + usleep_range(1000, 2000); + } + ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A); if (ret) return ret; @@ -2135,6 +2146,35 @@ static int ksz9477_resume(struct phy_device *phydev) return 0; } +static int ksz8061_resume(struct phy_device *phydev) +{ + int ret; + + /* This function can be called twice when the Ethernet device is on. */ + ret = phy_read(phydev, MII_BMCR); + if (ret < 0) + return ret; + if (!(ret & BMCR_PDOWN)) + return 0; + + genphy_resume(phydev); + usleep_range(1000, 2000); + + /* Re-program the value after chip is reset. 
*/ + ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A); + if (ret) + return ret; + + /* Enable PHY Interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_ENABLED; + if (phydev->drv->config_intr) + phydev->drv->config_intr(phydev); + } + + return 0; +} + static int kszphy_probe(struct phy_device *phydev) { const struct kszphy_type *type = phydev->drv->driver_data; @@ -5389,7 +5429,7 @@ static struct phy_driver ksphy_driver[] = { .config_intr = kszphy_config_intr, .handle_interrupt = kszphy_handle_interrupt, .suspend = kszphy_suspend, - .resume = kszphy_resume, + .resume = ksz8061_resume, }, { .phy_id = PHY_ID_KSZ9021, .phy_id_mask = 0x000ffffe, From 491aee894a08bc9b8bb52e7363b9d4bc6403f363 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 3 Jun 2024 04:57:55 +0000 Subject: [PATCH 296/537] ionic: fix kernel panic in XDP_TX action In the XDP_TX path, ionic driver sends a packet to the TX path with rx page and corresponding dma address. After tx is done, ionic_tx_clean() frees that page. But RX ring buffer isn't reset to NULL. So, it uses a freed page, which causes kernel panic. BUG: unable to handle page fault for address: ffff8881576c110c PGD 773801067 P4D 773801067 PUD 87f086067 PMD 87efca067 PTE 800ffffea893e060 Oops: Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC KASAN NOPTI CPU: 1 PID: 25 Comm: ksoftirqd/1 Not tainted 6.9.0+ #11 Hardware name: ASUS System Product Name/PRIME Z690-P D4, BIOS 0603 11/01/2021 RIP: 0010:bpf_prog_f0b8caeac1068a55_balancer_ingress+0x3b/0x44f Code: 00 53 41 55 41 56 41 57 b8 01 00 00 00 48 8b 5f 08 4c 8b 77 00 4c 89 f7 48 83 c7 0e 48 39 d8 RSP: 0018:ffff888104e6fa28 EFLAGS: 00010283 RAX: 0000000000000002 RBX: ffff8881576c1140 RCX: 0000000000000002 RDX: ffffffffc0051f64 RSI: ffffc90002d33048 RDI: ffff8881576c110e RBP: ffff888104e6fa88 R08: 0000000000000000 R09: ffffed1027a04a23 R10: 0000000000000000 R11: 0000000000000000 R12: ffff8881b03a21a8 R13: ffff8881589f800f R14: ffff8881576c1100 R15: 00000001576c1100 FS: 0000000000000000(0000) GS:ffff88881ae00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffff8881576c110c CR3: 0000000767a90000 CR4: 00000000007506f0 PKRU: 55555554 Call Trace: ? __die+0x20/0x70 ? page_fault_oops+0x254/0x790 ? __pfx_page_fault_oops+0x10/0x10 ? __pfx_is_prefetch.constprop.0+0x10/0x10 ? search_bpf_extables+0x165/0x260 ? fixup_exception+0x4a/0x970 ? exc_page_fault+0xcb/0xe0 ? asm_exc_page_fault+0x22/0x30 ? 0xffffffffc0051f64 ? bpf_prog_f0b8caeac1068a55_balancer_ingress+0x3b/0x44f ? do_raw_spin_unlock+0x54/0x220 ionic_rx_service+0x11ab/0x3010 [ionic 9180c3001ab627d82bbc5f3ebe8a0decaf6bb864] ? ionic_tx_clean+0x29b/0xc60 [ionic 9180c3001ab627d82bbc5f3ebe8a0decaf6bb864] ? __pfx_ionic_tx_clean+0x10/0x10 [ionic 9180c3001ab627d82bbc5f3ebe8a0decaf6bb864] ? __pfx_ionic_rx_service+0x10/0x10 [ionic 9180c3001ab627d82bbc5f3ebe8a0decaf6bb864] ? ionic_tx_cq_service+0x25d/0xa00 [ionic 9180c3001ab627d82bbc5f3ebe8a0decaf6bb864] ? __pfx_ionic_rx_service+0x10/0x10 [ionic 9180c3001ab627d82bbc5f3ebe8a0decaf6bb864] ionic_cq_service+0x69/0x150 [ionic 9180c3001ab627d82bbc5f3ebe8a0decaf6bb864] ionic_txrx_napi+0x11a/0x540 [ionic 9180c3001ab627d82bbc5f3ebe8a0decaf6bb864] __napi_poll.constprop.0+0xa0/0x440 net_rx_action+0x7e7/0xc30 ? __pfx_net_rx_action+0x10/0x10 Fixes: 8eeed8373e1c ("ionic: Add XDP_TX support") Signed-off-by: Taehee Yoo Reviewed-by: Shannon Nelson Reviewed-by: Brett Creeley Signed-off-by: David S. 
Miller --- drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 5dba6d2d633cb..2427610f4306d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -586,6 +586,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err); goto out_xdp_abort; } + buf_info->page = NULL; stats->xdp_tx++; /* the Tx completion will free the buffers */ From affc18fdc694190ca7575b9a86632a73b9fe043d Mon Sep 17 00:00:00 2001 From: Hangyu Hua Date: Mon, 3 Jun 2024 15:13:03 +0800 Subject: [PATCH 297/537] net: sched: sch_multiq: fix possible OOB write in multiq_tune() q->bands will be assigned to qopt->bands to execute subsequent code logic after kmalloc. So the old q->bands should not be used in kmalloc. Otherwise, an out-of-bounds write will occur. Fixes: c2999f7fb05b ("net: sched: multiq: don't call qdisc_put() while holding tree lock") Signed-off-by: Hangyu Hua Acked-by: Cong Wang Signed-off-by: David S. Miller --- net/sched/sch_multiq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 79e93a19d5fab..06e03f5cd7ce1 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -185,7 +185,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt, qopt->bands = qdisc_dev(sch)->real_num_tx_queues; - removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands), + removed = kmalloc(sizeof(*removed) * (q->max_bands - qopt->bands), GFP_KERNEL); if (!removed) return -ENOMEM; From 1cd4bc987abb2823836cbb8f887026011ccddc8a Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 3 Jun 2024 10:59:26 +0200 Subject: [PATCH 298/537] vxlan: Fix regression when dropping packets due to invalid src addresses Commit f58f45c1e5b9 ("vxlan: drop packets from invalid src-address") has recently been added to vxlan mainly in the context of source address snooping/learning so that when it is enabled, an entry in the FDB is not being created for an invalid address for the corresponding tunnel endpoint. Before commit f58f45c1e5b9 vxlan was similarly behaving as geneve in that it passed through whichever macs were set in the L2 header. It turns out that this change in behavior breaks setups, for example, Cilium with netkit in L3 mode for Pods as well as tunnel mode has been passing before the change in f58f45c1e5b9 for both vxlan and geneve. After mentioned change it is only passing for geneve as in case of vxlan packets are dropped due to vxlan_set_mac() returning false as source and destination macs are zero which for E/W traffic via tunnel is totally fine. Fix it by only opting into the is_valid_ether_addr() check in vxlan_set_mac() when in fact source address snooping/learning is actually enabled in vxlan. This is done by moving the check into vxlan_snoop(). With this change, the Cilium connectivity test suite passes again for both tunnel flavors. Fixes: f58f45c1e5b9 ("vxlan: drop packets from invalid src-address") Signed-off-by: Daniel Borkmann Cc: David Bauer Cc: Ido Schimmel Cc: Nikolay Aleksandrov Cc: Martin KaFai Lau Reviewed-by: Ido Schimmel Reviewed-by: Nikolay Aleksandrov Reviewed-by: David Bauer Signed-off-by: David S. 
Miller --- drivers/net/vxlan/vxlan_core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index f78dd0438843b..567cb3faab709 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -1446,6 +1446,10 @@ static bool vxlan_snoop(struct net_device *dev, struct vxlan_fdb *f; u32 ifindex = 0; + /* Ignore packets from invalid src-address */ + if (!is_valid_ether_addr(src_mac)) + return true; + #if IS_ENABLED(CONFIG_IPV6) if (src_ip->sa.sa_family == AF_INET6 && (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)) @@ -1616,10 +1620,6 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan, if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) return false; - /* Ignore packets from invalid src-address */ - if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) - return false; - /* Get address from the outer IP header */ if (vxlan_get_sk_family(vs) == AF_INET) { saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr; From 712115a24b1a5318c10fc757d48d8f33815a6bfa Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Mon, 3 Jun 2024 17:30:19 +0800 Subject: [PATCH 299/537] selftests: hsr: add missing config for CONFIG_BRIDGE The hsr_redbox.sh test needs to create a bridge for testing. Add the missing CONFIG_BRIDGE option to the config file. Fixes: eafbf0574e05 ("test: hsr: Extend the hsr_redbox.sh to have more SAN devices connected") Signed-off-by: Hangbin Liu Tested-by: Simon Horman Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- tools/testing/selftests/net/hsr/config | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/net/hsr/config b/tools/testing/selftests/net/hsr/config index 22061204fb691..241542441c517 100644 --- a/tools/testing/selftests/net/hsr/config +++ b/tools/testing/selftests/net/hsr/config @@ -2,3 +2,4 @@ CONFIG_IPV6=y CONFIG_NET_SCH_NETEM=m CONFIG_HSR=y CONFIG_VETH=y +CONFIG_BRIDGE=y From 14951beaec93696b092a906baa0f29322cf34004 Mon Sep 17 00:00:00 2001 From: Wei Li Date: Tue, 23 Apr 2024 17:35:01 +0800 Subject: [PATCH 300/537] arm64: armv8_deprecated: Fix warning in isndep cpuhp starting process The function run_all_insn_set_hw_mode() is registered as the startup callback of 'CPUHP_AP_ARM64_ISNDEP_STARTING'; it invokes the set_hw_mode() methods of all emulated instructions. As the STARTING callbacks are not expected to fail, if one of the set_hw_mode() calls fails, e.g. because EL0 mixed-endian is not supported for 'setend', a warning is reported: ``` CPU[2] cannot support the emulation of setend CPU 2 UP state arm64/isndep:starting (136) failed (-22) CPU2: Booted secondary processor 0x0000000002 [0x414fd0c1] ``` To fix it, add a check for INSN_UNAVAILABLE status and skip the process. 
Signed-off-by: Wei Li Tested-by: Huisong Li Link: https://lore.kernel.org/r/20240423093501.3460764-1-liwei391@huawei.com Signed-off-by: Will Deacon --- arch/arm64/kernel/armv8_deprecated.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index dd6ce86d4332b..b776e7424fe91 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -462,6 +462,9 @@ static int run_all_insn_set_hw_mode(unsigned int cpu) for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) { struct insn_emulation *insn = insn_emulations[i]; bool enable = READ_ONCE(insn->current_mode) == INSN_HW; + if (insn->status == INSN_UNAVAILABLE) + continue; + if (insn->set_hw_mode && insn->set_hw_mode(enable)) { pr_warn("CPU[%u] cannot support the emulation of %s", cpu, insn->name); From db574f2f96d0c9a245a9e787e3d9ec288fb2b445 Mon Sep 17 00:00:00 2001 From: Tao Su Date: Tue, 28 May 2024 18:22:34 +0800 Subject: [PATCH 301/537] KVM: x86/mmu: Don't save mmu_invalidate_seq after checking private attr Drop the second snapshot of mmu_invalidate_seq in kvm_faultin_pfn(). Before checking the mismatch of private vs. shared, mmu_invalidate_seq is saved to fault->mmu_seq, which can be used to detect an invalidation related to the gfn occurred, i.e. KVM will not install a mapping in page table if fault->mmu_seq != mmu_invalidate_seq. Currently there is a second snapshot of mmu_invalidate_seq, which may not be same as the first snapshot in kvm_faultin_pfn(), i.e. the gfn attribute may be changed between the two snapshots, but the gfn may be mapped in page table without hindrance. Therefore, drop the second snapshot as it has no obvious benefits. Fixes: f6adeae81f35 ("KVM: x86/mmu: Handle no-slot faults at the beginning of kvm_faultin_pfn()") Signed-off-by: Tao Su Message-ID: <20240528102234.2162763-1-tao1.su@linux.intel.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/mmu.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index b7b4426a7221d..8d74bdef68c1d 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4411,9 +4411,6 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, return RET_PF_EMULATE; } - fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq; - smp_rmb(); - /* * Check for a relevant mmu_notifier invalidation event before getting * the pfn from the primary MMU, and before acquiring mmu_lock. From a46d0ea5c94205f40ecf912d1bb7806a8a64704f Mon Sep 17 00:00:00 2001 From: Jason Xing Date: Tue, 4 Jun 2024 01:02:16 +0800 Subject: [PATCH 302/537] tcp: count CLOSE-WAIT sockets for TCP_MIB_CURRESTAB According to RFC 1213, we should also take CLOSE-WAIT sockets into consideration: "tcpCurrEstab OBJECT-TYPE ... The number of TCP connections for which the current state is either ESTABLISHED or CLOSE- WAIT." After this, CurrEstab counter will display the total number of ESTABLISHED and CLOSE-WAIT sockets. The logic of counting When we increment the counter? a) if we change the state to ESTABLISHED. b) if we change the state from SYN-RECEIVED to CLOSE-WAIT. When we decrement the counter? a) if the socket leaves ESTABLISHED and will never go into CLOSE-WAIT, say, on the client side, changing from ESTABLISHED to FIN-WAIT-1. b) if the socket leaves CLOSE-WAIT, say, on the server side, changing from CLOSE-WAIT to LAST-ACK. Please note: there are two chances that old state of socket can be changed to CLOSE-WAIT in tcp_fin(). 
One is SYN-RECV, the other is ESTABLISHED. So we have to take care of the former case. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Jason Xing Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4d8cc2ebb64c7..e6790ea748773 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2649,6 +2649,10 @@ void tcp_set_state(struct sock *sk, int state) if (oldstate != TCP_ESTABLISHED) TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); break; + case TCP_CLOSE_WAIT: + if (oldstate == TCP_SYN_RECV) + TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); + break; case TCP_CLOSE: if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) @@ -2660,7 +2664,7 @@ void tcp_set_state(struct sock *sk, int state) inet_put_port(sk); fallthrough; default: - if (oldstate == TCP_ESTABLISHED) + if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT) TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); } From 9633e9377e6af0244f7381e86b9aac5276f5be97 Mon Sep 17 00:00:00 2001 From: Jason Xing Date: Tue, 4 Jun 2024 01:02:17 +0800 Subject: [PATCH 303/537] mptcp: count CLOSE-WAIT sockets for MPTCP_MIB_CURRESTAB Like previous patch does in TCP, we need to adhere to RFC 1213: "tcpCurrEstab OBJECT-TYPE ... The number of TCP connections for which the current state is either ESTABLISHED or CLOSE- WAIT." So let's consider CLOSE-WAIT sockets. The logic of counting When we increment the counter? a) Only if we change the state to ESTABLISHED. When we decrement the counter? a) if the socket leaves ESTABLISHED and will never go into CLOSE-WAIT, say, on the client side, changing from ESTABLISHED to FIN-WAIT-1. b) if the socket leaves CLOSE-WAIT, say, on the server side, changing from CLOSE-WAIT to LAST-ACK. Fixes: d9cd27b8cd19 ("mptcp: add CurrEstab MIB counter support") Signed-off-by: Jason Xing Reviewed-by: Matthieu Baerts (NGI0) Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 7d44196ec5b63..96b113854bd3c 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2916,9 +2916,14 @@ void mptcp_set_state(struct sock *sk, int state) if (oldstate != TCP_ESTABLISHED) MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB); break; - + case TCP_CLOSE_WAIT: + /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state: + * MPTCP "accepted" sockets will be created later on. So no + * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT. + */ + break; default: - if (oldstate == TCP_ESTABLISHED) + if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT) MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB); } From 5b4b62a169e10401cca34a6e7ac39161986f5605 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 3 Jun 2024 11:48:26 -0700 Subject: [PATCH 304/537] rtnetlink: make the "split" NLM_DONE handling generic Jaroslav reports Dell's OMSA Systems Management Data Engine expects NLM_DONE in a separate recvmsg(), both for rtnl_dump_ifinfo() and inet_dump_ifaddr(). We already added a similar fix previously in commit 460b0d33cf10 ("inet: bring NLM_DONE out to a separate recv() again") Instead of modifying all the dump handlers, and making them look different than modern for_each_netdev_dump()-based dump handlers - put the workaround in rtnetlink code. This will also help us move the custom rtnl-locking from af_netlink in the future (in net-next). 
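To make the userspace expectation concrete, the sketch below (not taken from any of the affected applications; error handling trimmed) shows how a legacy dump consumer loops over recv() calls and treats NLMSG_DONE as the signal to stop; parsers of this style may implicitly assume the DONE message arrives in its own datagram rather than glued to the last RTM_* reply.

```
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <sys/socket.h>
#include <unistd.h>

/* Sketch of a legacy dump consumer: it keeps reading datagrams until one
 * containing NLMSG_DONE arrives. */
static int example_consume_dump(int fd)
{
	char buf[16384];

	for (;;) {
		int len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh;

		if (len <= 0)
			return -1;

		for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
		     nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == NLMSG_DONE)
				return 0;
			if (nlh->nlmsg_type == NLMSG_ERROR)
				return -1;
			/* parse the RTM_NEWLINK/RTM_NEWADDR payload here */
		}
	}
}
```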
Note that this change is not touching rtnl_dump_all(). rtnl_dump_all() is different kettle of fish and a potential problem. We now mix families in a single recvmsg(), but NLM_DONE is not coalesced. Tested: ./cli.py --dbg-small-recv 4096 --spec netlink/specs/rt_addr.yaml \ --dump getaddr --json '{"ifa-family": 2}' ./cli.py --dbg-small-recv 4096 --spec netlink/specs/rt_route.yaml \ --dump getroute --json '{"rtm-family": 2}' ./cli.py --dbg-small-recv 4096 --spec netlink/specs/rt_link.yaml \ --dump getlink Fixes: 3e41af90767d ("rtnetlink: use xarray iterator to implement rtnl_dump_ifinfo()") Fixes: cdb2f80f1c10 ("inet: use xa_array iterator to implement inet_dump_ifaddr()") Reported-by: Jaroslav Pulchart Link: https://lore.kernel.org/all/CAK8fFZ7MKoFSEzMBDAOjoUt+vTZRRQgLDNXEOfdCCXSoXXKE0g@mail.gmail.com Signed-off-by: Jakub Kicinski Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/rtnetlink.h | 1 + net/core/rtnetlink.c | 44 +++++++++++++++++++++++++++++++++++++++-- net/ipv4/devinet.c | 2 +- net/ipv4/fib_frontend.c | 7 +------ 4 files changed, 45 insertions(+), 9 deletions(-) diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 3bfb80bad1739..b45d57b5968af 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -13,6 +13,7 @@ enum rtnl_link_flags { RTNL_FLAG_DOIT_UNLOCKED = BIT(0), RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1), RTNL_FLAG_DUMP_UNLOCKED = BIT(2), + RTNL_FLAG_DUMP_SPLIT_NLM_DONE = BIT(3), /* legacy behavior */ }; enum rtnl_kinds { diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b86b0a87367dd..4668d67180407 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -6484,6 +6484,46 @@ static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, /* Process one rtnetlink message. */ +static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb) +{ + rtnl_dumpit_func dumpit = cb->data; + int err; + + /* Previous iteration have already finished, avoid calling->dumpit() + * again, it may not expect to be called after it reached the end. + */ + if (!dumpit) + return 0; + + err = dumpit(skb, cb); + + /* Old dump handlers used to send NLM_DONE as in a separate recvmsg(). + * Some applications which parse netlink manually depend on this. + */ + if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) { + if (err < 0 && err != -EMSGSIZE) + return err; + if (!err) + cb->data = NULL; + + return skb->len; + } + return err; +} + +static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb, + const struct nlmsghdr *nlh, + struct netlink_dump_control *control) +{ + if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) { + WARN_ON(control->data); + control->data = control->dump; + control->dump = rtnl_dumpit; + } + + return netlink_dump_start(ssk, skb, nlh, control); +} + static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { @@ -6548,7 +6588,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, .module = owner, .flags = flags, }; - err = netlink_dump_start(rtnl, skb, nlh, &c); + err = rtnetlink_dump_start(rtnl, skb, nlh, &c); /* netlink_dump_start() will keep a reference on * module if dump is still in progress. 
*/ @@ -6694,7 +6734,7 @@ void __init rtnetlink_init(void) register_netdevice_notifier(&rtnetlink_dev_notifier); rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, - rtnl_dump_ifinfo, 0); + rtnl_dump_ifinfo, RTNL_FLAG_DUMP_SPLIT_NLM_DONE); rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0); rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0); rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index f3892ee9dfb33..d09f557eaa779 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -2805,7 +2805,7 @@ void __init devinet_init(void) rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0); rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0); rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, - RTNL_FLAG_DUMP_UNLOCKED); + RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE); rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf, inet_netconf_dump_devconf, RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index c484b1c0fc00a..7ad2cafb92763 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1050,11 +1050,6 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb) e++; } } - - /* Don't let NLM_DONE coalesce into a message, even if it could. - * Some user space expects NLM_DONE in a separate recv(). - */ - err = skb->len; out: cb->args[1] = e; @@ -1665,5 +1660,5 @@ void __init ip_fib_init(void) rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0); rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0); rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, - RTNL_FLAG_DUMP_UNLOCKED); + RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE); } From 5c40e428aea644c9d924e491b1bc22fa9f272bcc Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 4 Jun 2024 22:59:57 +0200 Subject: [PATCH 305/537] arm64/io: add constant-argument check In some configurations __const_iowrite32_copy() does not get inlined and gcc runs into the BUILD_BUG(): In file included from : In function '__const_memcpy_toio_aligned32', inlined from '__const_iowrite32_copy' at arch/arm64/include/asm/io.h:203:3, inlined from '__const_iowrite32_copy' at arch/arm64/include/asm/io.h:199:20: include/linux/compiler_types.h:487:45: error: call to '__compiletime_assert_538' declared with attribute error: BUILD_BUG failed 487 | _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) | ^ include/linux/compiler_types.h:468:25: note: in definition of macro '__compiletime_assert' 468 | prefix ## suffix(); \ | ^~~~~~ include/linux/compiler_types.h:487:9: note: in expansion of macro '_compiletime_assert' 487 | _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) | ^~~~~~~~~~~~~~~~~~~ include/linux/build_bug.h:39:37: note: in expansion of macro 'compiletime_assert' 39 | #define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) | ^~~~~~~~~~~~~~~~~~ include/linux/build_bug.h:59:21: note: in expansion of macro 'BUILD_BUG_ON_MSG' 59 | #define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") | ^~~~~~~~~~~~~~~~ arch/arm64/include/asm/io.h:193:17: note: in expansion of macro 'BUILD_BUG' 193 | BUILD_BUG(); | ^~~~~~~~~ Move the check for constant arguments into the inline function to ensure it is still constant if the compiler decides against inlining it, and mark them as __always_inline to override the logic that sometimes leads to the compiler not 
producing the simplified output. Note that either the __always_inline annotation or the check for a constant value are sufficient here, but combining the two looks cleaner as it also avoids the macro. With clang-8 and older, the macro was still needed, but all versions of gcc and clang can reliably perform constant folding here. Fixes: ead79118dae6 ("arm64/io: Provide a WC friendly __iowriteXX_copy()") Signed-off-by: Arnd Bergmann Reviewed-by: Mark Rutland Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20240604210006.668912-1-arnd@kernel.org Signed-off-by: Will Deacon --- arch/arm64/include/asm/io.h | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 4ff0ae3f6d669..41fd90895dfc3 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -153,8 +153,9 @@ extern void __memset_io(volatile void __iomem *, int, size_t); * emit the large TLP from the CPU. */ -static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to, - const u32 *from, size_t count) +static __always_inline void +__const_memcpy_toio_aligned32(volatile u32 __iomem *to, const u32 *from, + size_t count) { switch (count) { case 8: @@ -196,24 +197,22 @@ static inline void __const_memcpy_toio_aligned32(volatile u32 __iomem *to, void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count); -static inline void __const_iowrite32_copy(void __iomem *to, const void *from, - size_t count) +static __always_inline void +__iowrite32_copy(void __iomem *to, const void *from, size_t count) { - if (count == 8 || count == 4 || count == 2 || count == 1) { + if (__builtin_constant_p(count) && + (count == 8 || count == 4 || count == 2 || count == 1)) { __const_memcpy_toio_aligned32(to, from, count); dgh(); } else { __iowrite32_copy_full(to, from, count); } } +#define __iowrite32_copy __iowrite32_copy -#define __iowrite32_copy(to, from, count) \ - (__builtin_constant_p(count) ? \ - __const_iowrite32_copy(to, from, count) : \ - __iowrite32_copy_full(to, from, count)) - -static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to, - const u64 *from, size_t count) +static __always_inline void +__const_memcpy_toio_aligned64(volatile u64 __iomem *to, const u64 *from, + size_t count) { switch (count) { case 8: @@ -255,21 +254,18 @@ static inline void __const_memcpy_toio_aligned64(volatile u64 __iomem *to, void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count); -static inline void __const_iowrite64_copy(void __iomem *to, const void *from, - size_t count) +static __always_inline void +__iowrite64_copy(void __iomem *to, const void *from, size_t count) { - if (count == 8 || count == 4 || count == 2 || count == 1) { + if (__builtin_constant_p(count) && + (count == 8 || count == 4 || count == 2 || count == 1)) { __const_memcpy_toio_aligned64(to, from, count); dgh(); } else { __iowrite64_copy_full(to, from, count); } } - -#define __iowrite64_copy(to, from, count) \ - (__builtin_constant_p(count) ? \ - __const_iowrite64_copy(to, from, count) : \ - __iowrite64_copy_full(to, from, count)) +#define __iowrite64_copy __iowrite64_copy /* * I/O memory mapping functions. 
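The trick is easier to see outside the kernel. Below is a small stand-alone C sketch (illustrative only; the names are invented and nothing here is arm64 or kernel code) of the same pattern: with the __builtin_constant_p() check inside an __always_inline helper, a literal count is still a compile-time constant after inlining, so the constant-size fast path survives without a wrapper macro, while a runtime count falls through to the out-of-line copy.

```
/*
 * Stand-alone illustration (not kernel code) of the pattern above:
 * a __builtin_constant_p() check inside an always_inline helper still
 * folds for literal arguments after inlining, so the fast path is
 * selected at compile time and the wrapper macro becomes unnecessary.
 */
#include <stdio.h>

static void copy_words_full(const unsigned int *src, unsigned long count)
{
	for (unsigned long i = 0; i < count; i++)
		printf("full: word %lu = %u\n", i, src[i]);
}

static inline __attribute__((always_inline))
void copy_words(const unsigned int *src, unsigned long count)
{
	if (__builtin_constant_p(count) &&
	    (count == 8 || count == 4 || count == 2 || count == 1)) {
		/* stands in for the unrolled, aligned MMIO stores */
		for (unsigned long i = 0; i < count; i++)
			printf("fast: word %lu = %u\n", i, src[i]);
	} else {
		copy_words_full(src, count);
	}
}

int main(void)
{
	unsigned int buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	volatile unsigned long runtime_count = 3;

	copy_words(buf, 4);             /* constant count: fast path */
	copy_words(buf, runtime_count); /* runtime count: full copy */
	return 0;
}
```

Compiling this with -O2 and inspecting the generated code shows the branch in the first call resolved at compile time, which is the property the BUILD_BUG()-style assert in the kernel header relies on.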
From c57e558194430d10d5e5f4acd8a8655b68dade13 Mon Sep 17 00:00:00 2001 From: Frank Wunderlich Date: Mon, 3 Jun 2024 21:25:05 +0200 Subject: [PATCH 306/537] net: ethernet: mtk_eth_soc: handle dma buffer size soc specific The mainline MTK ethernet driver has long suffered from rare but annoying TX queue timeouts. We think these are caused by the fixed DMA ring sizes hardcoded for all SoCs. We suspect the problem arises from a low number of free TX DMADs, i.e. the TX ring being almost full. The transmit timeout is caused by the TX queue not waking up. The TX queue stops when the free counter is less than ring->thres, and it wakes up once the free counter is greater than ring->thres. If the CPU is too late to wake up the TX queues, it may cause a transmit timeout. Therefore, increase the number of TX and RX DMADs to improve this situation. Use the DMA size implementation from the SDK in a per-SoC manner. Unlike the SDK, we have no RSS feature yet, so all RX/TX ring sizes are raised from 512 to 2048 entries, except the FQ DMA ring on MT7988, to avoid the TX timeout issue. Fixes: 656e705243fd ("net-next: mediatek: add support for MT7623 ethernet") Suggested-by: Daniel Golle Signed-off-by: Frank Wunderlich Reviewed-by: Jacob Keller Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 104 +++++++++++++------- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 9 +- 2 files changed, 77 insertions(+), 36 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index cae46290a7aee..c84ce54a84a00 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1131,9 +1131,9 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) { const struct mtk_soc_data *soc = eth->soc; dma_addr_t phy_ring_tail; - int cnt = MTK_QDMA_RING_SIZE; + int cnt = soc->tx.fq_dma_size; dma_addr_t dma_addr; - int i; + int i, j, len; if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) eth->scratch_ring = eth->sram_base; @@ -1142,40 +1142,46 @@ static int mtk_init_fq_dma(struct mtk_eth *eth) cnt * soc->tx.desc_size, &eth->phy_scratch_ring, GFP_KERNEL); + if (unlikely(!eth->scratch_ring)) return -ENOMEM; - eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); - if (unlikely(!eth->scratch_head)) - return -ENOMEM; + phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1); - dma_addr = dma_map_single(eth->dma_dev, - eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) - return -ENOMEM; + for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) { + len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH); + eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); - phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1); + if (unlikely(!eth->scratch_head[j])) + return -ENOMEM; - for (i = 0; i < cnt; i++) { - dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE; - struct mtk_tx_dma_v2 *txd; + dma_addr = dma_map_single(eth->dma_dev, + eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE, + DMA_FROM_DEVICE); - txd = eth->scratch_ring + i * soc->tx.desc_size; - txd->txd1 = addr; - if (i < cnt - 1) - txd->txd2 = eth->phy_scratch_ring + - (i + 1) * soc->tx.desc_size; + if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) + return -ENOMEM; - txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); - if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA)) - txd->txd3 |= TX_DMA_PREP_ADDR64(addr); - txd->txd4 = 0; - if
(mtk_is_netsys_v2_or_greater(eth)) { - txd->txd5 = 0; - txd->txd6 = 0; - txd->txd7 = 0; - txd->txd8 = 0; + for (i = 0; i < cnt; i++) { + struct mtk_tx_dma_v2 *txd; + + txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size; + txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; + if (j * MTK_FQ_DMA_LENGTH + i < cnt) + txd->txd2 = eth->phy_scratch_ring + + (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size; + + txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); + if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA)) + txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE); + + txd->txd4 = 0; + if (mtk_is_netsys_v2_or_greater(eth)) { + txd->txd5 = 0; + txd->txd6 = 0; + txd->txd7 = 0; + txd->txd8 = 0; + } } } @@ -2457,7 +2463,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth) if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) ring_size = MTK_QDMA_RING_SIZE; else - ring_size = MTK_DMA_SIZE; + ring_size = soc->tx.dma_size; ring->buf = kcalloc(ring_size, sizeof(*ring->buf), GFP_KERNEL); @@ -2465,8 +2471,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth) goto no_tx_mem; if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) { - ring->dma = eth->sram_base + ring_size * sz; - ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz; + ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz; + ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz; } else { ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, &ring->phys, GFP_KERNEL); @@ -2588,6 +2594,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) { const struct mtk_reg_map *reg_map = eth->soc->reg_map; + const struct mtk_soc_data *soc = eth->soc; struct mtk_rx_ring *ring; int rx_data_len, rx_dma_size, tx_ring_size; int i; @@ -2595,7 +2602,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) tx_ring_size = MTK_QDMA_RING_SIZE; else - tx_ring_size = MTK_DMA_SIZE; + tx_ring_size = soc->tx.dma_size; if (rx_flag == MTK_RX_FLAGS_QDMA) { if (ring_no) @@ -2610,7 +2617,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) rx_dma_size = MTK_HW_LRO_DMA_SIZE; } else { rx_data_len = ETH_DATA_LEN; - rx_dma_size = MTK_DMA_SIZE; + rx_dma_size = soc->rx.dma_size; } ring->frag_size = mtk_max_frag_size(rx_data_len); @@ -3139,7 +3146,10 @@ static void mtk_dma_free(struct mtk_eth *eth) mtk_rx_clean(eth, &eth->rx_ring[i], false); } - kfree(eth->scratch_head); + for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) { + kfree(eth->scratch_head[i]); + eth->scratch_head[i] = NULL; + } } static bool mtk_hw_reset_check(struct mtk_eth *eth) @@ -5052,11 +5062,14 @@ static const struct mtk_soc_data mt2701_data = { .desc_size = sizeof(struct mtk_tx_dma), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), .irq_done_mask = MTK_RX_DONE_INT, .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5076,11 +5089,14 @@ static const struct mtk_soc_data mt7621_data = { .desc_size = sizeof(struct mtk_tx_dma), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), .irq_done_mask = MTK_RX_DONE_INT, .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len =
MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5102,11 +5118,14 @@ static const struct mtk_soc_data mt7622_data = { .desc_size = sizeof(struct mtk_tx_dma), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), .irq_done_mask = MTK_RX_DONE_INT, .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5127,11 +5146,14 @@ static const struct mtk_soc_data mt7623_data = { .desc_size = sizeof(struct mtk_tx_dma), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), .irq_done_mask = MTK_RX_DONE_INT, .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5150,11 +5172,14 @@ static const struct mtk_soc_data mt7629_data = { .desc_size = sizeof(struct mtk_tx_dma), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), .irq_done_mask = MTK_RX_DONE_INT, .dma_l4_valid = RX_DMA_L4_VALID, + .dma_size = MTK_DMA_SIZE(2K), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, }, @@ -5176,6 +5201,8 @@ static const struct mtk_soc_data mt7981_data = { .desc_size = sizeof(struct mtk_tx_dma_v2), .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), @@ -5183,6 +5210,7 @@ static const struct mtk_soc_data mt7981_data = { .dma_l4_valid = RX_DMA_L4_VALID_V2, .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), }, }; @@ -5202,6 +5230,8 @@ static const struct mtk_soc_data mt7986_data = { .desc_size = sizeof(struct mtk_tx_dma_v2), .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), @@ -5209,6 +5239,7 @@ static const struct mtk_soc_data mt7986_data = { .dma_l4_valid = RX_DMA_L4_VALID_V2, .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), }, }; @@ -5228,6 +5259,8 @@ static const struct mtk_soc_data mt7988_data = { .desc_size = sizeof(struct mtk_tx_dma_v2), .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, + .dma_size = MTK_DMA_SIZE(2K), + .fq_dma_size = MTK_DMA_SIZE(4K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma_v2), @@ -5235,6 +5268,7 @@ static const struct mtk_soc_data mt7988_data = { .dma_l4_valid = RX_DMA_L4_VALID_V2, .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, .dma_len_offset = 8, + .dma_size = MTK_DMA_SIZE(2K), }, }; @@ -5249,6 +5283,7 @@ static const struct mtk_soc_data rt5350_data = { .desc_size = sizeof(struct mtk_tx_dma), .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), }, .rx = { .desc_size = sizeof(struct mtk_rx_dma), @@ -5256,6 +5291,7 @@ static const struct mtk_soc_data rt5350_data = { .dma_l4_valid = RX_DMA_L4_VALID_PDMA, .dma_max_len = MTK_TX_DMA_BUF_LEN, .dma_len_offset = 16, + .dma_size = MTK_DMA_SIZE(2K), }, }; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 4eab30b440706..f5174f6cb1bbe 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ 
b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -32,7 +32,9 @@ #define MTK_TX_DMA_BUF_LEN 0x3fff #define MTK_TX_DMA_BUF_LEN_V2 0xffff #define MTK_QDMA_RING_SIZE 2048 -#define MTK_DMA_SIZE 512 +#define MTK_DMA_SIZE(x) (SZ_##x) +#define MTK_FQ_DMA_HEAD 32 +#define MTK_FQ_DMA_LENGTH 2048 #define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) #define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) #define MTK_DMA_DUMMY_DESC 0xffffffff @@ -1176,6 +1178,8 @@ struct mtk_soc_data { u32 desc_size; u32 dma_max_len; u32 dma_len_offset; + u32 dma_size; + u32 fq_dma_size; } tx; struct { u32 desc_size; @@ -1183,6 +1187,7 @@ struct mtk_soc_data { u32 dma_l4_valid; u32 dma_max_len; u32 dma_len_offset; + u32 dma_size; } rx; }; @@ -1264,7 +1269,7 @@ struct mtk_eth { struct napi_struct rx_napi; void *scratch_ring; dma_addr_t phy_scratch_ring; - void *scratch_head; + void *scratch_head[MTK_FQ_DMA_HEAD]; struct clk *clks[MTK_CLK_MAX]; struct mii_bus *mii_bus; From 33afbfcc105a572159750f2ebee834a8a70fdd96 Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Tue, 4 Jun 2024 00:04:42 +0300 Subject: [PATCH 307/537] net/mlx5: Stop waiting for PCI if pci channel is offline In case pci channel becomes offline the driver should not wait for PCI reads during health dump and recovery flow. The driver has timeout for each of these loops trying to read PCI, so it would fail anyway. However, in case of recovery waiting till timeout may cause the pci error_detected() callback fail to meet pci_dpc_recovered() wait timeout. Fixes: b3bd076f7501 ("net/mlx5: Report devlink health on FW fatal issues") Signed-off-by: Moshe Shemesh Reviewed-by: Shay Drori Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 4 ++++ drivers/net/ethernet/mellanox/mlx5/core/health.c | 8 ++++++++ drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c | 4 ++++ 3 files changed, 16 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 2d95a9b7b44e1..b61b7d9661141 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev) do { if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) break; + if (pci_channel_offline(dev->pdev)) { + mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n"); + return -EACCES; + } cond_resched(); } while (!time_after(jiffies, end)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index ad38e31822df1..a6329ca2d9bff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -248,6 +248,10 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev) do { if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) break; + if (pci_channel_offline(dev->pdev)) { + mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n"); + goto unlock; + } msleep(20); } while (!time_after(jiffies, end)); @@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev) mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n"); return -ENODEV; } + if (pci_channel_offline(dev->pdev)) { + mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n"); + return -EACCES; + } msleep(100); } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c 
b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c index 6b774e0c27665..d0b595ba61101 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c @@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev) ret = -EBUSY; goto pci_unlock; } + if (pci_channel_offline(dev->pdev)) { + ret = -EACCES; + goto pci_unlock; + } /* Check if semaphore is already locked */ ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val); From c8b3f38d2dae0397944814d691a419c451f9906f Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Tue, 4 Jun 2024 00:04:43 +0300 Subject: [PATCH 308/537] net/mlx5: Always stop health timer during driver removal Currently, if teardown_hca fails to execute during driver removal, mlx5 does not stop the health timer. Afterwards, mlx5 continue with driver teardown. This may lead to a UAF bug, which results in page fault Oops[1], since the health timer invokes after resources were freed. Hence, stop the health monitor even if teardown_hca fails. [1] mlx5_core 0000:18:00.0: E-Switch: Unload vfs: mode(LEGACY), nvfs(0), necvfs(0), active vports(0) mlx5_core 0000:18:00.0: E-Switch: Disable: mode(LEGACY), nvfs(0), necvfs(0), active vports(0) mlx5_core 0000:18:00.0: E-Switch: Disable: mode(LEGACY), nvfs(0), necvfs(0), active vports(0) mlx5_core 0000:18:00.0: E-Switch: cleanup mlx5_core 0000:18:00.0: wait_func:1155:(pid 1967079): TEARDOWN_HCA(0x103) timeout. Will cause a leak of a command resource mlx5_core 0000:18:00.0: mlx5_function_close:1288:(pid 1967079): tear_down_hca failed, skip cleanup BUG: unable to handle page fault for address: ffffa26487064230 PGD 100c00067 P4D 100c00067 PUD 100e5a067 PMD 105ed7067 PTE 0 Oops: 0000 [#1] PREEMPT SMP PTI CPU: 0 PID: 0 Comm: swapper/0 Tainted: G OE ------- --- 6.7.0-68.fc38.x86_64 #1 Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0013.121520200651 12/15/2020 RIP: 0010:ioread32be+0x34/0x60 RSP: 0018:ffffa26480003e58 EFLAGS: 00010292 RAX: ffffa26487064200 RBX: ffff9042d08161a0 RCX: ffff904c108222c0 RDX: 000000010bbf1b80 RSI: ffffffffc055ddb0 RDI: ffffa26487064230 RBP: ffff9042d08161a0 R08: 0000000000000022 R09: ffff904c108222e8 R10: 0000000000000004 R11: 0000000000000441 R12: ffffffffc055ddb0 R13: ffffa26487064200 R14: ffffa26480003f00 R15: ffff904c108222c0 FS: 0000000000000000(0000) GS:ffff904c10800000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffffa26487064230 CR3: 00000002c4420006 CR4: 00000000007706f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: ? __die+0x23/0x70 ? page_fault_oops+0x171/0x4e0 ? exc_page_fault+0x175/0x180 ? asm_exc_page_fault+0x26/0x30 ? __pfx_poll_health+0x10/0x10 [mlx5_core] ? __pfx_poll_health+0x10/0x10 [mlx5_core] ? ioread32be+0x34/0x60 mlx5_health_check_fatal_sensors+0x20/0x100 [mlx5_core] ? __pfx_poll_health+0x10/0x10 [mlx5_core] poll_health+0x42/0x230 [mlx5_core] ? __next_timer_interrupt+0xbc/0x110 ? __pfx_poll_health+0x10/0x10 [mlx5_core] call_timer_fn+0x21/0x130 ? __pfx_poll_health+0x10/0x10 [mlx5_core] __run_timers+0x222/0x2c0 run_timer_softirq+0x1d/0x40 __do_softirq+0xc9/0x2c8 __irq_exit_rcu+0xa6/0xc0 sysvec_apic_timer_interrupt+0x72/0x90 asm_sysvec_apic_timer_interrupt+0x1a/0x20 RIP: 0010:cpuidle_enter_state+0xcc/0x440 ? 
cpuidle_enter_state+0xbd/0x440 cpuidle_enter+0x2d/0x40 do_idle+0x20d/0x270 cpu_startup_entry+0x2a/0x30 rest_init+0xd0/0xd0 arch_call_rest_init+0xe/0x30 start_kernel+0x709/0xa90 x86_64_start_reservations+0x18/0x30 x86_64_start_kernel+0x96/0xa0 secondary_startup_64_no_verify+0x18f/0x19b ---[ end trace 0000000000000000 ]--- Fixes: 9b98d395b85d ("net/mlx5: Start health poll at earlier stage of driver load") Signed-off-by: Shay Drory Reviewed-by: Moshe Shemesh Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 6574c145dc1e2..459a836a5d9c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1298,6 +1298,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot) if (!err) mlx5_function_disable(dev, boot); + else + mlx5_stop_health_poll(dev, boot); + return err; } From 74751ef5c1912ebd3e65c3b65f45587e05ce5d36 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Mon, 13 May 2024 10:39:48 +0000 Subject: [PATCH 309/537] perf/core: Fix missing wakeup when waiting for context reference In our production environment, we found many hung tasks which are blocked for more than 18 hours. Their call traces are like this: [346278.191038] __schedule+0x2d8/0x890 [346278.191046] schedule+0x4e/0xb0 [346278.191049] perf_event_free_task+0x220/0x270 [346278.191056] ? init_wait_var_entry+0x50/0x50 [346278.191060] copy_process+0x663/0x18d0 [346278.191068] kernel_clone+0x9d/0x3d0 [346278.191072] __do_sys_clone+0x5d/0x80 [346278.191076] __x64_sys_clone+0x25/0x30 [346278.191079] do_syscall_64+0x5c/0xc0 [346278.191083] ? syscall_exit_to_user_mode+0x27/0x50 [346278.191086] ? do_syscall_64+0x69/0xc0 [346278.191088] ? irqentry_exit_to_user_mode+0x9/0x20 [346278.191092] ? irqentry_exit+0x19/0x30 [346278.191095] ? exc_page_fault+0x89/0x160 [346278.191097] ? asm_exc_page_fault+0x8/0x30 [346278.191102] entry_SYSCALL_64_after_hwframe+0x44/0xae The task was waiting for the refcount become to 1, but from the vmcore, we found the refcount has already been 1. It seems that the task didn't get woken up by perf_event_release_kernel() and got stuck forever. The below scenario may cause the problem. Thread A Thread B ... ... perf_event_free_task perf_event_release_kernel ... acquire event->child_mutex ... get_ctx ... release event->child_mutex acquire ctx->mutex ... perf_free_event (acquire/release event->child_mutex) ... release ctx->mutex wait_var_event acquire ctx->mutex acquire event->child_mutex # move existing events to free_list release event->child_mutex release ctx->mutex put_ctx ... ... In this case, all events of the ctx have been freed, so we couldn't find the ctx in free_list and Thread A will miss the wakeup. It's thus necessary to add a wakeup after dropping the reference. 
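The pairing requirement can be shown with a small user-space analogue (pthreads; illustrative only, the names are invented, and wait_var_event()/wake_up_var() are modeled here with a condition variable): whoever drops the reference must also issue the wakeup, otherwise a waiter that sampled the old count can sleep forever even though the condition has since become true.

```
/* User-space analogue (not kernel code) of the missed-wakeup problem. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int refcount = 2;	/* stands in for ctx->refcount */

static void put_ref(void)
{
	pthread_mutex_lock(&lock);
	refcount--;
	/* The crucial part: notify waiters after dropping the reference. */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

static void *release_thread(void *arg)
{
	(void)arg;
	put_ref();		/* models the put_ctx() done by Thread B */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, release_thread, NULL);

	/* models wait_var_event(&ctx->refcount, refcount == 1) in Thread A */
	pthread_mutex_lock(&lock);
	while (refcount != 1)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("woken up, refcount = %d\n", refcount);
	return 0;
}
```

Removing the pthread_cond_broadcast() call reproduces the hang whenever the waiter reaches pthread_cond_wait() before the releasing thread runs, which mirrors the stuck perf_event_free_task() above.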
Fixes: 1cf8dfe8a661 ("perf/core: Fix race between close() and fork()") Signed-off-by: Haifeng Xu Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Frederic Weisbecker Acked-by: Mark Rutland Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20240513103948.33570-1-haifeng.xu@shopee.com --- kernel/events/core.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/kernel/events/core.c b/kernel/events/core.c index f0128c5ff2789..8f908f0779354 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5384,6 +5384,7 @@ int perf_event_release_kernel(struct perf_event *event) again: mutex_lock(&event->child_mutex); list_for_each_entry(child, &event->child_list, child_list) { + void *var = NULL; /* * Cannot change, child events are not migrated, see the @@ -5424,11 +5425,23 @@ int perf_event_release_kernel(struct perf_event *event) * this can't be the last reference. */ put_event(event); + } else { + var = &ctx->refcount; } mutex_unlock(&event->child_mutex); mutex_unlock(&ctx->mutex); put_ctx(ctx); + + if (var) { + /* + * If perf_event_free_task() has deleted all events from the + * ctx while the child_mutex got released above, make sure to + * notify about the preceding put_ctx(). + */ + smp_mb(); /* pairs with wait_var_event() */ + wake_up_var(var); + } goto again; } mutex_unlock(&event->child_mutex); From f92a59f6d12e31ead999fee9585471b95a8ae8a3 Mon Sep 17 00:00:00 2001 From: Carlos Llamas Date: Wed, 15 May 2024 13:37:10 +0000 Subject: [PATCH 310/537] locking/atomic: scripts: fix ${atomic}_sub_and_test() kerneldoc For ${atomic}_sub_and_test() the @i parameter is the value to subtract, not add. Fix the typo in the kerneldoc template and generate the headers with this update. Fixes: ad8110706f38 ("locking/atomic: scripts: generate kerneldoc comments") Suggested-by: Mark Rutland Signed-off-by: Carlos Llamas Signed-off-by: Peter Zijlstra (Intel) Acked-by: Mark Rutland Reviewed-by: Kees Cook Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20240515133844.3502360-1-cmllamas@google.com --- include/linux/atomic/atomic-arch-fallback.h | 6 +++--- include/linux/atomic/atomic-instrumented.h | 8 ++++---- include/linux/atomic/atomic-long.h | 4 ++-- scripts/atomic/kerneldoc/sub_and_test | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h index 956bcba5dbf2f..2f9d36b72bd89 100644 --- a/include/linux/atomic/atomic-arch-fallback.h +++ b/include/linux/atomic/atomic-arch-fallback.h @@ -2242,7 +2242,7 @@ raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) /** * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering - * @i: int value to add + * @i: int value to subtract * @v: pointer to atomic_t * * Atomically updates @v to (@v - @i) with full ordering. @@ -4368,7 +4368,7 @@ raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) /** * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering - * @i: s64 value to add + * @i: s64 value to subtract * @v: pointer to atomic64_t * * Atomically updates @v to (@v - @i) with full ordering. 
@@ -4690,4 +4690,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v) } #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// 14850c0b0db20c62fdc78ccd1d42b98b88d76331 +// b565db590afeeff0d7c9485ccbca5bb6e155749f diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h index debd487fe9716..9409a6ddf3e0d 100644 --- a/include/linux/atomic/atomic-instrumented.h +++ b/include/linux/atomic/atomic-instrumented.h @@ -1349,7 +1349,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) /** * atomic_sub_and_test() - atomic subtract and test if zero with full ordering - * @i: int value to add + * @i: int value to subtract * @v: pointer to atomic_t * * Atomically updates @v to (@v - @i) with full ordering. @@ -2927,7 +2927,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) /** * atomic64_sub_and_test() - atomic subtract and test if zero with full ordering - * @i: s64 value to add + * @i: s64 value to subtract * @v: pointer to atomic64_t * * Atomically updates @v to (@v - @i) with full ordering. @@ -4505,7 +4505,7 @@ atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) /** * atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering - * @i: long value to add + * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with full ordering. @@ -5050,4 +5050,4 @@ atomic_long_dec_if_positive(atomic_long_t *v) #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ -// ce5b65e0f1f8a276268b667194581d24bed219d4 +// 8829b337928e9508259079d32581775ececd415b diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h index 3ef844b3ab8a3..f86b29d908775 100644 --- a/include/linux/atomic/atomic-long.h +++ b/include/linux/atomic/atomic-long.h @@ -1535,7 +1535,7 @@ raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) /** * raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering - * @i: long value to add + * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with full ordering. @@ -1809,4 +1809,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v) } #endif /* _LINUX_ATOMIC_LONG_H */ -// 1c4a26fc77f345342953770ebe3c4d08e7ce2f9a +// eadf183c3600b8b92b91839dd3be6bcc560c752d diff --git a/scripts/atomic/kerneldoc/sub_and_test b/scripts/atomic/kerneldoc/sub_and_test index d3760f7749d4e..96615e50836b0 100644 --- a/scripts/atomic/kerneldoc/sub_and_test +++ b/scripts/atomic/kerneldoc/sub_and_test @@ -1,7 +1,7 @@ cat < Date: Tue, 4 Jun 2024 11:00:22 -0300 Subject: [PATCH 311/537] Revert "perf record: Reduce memory for recording PERF_RECORD_LOST_SAMPLES event" This reverts commit 7d1405c71df21f6c394b8a885aa8a133f749fa22. This causes segfaults in some cases, as reported by Milian: ``` sudo /usr/bin/perf record -z --call-graph dwarf -e cycles -e raw_syscalls:sys_enter ls ... [ perf record: Woken up 3 times to write data ] malloc(): invalid next size (unsorted) Aborted ``` Backtrace with GDB + debuginfod: ``` malloc(): invalid next size (unsorted) Thread 1 "perf" received signal SIGABRT, Aborted. __pthread_kill_implementation (threadid=, signo=signo@entry=6, no_tid=no_tid@entry=0) at pthread_kill.c:44 Downloading source file /usr/src/debug/glibc/glibc/nptl/pthread_kill.c 44 return INTERNAL_SYSCALL_ERROR_P (ret) ? 
INTERNAL_SYSCALL_ERRNO (ret) : 0; (gdb) bt #0 __pthread_kill_implementation (threadid=, signo=signo@entry=6, no_tid=no_tid@entry=0) at pthread_kill.c:44 #1 0x00007ffff6ea8eb3 in __pthread_kill_internal (threadid=, signo=6) at pthread_kill.c:78 #2 0x00007ffff6e50a30 in __GI_raise (sig=sig@entry=6) at ../sysdeps/posix/ raise.c:26 #3 0x00007ffff6e384c3 in __GI_abort () at abort.c:79 #4 0x00007ffff6e39354 in __libc_message_impl (fmt=fmt@entry=0x7ffff6fc22ea "%s\n") at ../sysdeps/posix/libc_fatal.c:132 #5 0x00007ffff6eb3085 in malloc_printerr (str=str@entry=0x7ffff6fc5850 "malloc(): invalid next size (unsorted)") at malloc.c:5772 #6 0x00007ffff6eb657c in _int_malloc (av=av@entry=0x7ffff6ff6ac0 , bytes=bytes@entry=368) at malloc.c:4081 #7 0x00007ffff6eb877e in __libc_calloc (n=, elem_size=) at malloc.c:3754 #8 0x000055555569bdb6 in perf_session.do_write_header () #9 0x00005555555a373a in __cmd_record.constprop.0 () #10 0x00005555555a6846 in cmd_record () #11 0x000055555564db7f in run_builtin () #12 0x000055555558ed77 in main () ``` Valgrind memcheck: ``` ==45136== Invalid write of size 8 ==45136== at 0x2B38A5: perf_event__synthesize_id_sample (in /usr/bin/perf) ==45136== by 0x157069: __cmd_record.constprop.0 (in /usr/bin/perf) ==45136== by 0x15A845: cmd_record (in /usr/bin/perf) ==45136== by 0x201B7E: run_builtin (in /usr/bin/perf) ==45136== by 0x142D76: main (in /usr/bin/perf) ==45136== Address 0x6a866a8 is 0 bytes after a block of size 40 alloc'd ==45136== at 0x4849BF3: calloc (vg_replace_malloc.c:1675) ==45136== by 0x3574AB: zalloc (in /usr/bin/perf) ==45136== by 0x1570E0: __cmd_record.constprop.0 (in /usr/bin/perf) ==45136== by 0x15A845: cmd_record (in /usr/bin/perf) ==45136== by 0x201B7E: run_builtin (in /usr/bin/perf) ==45136== by 0x142D76: main (in /usr/bin/perf) ==45136== ==45136== Syscall param write(buf) points to unaddressable byte(s) ==45136== at 0x575953D: __libc_write (write.c:26) ==45136== by 0x575953D: write (write.c:24) ==45136== by 0x35761F: ion (in /usr/bin/perf) ==45136== by 0x357778: writen (in /usr/bin/perf) ==45136== by 0x1548F7: record__write (in /usr/bin/perf) ==45136== by 0x15708A: __cmd_record.constprop.0 (in /usr/bin/perf) ==45136== by 0x15A845: cmd_record (in /usr/bin/perf) ==45136== by 0x201B7E: run_builtin (in /usr/bin/perf) ==45136== by 0x142D76: main (in /usr/bin/perf) ==45136== Address 0x6a866a8 is 0 bytes after a block of size 40 alloc'd ==45136== at 0x4849BF3: calloc (vg_replace_malloc.c:1675) ==45136== by 0x3574AB: zalloc (in /usr/bin/perf) ==45136== by 0x1570E0: __cmd_record.constprop.0 (in /usr/bin/perf) ==45136== by 0x15A845: cmd_record (in /usr/bin/perf) ==45136== by 0x201B7E: run_builtin (in /usr/bin/perf) ==45136== by 0x142D76: main (in /usr/bin/perf) ==45136== ----- Closes: https://lore.kernel.org/linux-perf-users/23879991.0LEYPuXRzz@milian-workstation/ Reported-by: Milian Wolff Tested-by: Milian Wolff Cc: Adrian Hunter Cc: Ian Rogers Cc: Jiri Olsa Cc: Kan Liang Cc: Namhyung Kim Cc: stable@kernel.org # 6.8+ Link: https://lore.kernel.org/lkml/Zl9ksOlHJHnKM70p@x1 Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 66a3de8ac6618..0a8ba1323d64b 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -1956,8 +1956,7 @@ static void record__read_lost_samples(struct record *rec) if (count.lost) { if (!lost) { - lost = zalloc(sizeof(*lost) + - session->machines.host.id_hdr_size); + lost 
= zalloc(PERF_SAMPLE_MAX_SIZE); if (!lost) { pr_debug("Memory allocation failed\n"); return; @@ -1973,8 +1972,7 @@ static void record__read_lost_samples(struct record *rec) lost_count = perf_bpf_filter__lost_count(evsel); if (lost_count) { if (!lost) { - lost = zalloc(sizeof(*lost) + - session->machines.host.id_hdr_size); + lost = zalloc(PERF_SAMPLE_MAX_SIZE); if (!lost) { pr_debug("Memory allocation failed\n"); return; From ca9680821dfec73c9100860bda4fab1f1309722e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 21 May 2024 10:07:40 -0700 Subject: [PATCH 312/537] perf bpf: Fix handling of minimal vmlinux.h file when interrupting the build Ingo reported that he was seeing these when hitting Control+C during a perf tools build: Makefile.perf:1149: *** Missing bpftool input for generating vmlinux.h. Stop. The failure happens when you don't have vmlinux.h or vmlinux with BTF. ifeq ($(VMLINUX_H),) ifeq ($(VMLINUX_BTF),) $(error Missing bpftool input for generating vmlinux.h) endif endif VMLINUX_BTF can be empty if you didn't build a kernel or it doesn't have a BTF section and the current kernel also has no BTF. This is totally ok. But VMLINUX_H should be set to the minimal version in the source tree (unless you overwrite it manually) when you don't pass GEN_VMLINUX_H=1 (which requires VMLINUX_BTF should not be empty). The problem is that it's defined in Makefile.config which is not included for `make clean`. Reported-by: Ingo Molnar Signed-off-by: Namhyung Kim Tested-by: Ingo Molnar Cc: Adrian Hunter Cc: Ian Rogers Cc: Jiri Olsa Cc: Kan Liang Link: http://lore.kernel.org/lkml/CAM9d7ch5HTr+k+_GpbMrX0HUo5BZ11byh1xq0Two7B7RQACuNw@mail.gmail.com Link: http://lore.kernel.org/lkml/ZjssGrj+abyC6mYP@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.perf | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 5c35c0d893069..e6d56b5553695 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -214,6 +214,7 @@ NON_CONFIG_TARGETS := clean python-clean TAGS tags cscope help ifdef MAKECMDGOALS ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),) + VMLINUX_H=$(src-perf)/util/bpf_skel/vmlinux/vmlinux.h config := 0 endif endif From 9a64e1bfd8a10c015c41fe0d289e89862486c50d Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sun, 2 Jun 2024 22:52:24 -0400 Subject: [PATCH 313/537] bcachefs: Fix GFP_KERNEL allocation in break_cycle() Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_locking.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c index c3e9b0cc7bbdc..d66fff22109ae 100644 --- a/fs/bcachefs/btree_locking.c +++ b/fs/bcachefs/btree_locking.c @@ -215,6 +215,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle) if (unlikely(!best)) { struct printbuf buf = PRINTBUF; + buf.atomic++; prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks")); From fdccb24352e589bb59c9ba90f23c4e0994b90518 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sun, 2 Jun 2024 22:25:18 -0400 Subject: [PATCH 314/537] bcachefs: Rereplicate now moves data off of durability=0 devices This fixes an issue where setting a device to durability=0 after it's been used makes it impossible to remove. 
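The heart of the change is a bitmask of pointers that must be dropped. A reduced sketch of that idea (illustrative only; the structures below are simplified stand-ins, not bcachefs types) is shown here.

```
#include <stdint.h>
#include <stdio.h>

struct dev_info {
	unsigned durability;
};

struct extent_ptr {
	unsigned dev;
	int cached;
};

/* Collect a bitmask of non-cached pointers that sit on zero-durability
 * (or missing) devices, so the caller can drop exactly those replicas. */
static uint32_t ptrs_to_kill(const struct extent_ptr *ptrs, unsigned nr,
			     const struct dev_info *devs, unsigned nr_devs)
{
	uint32_t kill = 0;

	for (unsigned i = 0; i < nr; i++) {
		const struct extent_ptr *p = &ptrs[i];

		if (!p->cached &&
		    (p->dev >= nr_devs || !devs[p->dev].durability))
			kill |= 1u << i;
	}
	return kill;
}

int main(void)
{
	struct dev_info devs[] = { { .durability = 1 }, { .durability = 0 } };
	struct extent_ptr ptrs[] = {
		{ .dev = 0, .cached = 0 },
		{ .dev = 1, .cached = 0 },	/* durability=0: to be dropped */
	};

	printf("kill_ptrs mask = 0x%x\n", ptrs_to_kill(ptrs, 2, devs, 2));
	return 0;
}
```

In the patch itself the mask is then considered alongside the usual replica-count check, so extents with enough good replicas and no pointers on zero-durability devices are still skipped.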
Signed-off-by: Kent Overstreet --- fs/bcachefs/move.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c index 8171f947fac8f..2706d82423450 100644 --- a/fs/bcachefs/move.c +++ b/fs/bcachefs/move.c @@ -920,7 +920,20 @@ static bool rereplicate_pred(struct bch_fs *c, void *arg, ? c->opts.metadata_replicas : io_opts->data_replicas; - if (!nr_good || nr_good >= replicas) + rcu_read_lock(); + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + unsigned i = 0; + bkey_for_each_ptr(ptrs, ptr) { + struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev); + if (!ptr->cached && + (!ca || !ca->mi.durability)) + data_opts->kill_ptrs |= BIT(i); + i++; + } + rcu_read_unlock(); + + if (!data_opts->kill_ptrs && + (!nr_good || nr_good >= replicas)) return false; data_opts->target = 0; From 319fef29e96524966bb8593117ce0c5867846eea Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Mon, 3 Jun 2024 18:00:48 -0400 Subject: [PATCH 315/537] bcachefs: Fix trans->locked assert in bch2_move_data_btree, we might start with the trans unlocked from a previous loop iteration - we need a trans_begin() before iter_init(). Signed-off-by: Kent Overstreet --- fs/bcachefs/move.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c index 2706d82423450..6e477fadaa2a5 100644 --- a/fs/bcachefs/move.c +++ b/fs/bcachefs/move.c @@ -547,6 +547,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt, ctxt->stats->pos = BBPOS(btree_id, start); } + bch2_trans_begin(trans); bch2_trans_iter_init(trans, &iter, btree_id, start, BTREE_ITER_prefetch| BTREE_ITER_all_snapshots); From d38e48563c1f70460503de7ffff9a7f46b54b67d Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Mon, 3 Jun 2024 21:03:42 +0200 Subject: [PATCH 316/537] s390/crash: Do not use VM info if os_info does not have it The virtual memory information stored in os_info area is required for creation of the kernel image PT_LOAD program header for kernels since commit a2ec5bec56dd ("s390/mm: uncouple physical vs virtual address spaces"). By contrast, if such information in os_info is absent the PT_LOAD program header should not be created. Currently the proper PT_LOAD program header is created for kernels that contain the virtual memory information, but for kernels without one an invalid header of zero size is created. That in turn leads to stand-alone dump failures. Use OS_INFO_KASLR_OFFSET variable to check whether os_info is present or not (same as crash and makedumpfile tools do) and based on that create or do not create the kernel image PT_LOAD program header. 
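The header-accounting consequence is easier to follow in a reduced form. The sketch below (user-space C, illustrative only; it mimics only the layout arithmetic, not the s390 dump code) shows how e_phnum and the program header slots line up when the kernel-image PT_LOAD entry may or may not be present.

```
#include <elf.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	bool has_text = false;	/* e.g. old os_info carries no VM info */
	int mem_chunks = 3;	/* memory-range PT_LOAD headers */
	int phdr_count = mem_chunks + (has_text ? 1 : 0);

	/* ELF header, one PT_NOTE header, then phdr_count PT_LOAD headers */
	size_t size = sizeof(Elf64_Ehdr) + (1 + phdr_count) * sizeof(Elf64_Phdr);
	Elf64_Ehdr *ehdr = calloc(1, size);

	if (!ehdr)
		return 1;

	ehdr->e_phnum = 1 + phdr_count;		/* PT_NOTE + PT_LOADs */

	Elf64_Phdr *notes = (Elf64_Phdr *)(ehdr + 1);
	Elf64_Phdr *text = has_text ? notes + 1 : NULL;
	Elf64_Phdr *loads = has_text ? text + 1 : notes + 1;

	notes->p_type = PT_NOTE;
	if (text)
		text->p_type = PT_LOAD;		/* kernel image region */
	for (int i = 0; i < mem_chunks; i++)
		loads[i].p_type = PT_LOAD;	/* memory chunks */

	printf("e_phnum=%u, header blob is %zu bytes, no empty slots\n",
	       (unsigned)ehdr->e_phnum, size);
	free(ehdr);
	return 0;
}
```

The point is that when the text header is skipped, no zero-sized PT_LOAD slot is left behind, which is what previously broke the stand-alone dump tools.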
Fixes: f4cac27dc0d6 ("s390/crash: Use old os_info to create PT_LOAD headers") Tested-by: Mikhail Zaslonko Acked-by: Mikhail Zaslonko Acked-by: Heiko Carstens Signed-off-by: Alexander Gordeev --- arch/s390/kernel/crash_dump.c | 54 +++++++++++++++++++---------------- 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index 9863ebe75019a..edae134161960 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -451,7 +451,7 @@ static void *nt_final(void *ptr) /* * Initialize ELF header (new kernel) */ -static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt) +static void *ehdr_init(Elf64_Ehdr *ehdr, int phdr_count) { memset(ehdr, 0, sizeof(*ehdr)); memcpy(ehdr->e_ident, ELFMAG, SELFMAG); @@ -465,11 +465,8 @@ static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt) ehdr->e_phoff = sizeof(Elf64_Ehdr); ehdr->e_ehsize = sizeof(Elf64_Ehdr); ehdr->e_phentsize = sizeof(Elf64_Phdr); - /* - * Number of memory chunk PT_LOAD program headers plus one kernel - * image PT_LOAD program header plus one PT_NOTE program header. - */ - ehdr->e_phnum = mem_chunk_cnt + 1 + 1; + /* Number of PT_LOAD program headers plus PT_NOTE program header */ + ehdr->e_phnum = phdr_count + 1; return ehdr + 1; } @@ -503,12 +500,14 @@ static int get_mem_chunk_cnt(void) /* * Initialize ELF loads (new kernel) */ -static void loads_init(Elf64_Phdr *phdr) +static void loads_init(Elf64_Phdr *phdr, bool os_info_has_vm) { - unsigned long old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE); + unsigned long old_identity_base = 0; phys_addr_t start, end; u64 idx; + if (os_info_has_vm) + old_identity_base = os_info_old_value(OS_INFO_IDENTITY_BASE); for_each_physmem_range(idx, &oldmem_type, &start, &end) { phdr->p_type = PT_LOAD; phdr->p_vaddr = old_identity_base + start; @@ -522,6 +521,11 @@ static void loads_init(Elf64_Phdr *phdr) } } +static bool os_info_has_vm(void) +{ + return os_info_old_value(OS_INFO_KASLR_OFFSET); +} + /* * Prepare PT_LOAD type program header for kernel image region */ @@ -566,7 +570,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) return ptr; } -static size_t get_elfcorehdr_size(int mem_chunk_cnt) +static size_t get_elfcorehdr_size(int phdr_count) { size_t size; @@ -581,10 +585,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt) size += nt_vmcoreinfo_size(); /* nt_final */ size += sizeof(Elf64_Nhdr); - /* PT_LOAD type program header for kernel text region */ - size += sizeof(Elf64_Phdr); /* PT_LOADS */ - size += mem_chunk_cnt * sizeof(Elf64_Phdr); + size += phdr_count * sizeof(Elf64_Phdr); return size; } @@ -595,8 +597,8 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt) int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) { Elf64_Phdr *phdr_notes, *phdr_loads, *phdr_text; + int mem_chunk_cnt, phdr_text_cnt; size_t alloc_size; - int mem_chunk_cnt; void *ptr, *hdr; u64 hdr_off; @@ -615,12 +617,14 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) } mem_chunk_cnt = get_mem_chunk_cnt(); + phdr_text_cnt = os_info_has_vm() ? 1 : 0; - alloc_size = get_elfcorehdr_size(mem_chunk_cnt); + alloc_size = get_elfcorehdr_size(mem_chunk_cnt + phdr_text_cnt); hdr = kzalloc(alloc_size, GFP_KERNEL); - /* Without elfcorehdr /proc/vmcore cannot be created. Thus creating + /* + * Without elfcorehdr /proc/vmcore cannot be created. Thus creating * a dump with this crash kernel will fail. Panic now to allow other * dump mechanisms to take over. 
*/ @@ -628,21 +632,23 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) panic("s390 kdump allocating elfcorehdr failed"); /* Init elf header */ - ptr = ehdr_init(hdr, mem_chunk_cnt); + phdr_notes = ehdr_init(hdr, mem_chunk_cnt + phdr_text_cnt); /* Init program headers */ - phdr_notes = ptr; - ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr)); - phdr_text = ptr; - ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr)); - phdr_loads = ptr; - ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt); + if (phdr_text_cnt) { + phdr_text = phdr_notes + 1; + phdr_loads = phdr_text + 1; + } else { + phdr_loads = phdr_notes + 1; + } + ptr = PTR_ADD(phdr_loads, sizeof(Elf64_Phdr) * mem_chunk_cnt); /* Init notes */ hdr_off = PTR_DIFF(ptr, hdr); ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off); /* Init kernel text program header */ - text_init(phdr_text); + if (phdr_text_cnt) + text_init(phdr_text); /* Init loads */ - loads_init(phdr_loads); + loads_init(phdr_loads, phdr_text_cnt); /* Finalize program headers */ hdr_off = PTR_DIFF(ptr, hdr); *addr = (unsigned long long) hdr; From 01c51a32dc18f128d2e55a7b2128b77fc01a2285 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 12 Apr 2024 10:43:29 +0200 Subject: [PATCH 317/537] KVM: s390x: selftests: Add shared zeropage test Let's test that we can have shared zeropages in our process as long as storage keys are not getting used, that shared zeropages are properly unshared (replaced by anonymous pages) once storage keys are enabled, and that no new shared zeropages are populated after storage keys were enabled. We require the new pagemap interface to detect the shared zeropage. On an old kernel (zeropages always disabled): # ./s390x/shared_zeropage_test TAP version 13 1..3 not ok 1 Shared zeropages should be enabled ok 2 Shared zeropage should be gone ok 3 Shared zeropages should be disabled # Totals: pass:2 fail:1 xfail:0 xpass:0 skip:0 error:0 On a fixed kernel: # ./s390x/shared_zeropage_test TAP version 13 1..3 ok 1 Shared zeropages should be enabled ok 2 Shared zeropage should be gone ok 3 Shared zeropages should be disabled # Totals: pass:3 fail:0 xfail:0 xpass:0 skip:0 error:0 Testing of UFFDIO_ZEROPAGE can be added later. 
[ agordeev: Fixed checkpatch complaint, added ucall_common.h include ] Cc: Christian Borntraeger Cc: Janosch Frank Cc: Claudio Imbrenda Cc: Thomas Huth Cc: Alexander Gordeev Cc: Paolo Bonzini Cc: Shuah Khan Signed-off-by: David Hildenbrand Acked-by: Christian Borntraeger Acked-by: Muhammad Usama Anjum Tested-by: Alexander Gordeev Link: https://lore.kernel.org/r/20240412084329.30315-1-david@redhat.com Signed-off-by: Alexander Gordeev --- tools/testing/selftests/kvm/Makefile | 1 + .../kvm/s390x/shared_zeropage_test.c | 111 ++++++++++++++++++ 2 files changed, 112 insertions(+) create mode 100644 tools/testing/selftests/kvm/s390x/shared_zeropage_test.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index ce8ff8e8ce3a2..ac280dcba996d 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -183,6 +183,7 @@ TEST_GEN_PROGS_s390x += s390x/sync_regs_test TEST_GEN_PROGS_s390x += s390x/tprot TEST_GEN_PROGS_s390x += s390x/cmma_test TEST_GEN_PROGS_s390x += s390x/debug_test +TEST_GEN_PROGS_s390x += s390x/shared_zeropage_test TEST_GEN_PROGS_s390x += demand_paging_test TEST_GEN_PROGS_s390x += dirty_log_test TEST_GEN_PROGS_s390x += guest_print_test diff --git a/tools/testing/selftests/kvm/s390x/shared_zeropage_test.c b/tools/testing/selftests/kvm/s390x/shared_zeropage_test.c new file mode 100644 index 0000000000000..bba0d9a6dcc87 --- /dev/null +++ b/tools/testing/selftests/kvm/s390x/shared_zeropage_test.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Test shared zeropage handling (with/without storage keys) + * + * Copyright (C) 2024, Red Hat, Inc. + */ +#include + +#include + +#include "test_util.h" +#include "kvm_util.h" +#include "kselftest.h" +#include "ucall_common.h" + +static void set_storage_key(void *addr, uint8_t skey) +{ + asm volatile("sske %0,%1" : : "d" (skey), "a" (addr)); +} + +static void guest_code(void) +{ + /* Issue some storage key instruction. */ + set_storage_key((void *)0, 0x98); + GUEST_DONE(); +} + +/* + * Returns 1 if the shared zeropage is mapped, 0 if something else is mapped. + * Returns < 0 on error or if nothing is mapped. + */ +static int maps_shared_zeropage(int pagemap_fd, void *addr) +{ + struct page_region region; + struct pm_scan_arg arg = { + .start = (uintptr_t)addr, + .end = (uintptr_t)addr + 4096, + .vec = (uintptr_t)®ion, + .vec_len = 1, + .size = sizeof(struct pm_scan_arg), + .category_mask = PAGE_IS_PFNZERO, + .category_anyof_mask = PAGE_IS_PRESENT, + .return_mask = PAGE_IS_PFNZERO, + }; + return ioctl(pagemap_fd, PAGEMAP_SCAN, &arg); +} + +int main(int argc, char *argv[]) +{ + char *mem, *page0, *page1, *page2, tmp; + const size_t pagesize = getpagesize(); + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + struct ucall uc; + int pagemap_fd; + + ksft_print_header(); + ksft_set_plan(3); + + /* + * We'll use memory that is not mapped into the VM for simplicity. + * Shared zeropages are enabled/disabled per-process. + */ + mem = mmap(0, 3 * pagesize, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0); + TEST_ASSERT(mem != MAP_FAILED, "mmap() failed"); + + /* Disable THP. Ignore errors on older kernels. */ + madvise(mem, 3 * pagesize, MADV_NOHUGEPAGE); + + page0 = mem; + page1 = page0 + pagesize; + page2 = page1 + pagesize; + + /* Can we even detect shared zeropages? 
*/ + pagemap_fd = open("/proc/self/pagemap", O_RDONLY); + TEST_REQUIRE(pagemap_fd >= 0); + + tmp = *page0; + asm volatile("" : "+r" (tmp)); + TEST_REQUIRE(maps_shared_zeropage(pagemap_fd, page0) == 1); + + vm = vm_create_with_one_vcpu(&vcpu, guest_code); + + /* Verify that we get the shared zeropage after VM creation. */ + tmp = *page1; + asm volatile("" : "+r" (tmp)); + ksft_test_result(maps_shared_zeropage(pagemap_fd, page1) == 1, + "Shared zeropages should be enabled\n"); + + /* + * Let our VM execute a storage key instruction that should + * unshare all shared zeropages. + */ + vcpu_run(vcpu); + get_ucall(vcpu, &uc); + TEST_ASSERT_EQ(uc.cmd, UCALL_DONE); + + /* Verify that we don't have a shared zeropage anymore. */ + ksft_test_result(!maps_shared_zeropage(pagemap_fd, page1), + "Shared zeropage should be gone\n"); + + /* Verify that we don't get any new shared zeropages. */ + tmp = *page2; + asm volatile("" : "+r" (tmp)); + ksft_test_result(!maps_shared_zeropage(pagemap_fd, page2), + "Shared zeropages should be disabled\n"); + + kvm_vm_free(vm); + + ksft_finished(); +} From 0841ea4a3b416554be401a91aa267b7de838de8b Mon Sep 17 00:00:00 2001 From: Zhang Yi Date: Mon, 3 Jun 2024 19:22:22 +0800 Subject: [PATCH 318/537] iomap: keep on increasing i_size in iomap_write_end() Commit '943bc0882ceb ("iomap: don't increase i_size if it's not a write operation")' breaks xfs with realtime device on generic/561, the problem is when unaligned truncate down a xfs realtime inode with rtextsize > 1 fs block, xfs only zero out the EOF block but doesn't zero out the tail blocks that aligned to rtextsize, so if we don't increase i_size in iomap_write_end(), it could expose stale data after we do an append write beyond the aligned EOF block. xfs should zero out the tail blocks when truncate down, but before we finish that, let's fix the issue by just revert the changes in iomap_write_end(). Fixes: 943bc0882ceb ("iomap: don't increase i_size if it's not a write operation") Reported-by: Chandan Babu R Link: https://lore.kernel.org/linux-xfs/0b92a215-9d9b-3788-4504-a520778953c2@huaweicloud.com Signed-off-by: Zhang Yi Link: https://lore.kernel.org/r/20240603112222.2109341-1-yi.zhang@huaweicloud.com Tested-by: Chandan Babu R Reviewed-by: Christoph Hellwig Signed-off-by: Christian Brauner --- fs/iomap/buffered-io.c | 53 +++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 29 deletions(-) diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index c5802a4593345..bd70fcbc168ea 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -877,22 +877,37 @@ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len, size_t copied, struct folio *folio) { const struct iomap *srcmap = iomap_iter_srcmap(iter); + loff_t old_size = iter->inode->i_size; + size_t written; if (srcmap->type == IOMAP_INLINE) { iomap_write_end_inline(iter, folio, pos, copied); - return true; + written = copied; + } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) { + written = block_write_end(NULL, iter->inode->i_mapping, pos, + len, copied, &folio->page, NULL); + WARN_ON_ONCE(written != copied && written != 0); + } else { + written = __iomap_write_end(iter->inode, pos, len, copied, + folio) ? 
copied : 0; } - if (srcmap->flags & IOMAP_F_BUFFER_HEAD) { - size_t bh_written; - - bh_written = block_write_end(NULL, iter->inode->i_mapping, pos, - len, copied, &folio->page, NULL); - WARN_ON_ONCE(bh_written != copied && bh_written != 0); - return bh_written == copied; + /* + * Update the in-memory inode size after copying the data into the page + * cache. It's up to the file system to write the updated size to disk, + * preferably after I/O completion so that no stale data is exposed. + * Only once that's done can we unlock and release the folio. + */ + if (pos + written > old_size) { + i_size_write(iter->inode, pos + written); + iter->iomap.flags |= IOMAP_F_SIZE_CHANGED; } + __iomap_put_folio(iter, pos, written, folio); - return __iomap_write_end(iter->inode, pos, len, copied, folio); + if (old_size < pos) + pagecache_isize_extended(iter->inode, old_size, pos); + + return written == copied; } static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i) @@ -907,7 +922,6 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i) do { struct folio *folio; - loff_t old_size; size_t offset; /* Offset into folio */ size_t bytes; /* Bytes to write to folio */ size_t copied; /* Bytes copied from user */ @@ -959,23 +973,6 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i) written = iomap_write_end(iter, pos, bytes, copied, folio) ? copied : 0; - /* - * Update the in-memory inode size after copying the data into - * the page cache. It's up to the file system to write the - * updated size to disk, preferably after I/O completion so that - * no stale data is exposed. Only once that's done can we - * unlock and release the folio. - */ - old_size = iter->inode->i_size; - if (pos + written > old_size) { - i_size_write(iter->inode, pos + written); - iter->iomap.flags |= IOMAP_F_SIZE_CHANGED; - } - __iomap_put_folio(iter, pos, written, folio); - - if (old_size < pos) - pagecache_isize_extended(iter->inode, old_size, pos); - cond_resched(); if (unlikely(written == 0)) { /* @@ -1346,7 +1343,6 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter) bytes = folio_size(folio) - offset; ret = iomap_write_end(iter, pos, bytes, bytes, folio); - __iomap_put_folio(iter, pos, bytes, folio); if (WARN_ON_ONCE(!ret)) return -EIO; @@ -1412,7 +1408,6 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) folio_mark_accessed(folio); ret = iomap_write_end(iter, pos, bytes, bytes, folio); - __iomap_put_folio(iter, pos, bytes, folio); if (WARN_ON_ONCE(!ret)) return -EIO; From f5ceb1bbc98c69536d4673a97315e8427e67de1b Mon Sep 17 00:00:00 2001 From: "Ritesh Harjani (IBM)" Date: Tue, 7 May 2024 14:25:42 +0530 Subject: [PATCH 319/537] iomap: Fix iomap_adjust_read_range for plen calculation If the extent spans the block that contains i_size, we need to handle both halves separately so that we properly zero data in the page cache for blocks that are entirely outside of i_size. But this is needed only when i_size is within the current folio under processing. "orig_pos + length > isize" can be true for all folios if the mapped extent length is greater than the folio size. That is making plen to break for every folio instead of only the last folio. So use orig_plen for checking if "orig_pos + orig_plen > isize". Signed-off-by: Ritesh Harjani (IBM) Link: https://lore.kernel.org/r/a32e5f9a4fcfdb99077300c4020ed7ae61d6e0f9.1715067055.git.ritesh.list@gmail.com Reviewed-by: Christoph Hellwig Reviewed-by: Darrick J. 
Wong Reviewed-by: Jan Kara cc: Ojaswin Mujoo Signed-off-by: Christian Brauner --- fs/iomap/buffered-io.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index bd70fcbc168ea..d465589902790 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -241,6 +241,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio, unsigned block_size = (1 << block_bits); size_t poff = offset_in_folio(folio, *pos); size_t plen = min_t(loff_t, folio_size(folio) - poff, length); + size_t orig_plen = plen; unsigned first = poff >> block_bits; unsigned last = (poff + plen - 1) >> block_bits; @@ -277,7 +278,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio, * handle both halves separately so that we properly zero data in the * page cache for blocks that are entirely outside of i_size. */ - if (orig_pos <= isize && orig_pos + length > isize) { + if (orig_pos <= isize && orig_pos + orig_plen > isize) { unsigned end = offset_in_folio(folio, isize - 1) >> block_bits; if (first <= end && last > end) From 9d274c19a71b3a276949933859610721a453946b Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Fri, 24 May 2024 13:58:11 -0700 Subject: [PATCH 320/537] btrfs: fix crash on racing fsync and size-extending write into prealloc We have been seeing crashes on duplicate keys in btrfs_set_item_key_safe(): BTRFS critical (device vdb): slot 4 key (450 108 8192) new key (450 108 8192) ------------[ cut here ]------------ kernel BUG at fs/btrfs/ctree.c:2620! invalid opcode: 0000 [#1] PREEMPT SMP PTI CPU: 0 PID: 3139 Comm: xfs_io Kdump: loaded Not tainted 6.9.0 #6 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-2.fc40 04/01/2014 RIP: 0010:btrfs_set_item_key_safe+0x11f/0x290 [btrfs] With the following stack trace: #0 btrfs_set_item_key_safe (fs/btrfs/ctree.c:2620:4) #1 btrfs_drop_extents (fs/btrfs/file.c:411:4) #2 log_one_extent (fs/btrfs/tree-log.c:4732:9) #3 btrfs_log_changed_extents (fs/btrfs/tree-log.c:4955:9) #4 btrfs_log_inode (fs/btrfs/tree-log.c:6626:9) #5 btrfs_log_inode_parent (fs/btrfs/tree-log.c:7070:8) #6 btrfs_log_dentry_safe (fs/btrfs/tree-log.c:7171:8) #7 btrfs_sync_file (fs/btrfs/file.c:1933:8) #8 vfs_fsync_range (fs/sync.c:188:9) #9 vfs_fsync (fs/sync.c:202:9) #10 do_fsync (fs/sync.c:212:9) #11 __do_sys_fdatasync (fs/sync.c:225:9) #12 __se_sys_fdatasync (fs/sync.c:223:1) #13 __x64_sys_fdatasync (fs/sync.c:223:1) #14 do_syscall_x64 (arch/x86/entry/common.c:52:14) #15 do_syscall_64 (arch/x86/entry/common.c:83:7) #16 entry_SYSCALL_64+0xaf/0x14c (arch/x86/entry/entry_64.S:121) So we're logging a changed extent from fsync, which is splitting an extent in the log tree. But this split part already exists in the tree, triggering the BUG(). 
This is the state of the log tree at the time of the crash, dumped with drgn (https://github.com/osandov/drgn/blob/main/contrib/btrfs_tree.py) to get more details than btrfs_print_leaf() gives us: >>> print_extent_buffer(prog.crashed_thread().stack_trace()[0]["eb"]) leaf 33439744 level 0 items 72 generation 9 owner 18446744073709551610 leaf 33439744 flags 0x100000000000000 fs uuid e5bd3946-400c-4223-8923-190ef1f18677 chunk uuid d58cb17e-6d02-494a-829a-18b7d8a399da item 0 key (450 INODE_ITEM 0) itemoff 16123 itemsize 160 generation 7 transid 9 size 8192 nbytes 8473563889606862198 block group 0 mode 100600 links 1 uid 0 gid 0 rdev 0 sequence 204 flags 0x10(PREALLOC) atime 1716417703.220000000 (2024-05-22 15:41:43) ctime 1716417704.983333333 (2024-05-22 15:41:44) mtime 1716417704.983333333 (2024-05-22 15:41:44) otime 17592186044416.000000000 (559444-03-08 01:40:16) item 1 key (450 INODE_REF 256) itemoff 16110 itemsize 13 index 195 namelen 3 name: 193 item 2 key (450 XATTR_ITEM 1640047104) itemoff 16073 itemsize 37 location key (0 UNKNOWN.0 0) type XATTR transid 7 data_len 1 name_len 6 name: user.a data a item 3 key (450 EXTENT_DATA 0) itemoff 16020 itemsize 53 generation 9 type 1 (regular) extent data disk byte 303144960 nr 12288 extent data offset 0 nr 4096 ram 12288 extent compression 0 (none) item 4 key (450 EXTENT_DATA 4096) itemoff 15967 itemsize 53 generation 9 type 2 (prealloc) prealloc data disk byte 303144960 nr 12288 prealloc data offset 4096 nr 8192 item 5 key (450 EXTENT_DATA 8192) itemoff 15914 itemsize 53 generation 9 type 2 (prealloc) prealloc data disk byte 303144960 nr 12288 prealloc data offset 8192 nr 4096 ... So the real problem happened earlier: notice that items 4 (4k-12k) and 5 (8k-12k) overlap. Both are prealloc extents. Item 4 straddles i_size and item 5 starts at i_size. Here is the state of the filesystem tree at the time of the crash: >>> root = prog.crashed_thread().stack_trace()[2]["inode"].root >>> ret, nodes, slots = btrfs_search_slot(root, BtrfsKey(450, 0, 0)) >>> print_extent_buffer(nodes[0]) leaf 30425088 level 0 items 184 generation 9 owner 5 leaf 30425088 flags 0x100000000000000 fs uuid e5bd3946-400c-4223-8923-190ef1f18677 chunk uuid d58cb17e-6d02-494a-829a-18b7d8a399da ... item 179 key (450 INODE_ITEM 0) itemoff 4907 itemsize 160 generation 7 transid 7 size 4096 nbytes 12288 block group 0 mode 100600 links 1 uid 0 gid 0 rdev 0 sequence 6 flags 0x10(PREALLOC) atime 1716417703.220000000 (2024-05-22 15:41:43) ctime 1716417703.220000000 (2024-05-22 15:41:43) mtime 1716417703.220000000 (2024-05-22 15:41:43) otime 1716417703.220000000 (2024-05-22 15:41:43) item 180 key (450 INODE_REF 256) itemoff 4894 itemsize 13 index 195 namelen 3 name: 193 item 181 key (450 XATTR_ITEM 1640047104) itemoff 4857 itemsize 37 location key (0 UNKNOWN.0 0) type XATTR transid 7 data_len 1 name_len 6 name: user.a data a item 182 key (450 EXTENT_DATA 0) itemoff 4804 itemsize 53 generation 9 type 1 (regular) extent data disk byte 303144960 nr 12288 extent data offset 0 nr 8192 ram 12288 extent compression 0 (none) item 183 key (450 EXTENT_DATA 8192) itemoff 4751 itemsize 53 generation 9 type 2 (prealloc) prealloc data disk byte 303144960 nr 12288 prealloc data offset 8192 nr 4096 Item 5 in the log tree corresponds to item 183 in the filesystem tree, but nothing matches item 4. Furthermore, item 183 is the last item in the leaf. btrfs_log_prealloc_extents() is responsible for logging prealloc extents beyond i_size. 
It first truncates any previously logged prealloc extents that start beyond i_size. Then, it walks the filesystem tree and copies the prealloc extent items to the log tree. If it hits the end of a leaf, then it calls btrfs_next_leaf(), which unlocks the tree and does another search. However, while the filesystem tree is unlocked, an ordered extent completion may modify the tree. In particular, it may insert an extent item that overlaps with an extent item that was already copied to the log tree. This may manifest in several ways depending on the exact scenario, including an EEXIST error that is silently translated to a full sync, overlapping items in the log tree, or this crash. This particular crash is triggered by the following sequence of events: - Initially, the file has i_size=4k, a regular extent from 0-4k, and a prealloc extent beyond i_size from 4k-12k. The prealloc extent item is the last item in its B-tree leaf. - The file is fsync'd, which copies its inode item and both extent items to the log tree. - An xattr is set on the file, which sets the BTRFS_INODE_COPY_EVERYTHING flag. - The range 4k-8k in the file is written using direct I/O. i_size is extended to 8k, but the ordered extent is still in flight. - The file is fsync'd. Since BTRFS_INODE_COPY_EVERYTHING is set, this calls copy_inode_items_to_log(), which calls btrfs_log_prealloc_extents(). - btrfs_log_prealloc_extents() finds the 4k-12k prealloc extent in the filesystem tree. Since it starts before i_size, it skips it. Since it is the last item in its B-tree leaf, it calls btrfs_next_leaf(). - btrfs_next_leaf() unlocks the path. - The ordered extent completion runs, which converts the 4k-8k part of the prealloc extent to written and inserts the remaining prealloc part from 8k-12k. - btrfs_next_leaf() does a search and finds the new prealloc extent 8k-12k. - btrfs_log_prealloc_extents() copies the 8k-12k prealloc extent into the log tree. Note that it overlaps with the 4k-12k prealloc extent that was copied to the log tree by the first fsync. - fsync calls btrfs_log_changed_extents(), which tries to log the 4k-8k extent that was written. - This tries to drop the range 4k-8k in the log tree, which requires adjusting the start of the 4k-12k prealloc extent in the log tree to 8k. - btrfs_set_item_key_safe() sees that there is already an extent starting at 8k in the log tree and calls BUG(). Fix this by detecting when we're about to insert an overlapping file extent item in the log tree and truncating the part that would overlap. CC: stable@vger.kernel.org # 6.1+ Reviewed-by: Filipe Manana Signed-off-by: Omar Sandoval Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 5146387b416bf..26a2e5aa08e9c 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4860,18 +4860,23 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, path->slots[0]++; continue; } - if (!dropped_extents) { - /* - * Avoid logging extent items logged in past fsync calls - * and leading to duplicate keys in the log tree. - */ + /* + * Avoid overlapping items in the log tree. The first time we + * get here, get rid of everything from a past fsync. After + * that, if the current extent starts before the end of the last + * extent we copied, truncate the last one. This can happen if + * an ordered extent completion modifies the subvolume tree + * while btrfs_next_leaf() has the tree unlocked. 
+ */ + if (!dropped_extents || key.offset < truncate_offset) { ret = truncate_inode_items(trans, root->log_root, inode, - truncate_offset, + min(key.offset, truncate_offset), BTRFS_EXTENT_DATA_KEY); if (ret) goto out; dropped_extents = true; } + truncate_offset = btrfs_file_extent_end(path); if (ins_nr == 0) start_slot = slot; ins_nr++; From fb33eb2ef0d88e75564983ef057b44c5b7e4fded Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 3 Jun 2024 12:49:08 +0100 Subject: [PATCH 321/537] btrfs: fix leak of qgroup extent records after transaction abort Qgroup extent records are created when delayed ref heads are created and then released after accounting extents at btrfs_qgroup_account_extents(), called during the transaction commit path. If a transaction is aborted we free the qgroup records by calling btrfs_qgroup_destroy_extent_records() at btrfs_destroy_delayed_refs(), unless we don't have delayed references. We are incorrectly assuming that no delayed references means we don't have qgroup extents records. We can currently have no delayed references because we ran them all during a transaction commit and the transaction was aborted after that due to some error in the commit path. So fix this by ensuring we btrfs_qgroup_destroy_extent_records() at btrfs_destroy_delayed_refs() even if we don't have any delayed references. Reported-by: syzbot+0fecc032fa134afd49df@syzkaller.appspotmail.com Link: https://lore.kernel.org/linux-btrfs/0000000000004e7f980619f91835@google.com/ Fixes: 81f7eb00ff5b ("btrfs: destroy qgroup extent records on transaction abort") CC: stable@vger.kernel.org # 6.1+ Reviewed-by: Josef Bacik Reviewed-by: Qu Wenruo Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a91a8056758a3..242ada7e47b40 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -4538,18 +4538,10 @@ static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, struct btrfs_fs_info *fs_info) { struct rb_node *node; - struct btrfs_delayed_ref_root *delayed_refs; + struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs; struct btrfs_delayed_ref_node *ref; - delayed_refs = &trans->delayed_refs; - spin_lock(&delayed_refs->lock); - if (atomic_read(&delayed_refs->num_entries) == 0) { - spin_unlock(&delayed_refs->lock); - btrfs_debug(fs_info, "delayed_refs has NO entry"); - return; - } - while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) { struct btrfs_delayed_ref_head *head; struct rb_node *n; From 267cace556e8a53d703119f7435ab556209e5b6a Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Sun, 26 May 2024 07:59:08 -0500 Subject: [PATCH 322/537] drm/amd: Fix shutdown (again) on some SMU v13.0.4/11 platforms commit cd94d1b182d2 ("dm/amd/pm: Fix problems with reboot/shutdown for some SMU 13.0.4/13.0.11 users") attempted to fix shutdown issues that were reported since commit 31729e8c21ec ("drm/amd/pm: fixes a random hang in S4 for SMU v13.0.4/11") but caused issues for some people. 
Adjust the workaround flow to properly only apply in the S4 case: -> For shutdown go through SMU_MSG_PrepareMp1ForUnload -> For S4 go through SMU_MSG_GfxDeviceDriverReset and SMU_MSG_PrepareMp1ForUnload Reported-and-tested-by: lectrode Closes: https://github.com/void-linux/void-packages/issues/50417 Cc: stable@vger.kernel.org Fixes: cd94d1b182d2 ("dm/amd/pm: Fix problems with reboot/shutdown for some SMU 13.0.4/13.0.11 users") Reviewed-by: Tim Huang Signed-off-by: Mario Limonciello Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index bc241b593db15..b6257f34a7c65 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -226,15 +226,17 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en) struct amdgpu_device *adev = smu->adev; int ret = 0; - if (!en && adev->in_s4) { - /* Adds a GFX reset as workaround just before sending the - * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering - * an invalid state. - */ - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, - SMU_RESET_MODE_2, NULL); - if (ret) - return ret; + if (!en && !adev->in_s0ix) { + if (adev->in_s4) { + /* Adds a GFX reset as workaround just before sending the + * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering + * an invalid state. + */ + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_2, NULL); + if (ret) + return ret; + } ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); } From c6c4dd54012551cce5cde408b35468f2c62b0cce Mon Sep 17 00:00:00 2001 From: Tasos Sahanidis Date: Fri, 31 May 2024 19:23:30 +0300 Subject: [PATCH 323/537] drm/amdgpu/pptable: Fix UBSAN array-index-out-of-bounds Flexible arrays used [1] instead of []. Replace the former with the latter to resolve multiple UBSAN warnings observed on boot with a BONAIRE card. In addition, use the __counted_by attribute where possible to hint the length of the arrays to the compiler and any sanitizers. Signed-off-by: Tasos Sahanidis Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/pptable.h | 91 ++++++++++++++------------- 1 file changed, 49 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/amd/include/pptable.h b/drivers/gpu/drm/amd/include/pptable.h index 2e8e6c9875f6c..f83ace2d7ec30 100644 --- a/drivers/gpu/drm/amd/include/pptable.h +++ b/drivers/gpu/drm/amd/include/pptable.h @@ -477,31 +477,30 @@ typedef struct _ATOM_PPLIB_STATE_V2 } ATOM_PPLIB_STATE_V2; typedef struct _StateArray{ - //how many states we have - UCHAR ucNumEntries; - - ATOM_PPLIB_STATE_V2 states[1]; + //how many states we have + UCHAR ucNumEntries; + + ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */; }StateArray; typedef struct _ClockInfoArray{ - //how many clock levels we have - UCHAR ucNumEntries; - - //sizeof(ATOM_PPLIB_CLOCK_INFO) - UCHAR ucEntrySize; - - UCHAR clockInfo[]; + //how many clock levels we have + UCHAR ucNumEntries; + + //sizeof(ATOM_PPLIB_CLOCK_INFO) + UCHAR ucEntrySize; + + UCHAR clockInfo[]; }ClockInfoArray; typedef struct _NonClockInfoArray{ + //how many non-clock levels we have. normally should be same as number of states + UCHAR ucNumEntries; + //sizeof(ATOM_PPLIB_NONCLOCK_INFO) + UCHAR ucEntrySize; - //how many non-clock levels we have. 
normally should be same as number of states - UCHAR ucNumEntries; - //sizeof(ATOM_PPLIB_NONCLOCK_INFO) - UCHAR ucEntrySize; - - ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[]; + ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries); }NonClockInfoArray; typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record @@ -513,8 +512,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table { - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries. + // Number of entries. + UCHAR ucNumEntries; + // Dynamically allocate entries. + ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[] __counted_by(ucNumEntries); }ATOM_PPLIB_Clock_Voltage_Dependency_Table; typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record @@ -529,8 +530,10 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table { - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries. + // Number of entries. + UCHAR ucNumEntries; + // Dynamically allocate entries. + ATOM_PPLIB_Clock_Voltage_Limit_Record entries[] __counted_by(ucNumEntries); }ATOM_PPLIB_Clock_Voltage_Limit_Table; union _ATOM_PPLIB_CAC_Leakage_Record @@ -553,8 +556,10 @@ typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record; typedef struct _ATOM_PPLIB_CAC_Leakage_Table { - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries. + // Number of entries. + UCHAR ucNumEntries; + // Dynamically allocate entries. + ATOM_PPLIB_CAC_Leakage_Record entries[] __counted_by(ucNumEntries); }ATOM_PPLIB_CAC_Leakage_Table; typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record @@ -568,8 +573,10 @@ typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table { - UCHAR ucNumEntries; // Number of entries. - ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries. + // Number of entries. + UCHAR ucNumEntries; + // Dynamically allocate entries. 
+ ATOM_PPLIB_PhaseSheddingLimits_Record entries[] __counted_by(ucNumEntries); }ATOM_PPLIB_PhaseSheddingLimits_Table; typedef struct _VCEClockInfo{ @@ -580,8 +587,8 @@ typedef struct _VCEClockInfo{ }VCEClockInfo; typedef struct _VCEClockInfoArray{ - UCHAR ucNumEntries; - VCEClockInfo entries[1]; + UCHAR ucNumEntries; + VCEClockInfo entries[] __counted_by(ucNumEntries); }VCEClockInfoArray; typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record @@ -592,8 +599,8 @@ typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table { - UCHAR numEntries; - ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1]; + UCHAR numEntries; + ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries); }ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table; typedef struct _ATOM_PPLIB_VCE_State_Record @@ -604,8 +611,8 @@ typedef struct _ATOM_PPLIB_VCE_State_Record typedef struct _ATOM_PPLIB_VCE_State_Table { - UCHAR numEntries; - ATOM_PPLIB_VCE_State_Record entries[1]; + UCHAR numEntries; + ATOM_PPLIB_VCE_State_Record entries[] __counted_by(numEntries); }ATOM_PPLIB_VCE_State_Table; @@ -626,8 +633,8 @@ typedef struct _UVDClockInfo{ }UVDClockInfo; typedef struct _UVDClockInfoArray{ - UCHAR ucNumEntries; - UVDClockInfo entries[1]; + UCHAR ucNumEntries; + UVDClockInfo entries[] __counted_by(ucNumEntries); }UVDClockInfoArray; typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record @@ -638,8 +645,8 @@ typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table { - UCHAR numEntries; - ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1]; + UCHAR numEntries; + ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[] __counted_by(numEntries); }ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table; typedef struct _ATOM_PPLIB_UVD_Table @@ -657,8 +664,8 @@ typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record }ATOM_PPLIB_SAMClk_Voltage_Limit_Record; typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{ - UCHAR numEntries; - ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[]; + UCHAR numEntries; + ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[] __counted_by(numEntries); }ATOM_PPLIB_SAMClk_Voltage_Limit_Table; typedef struct _ATOM_PPLIB_SAMU_Table @@ -675,8 +682,8 @@ typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record }ATOM_PPLIB_ACPClk_Voltage_Limit_Record; typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{ - UCHAR numEntries; - ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1]; + UCHAR numEntries; + ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[] __counted_by(numEntries); }ATOM_PPLIB_ACPClk_Voltage_Limit_Table; typedef struct _ATOM_PPLIB_ACP_Table @@ -743,9 +750,9 @@ typedef struct ATOM_PPLIB_VQ_Budgeting_Record{ } ATOM_PPLIB_VQ_Budgeting_Record; typedef struct ATOM_PPLIB_VQ_Budgeting_Table { - UCHAR revid; - UCHAR numEntries; - ATOM_PPLIB_VQ_Budgeting_Record entries[1]; + UCHAR revid; + UCHAR numEntries; + ATOM_PPLIB_VQ_Budgeting_Record entries[] __counted_by(numEntries); } ATOM_PPLIB_VQ_Budgeting_Table; #pragma pack() From c462ecd659b5fce731f1d592285832fd6ad54053 Mon Sep 17 00:00:00 2001 From: Andreas Hindborg Date: Mon, 3 Jun 2024 21:26:45 +0200 Subject: [PATCH 324/537] null_blk: fix validation of block size Block size should be between 512 and PAGE_SIZE and be a power of 2. The current check does not validate this, so update the check. Without this patch, null_blk would Oops due to a null pointer deref when loaded with bs=1536 [1]. 
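As an illustration of the constraint the new check enforces (a power of two between 512 bytes and the page size), here is a minimal userspace sketch. The helper name, the 4096-byte stand-in for PAGE_SIZE and the sample sizes are assumptions of the sketch, not the kernel's blk_validate_block_size(); note that bs=1536 from the report above is rejected.

/* Minimal userspace sketch of a block-size sanity check: the size must
 * be a power of two in [512, PAGE_SIZE]. FAKE_PAGE_SIZE and the helper
 * name are illustrative only, not the kernel implementation. */
#include <stdbool.h>
#include <stdio.h>

#define SECTOR_SIZE	512
#define FAKE_PAGE_SIZE	4096	/* assumption: 4 KiB pages */

static bool block_size_is_valid(unsigned int bs)
{
	if (bs < SECTOR_SIZE || bs > FAKE_PAGE_SIZE)
		return false;
	return (bs & (bs - 1)) == 0;	/* power of two: exactly one bit set */
}

int main(void)
{
	unsigned int sizes[] = { 512, 1024, 1536, 4096, 8192 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("bs=%u -> %s\n", sizes[i],
		       block_size_is_valid(sizes[i]) ? "ok" : "rejected");
	return 0;
}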
Link: https://lore.kernel.org/all/87wmn8mocd.fsf@metaspace.dk/ Signed-off-by: Andreas Hindborg Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/20240603192645.977968-1-nmi@metaspace.dk [axboe: remove unnecessary braces and != 0 check] Signed-off-by: Jens Axboe --- drivers/block/null_blk/main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 631dca2e4e844..75f189e42f885 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1824,8 +1824,8 @@ static int null_validate_conf(struct nullb_device *dev) dev->queue_mode = NULL_Q_MQ; } - dev->blocksize = round_down(dev->blocksize, 512); - dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096); + if (blk_validate_block_size(dev->blocksize)) + return -EINVAL; if (dev->use_per_node_hctx) { if (dev->submit_queues != nr_online_nodes) From c625dabbf1c4a8e77e4734014f2fde7aa9071a1f Mon Sep 17 00:00:00 2001 From: Yazen Ghannam Date: Mon, 3 Apr 2023 16:42:44 +0000 Subject: [PATCH 325/537] x86/amd_nb: Check for invalid SMN reads AMD Zen-based systems use a System Management Network (SMN) that provides access to implementation-specific registers. SMN accesses are done indirectly through an index/data pair in PCI config space. The PCI config access may fail and return an error code. This would prevent the "read" value from being updated. However, the PCI config access may succeed, but the return value may be invalid. This is in similar fashion to PCI bad reads, i.e. return all bits set. Most systems will return 0 for SMN addresses that are not accessible. This is in line with AMD convention that unavailable registers are Read-as-Zero/Writes-Ignored. However, some systems will return a "PCI Error Response" instead. This value, along with an error code of 0 from the PCI config access, will confuse callers of the amd_smn_read() function. Check for this condition, clear the return value, and set a proper error code. Fixes: ddfe43cdc0da ("x86/amd_nb: Add SMN and Indirect Data Fabric access for AMD Fam17h") Signed-off-by: Yazen Ghannam Signed-off-by: Borislav Petkov (AMD) Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230403164244.471141-1-yazen.ghannam@amd.com --- arch/x86/kernel/amd_nb.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 3cf156f708591..027a8c7a2c9e6 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -215,7 +215,14 @@ static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write) int amd_smn_read(u16 node, u32 address, u32 *value) { - return __amd_smn_rw(node, address, value, false); + int err = __amd_smn_rw(node, address, value, false); + + if (PCI_POSSIBLE_ERROR(*value)) { + err = -ENODEV; + *value = 0; + } + + return err; } EXPORT_SYMBOL_GPL(amd_smn_read); From 229bedbf62b13af5aba6525ad10b62ad38d9ccb5 Mon Sep 17 00:00:00 2001 From: Aleksandr Mishin Date: Tue, 4 Jun 2024 13:05:52 +0300 Subject: [PATCH 326/537] net/mlx5: Fix tainted pointer delete is case of flow rules creation fail In case of flow rule creation fail in mlx5_lag_create_port_sel_table(), instead of previously created rules, the tainted pointer is deleted deveral times. Fix this bug by using correct flow rules pointers. Found by Linux Verification Center (linuxtesting.org) with SVACE. 
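To make the corrected error path easier to follow, below is a small self-contained userspace model of the same unwind: a ports x buckets grid of resources is built row by row, creation fails partway through a row, and the cleanup first releases the partially filled row and then every earlier row in full, recomputing the flat index each time. The grid sizes, the failure point and all names are invented for the sketch; only the loop structure mirrors the patch.

/* Userspace model of unwinding a partially built ports x buckets grid.
 * malloc() stands in for rule creation; the interesting part is the
 * index math on the error path, which mirrors the corrected do/while
 * cleanup in the patch above. All names and sizes are illustrative. */
#include <stdio.h>
#include <stdlib.h>

#define PORTS	2
#define BUCKETS	4
#define FAIL_AT	5	/* simulate the 6th creation failing */

int main(void)
{
	void *rules[PORTS * BUCKETS] = { NULL };
	unsigned int i, j;

	for (i = 0; i < PORTS; i++) {
		for (j = 0; j < BUCKETS; j++) {
			if (i * BUCKETS + j == FAIL_AT)
				goto err;	/* simulated creation failure */
			rules[i * BUCKETS + j] = malloc(16);
			if (!rules[i * BUCKETS + j])
				goto err;
		}
	}
	return 0;	/* not reached with FAIL_AT inside the grid */
err:
	/*
	 * Free exactly what was created: the partial row i first
	 * (entries 0 .. j-1), then every earlier row in full.
	 */
	do {
		while (j--) {
			unsigned int idx = i * BUCKETS + j;

			printf("freeing rule [%u][%u] (idx %u)\n", i, j, idx);
			free(rules[idx]);
		}
		j = BUCKETS;
	} while (i--);
	return 1;
}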
Fixes: 352899f384d4 ("net/mlx5: Lag, use buckets in hash mode") Signed-off-by: Aleksandr Mishin Reviewed-by: Jacob Keller Reviewed-by: Tariq Toukan Link: https://lore.kernel.org/r/20240604100552.25201-1-amishin@t-argos.ru Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index c16b462ddedf7..ab2717012b79b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, &dest, 1); if (IS_ERR(lag_definer->rules[idx])) { err = PTR_ERR(lag_definer->rules[idx]); - while (i--) - while (j--) + do { + while (j--) { + idx = i * ldev->buckets + j; mlx5_del_flow_rules(lag_definer->rules[idx]); + } + j = ldev->buckets; + } while (i--); goto destroy_fg; } } From f921a58ae20852d188f70842431ce6519c4fdc36 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 4 Jun 2024 18:15:11 +0000 Subject: [PATCH 327/537] net/sched: taprio: always validate TCA_TAPRIO_ATTR_PRIOMAP If one TCA_TAPRIO_ATTR_PRIOMAP attribute has been provided, taprio_parse_mqprio_opt() must validate it, or userspace can inject arbitrary data to the kernel, the second time taprio_change() is called. First call (with valid attributes) sets dev->num_tc to a non zero value. Second call (with arbitrary mqprio attributes) returns early from taprio_parse_mqprio_opt() and bad things can happen. Fixes: a3d43c0d56f1 ("taprio: Add support adding an admin schedule") Reported-by: Noam Rathaus Signed-off-by: Eric Dumazet Acked-by: Vinicius Costa Gomes Reviewed-by: Vladimir Oltean Link: https://lore.kernel.org/r/20240604181511.769870-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/sched/sch_taprio.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 937a0c513c174..b284a06b5a75f 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1176,16 +1176,13 @@ static int taprio_parse_mqprio_opt(struct net_device *dev, { bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags); - if (!qopt && !dev->num_tc) { - NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary"); - return -EINVAL; - } - - /* If num_tc is already set, it means that the user already - * configured the mqprio part - */ - if (dev->num_tc) + if (!qopt) { + if (!dev->num_tc) { + NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary"); + return -EINVAL; + } return 0; + } /* taprio imposes that traffic classes map 1:n to tx queues */ if (qopt->num_tc > dev->num_tx_queues) { From 323a359f9b077f382f4483023d096a4d316fd135 Mon Sep 17 00:00:00 2001 From: Karol Kolacinski Date: Tue, 4 Jun 2024 14:05:27 +0200 Subject: [PATCH 328/537] ptp: Fix error message on failed pin verification On failed verification of PTP clock pin, error message prints channel number instead of pin index after "pin", which is incorrect. Fix error message by adding channel number to the message and printing pin number instead of channel number. 
Fixes: 6092315dfdec ("ptp: introduce programmable pins.") Signed-off-by: Karol Kolacinski Acked-by: Richard Cochran Link: https://lore.kernel.org/r/20240604120555.16643-1-karol.kolacinski@intel.com Signed-off-by: Jakub Kicinski --- drivers/ptp/ptp_chardev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 7513018c9f9ac..2067b0120d083 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -85,7 +85,8 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin, } if (info->verify(info, pin, func, chan)) { - pr_err("driver cannot use function %u on pin %u\n", func, chan); + pr_err("driver cannot use function %u and channel %u on pin %u\n", + func, chan, pin); return -EOPNOTSUPP; } From 7926d51f73e0434a6250c2fd1a0555f98d9a62da Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Tue, 4 Jun 2024 22:25:21 -0400 Subject: [PATCH 329/537] scsi: sd: Use READ(16) when reading block zero on large capacity disks Commit 321da3dc1f3c ("scsi: sd: usb_storage: uas: Access media prior to querying device properties") triggered a read to LBA 0 before attempting to inquire about device characteristics. This was done because some protocol bridge devices will return generic values until an attached storage device's media has been accessed. Pierre Tomon reported that this change caused problems on a large capacity external drive connected via a bridge device. The bridge in question does not appear to implement the READ(10) command. Issue a READ(16) instead of READ(10) when a device has been identified as preferring 16-byte commands (use_16_for_rw heuristic). Link: https://bugzilla.kernel.org/show_bug.cgi?id=218890 Link: https://lore.kernel.org/r/70dd7ae0-b6b1-48e1-bb59-53b7c7f18274@rowland.harvard.edu Link: https://lore.kernel.org/r/20240605022521.3960956-1-martin.petersen@oracle.com Fixes: 321da3dc1f3c ("scsi: sd: usb_storage: uas: Access media prior to querying device properties") Cc: stable@vger.kernel.org Reported-by: Pierre Tomon Suggested-by: Alan Stern Tested-by: Pierre Tomon Reviewed-by: Bart Van Assche Signed-off-by: Martin K. 
Petersen --- drivers/scsi/sd.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 332eb9dac22d9..fbc11046bbf60 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3565,16 +3565,23 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, static void sd_read_block_zero(struct scsi_disk *sdkp) { - unsigned int buf_len = sdkp->device->sector_size; - char *buffer, cmd[10] = { }; + struct scsi_device *sdev = sdkp->device; + unsigned int buf_len = sdev->sector_size; + u8 *buffer, cmd[16] = { }; buffer = kmalloc(buf_len, GFP_KERNEL); if (!buffer) return; - cmd[0] = READ_10; - put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */ - put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */ + if (sdev->use_16_for_rw) { + cmd[0] = READ_16; + put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */ + put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */ + } else { + cmd[0] = READ_10; + put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */ + put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */ + } scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len, SD_TIMEOUT, sdkp->max_retries, NULL); From 4254dfeda82f20844299dca6c38cbffcfd499f41 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Wed, 5 Jun 2024 01:55:29 -0700 Subject: [PATCH 330/537] scsi: mpt3sas: Avoid test/set_bit() operating in non-allocated memory There is a potential out-of-bounds access when using test_bit() on a single word. The test_bit() and set_bit() functions operate on long values, and when testing or setting a single word, they can exceed the word boundary. KASAN detects this issue and produces a dump: BUG: KASAN: slab-out-of-bounds in _scsih_add_device.constprop.0 (./arch/x86/include/asm/bitops.h:60 ./include/asm-generic/bitops/instrumented-atomic.h:29 drivers/scsi/mpt3sas/mpt3sas_scsih.c:7331) mpt3sas Write of size 8 at addr ffff8881d26e3c60 by task kworker/u1536:2/2965 For full log, please look at [1]. Make the allocation at least the size of sizeof(unsigned long) so that set_bit() and test_bit() have sufficient room for read/write operations without overwriting unallocated memory. [1] Link: https://lore.kernel.org/all/ZkNcALr3W3KGYYJG@gmail.com/ Fixes: c696f7b83ede ("scsi: mpt3sas: Implement device_remove_in_progress check in IOCTL path") Cc: stable@vger.kernel.org Suggested-by: Keith Busch Signed-off-by: Breno Leitao Link: https://lore.kernel.org/r/20240605085530.499432-1-leitao@debian.org Reviewed-by: Keith Busch Signed-off-by: Martin K. Petersen --- drivers/scsi/mpt3sas/mpt3sas_base.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 258647fc6bddb..1092497563b22 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -8512,6 +8512,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); if (ioc->facts.MaxDevHandle % 8) ioc->pd_handles_sz++; + /* + * pd_handles_sz should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory touch may occur. 
+ */ + ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long)); + ioc->pd_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL); if (!ioc->pd_handles) { @@ -8529,6 +8535,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8); if (ioc->facts.MaxDevHandle % 8) ioc->pend_os_device_add_sz++; + + /* + * pend_os_device_add_sz should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory may occur. + */ + ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz, + sizeof(unsigned long)); ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, GFP_KERNEL); if (!ioc->pend_os_device_add) { @@ -8820,6 +8833,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) if (ioc->facts.MaxDevHandle % 8) pd_handles_sz++; + /* + * pd_handles should have, at least, the minimal room for + * set_bit()/test_bit(), otherwise out-of-memory touch may + * occur. + */ + pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long)); pd_handles = krealloc(ioc->pd_handles, pd_handles_sz, GFP_KERNEL); if (!pd_handles) { From 426826933109093503e7ef15d49348fc5ab505fe Mon Sep 17 00:00:00 2001 From: Ian Forbes Date: Tue, 21 May 2024 13:47:17 -0500 Subject: [PATCH 331/537] drm/vmwgfx: Filter modes which exceed graphics memory SVGA requires individual surfaces to fit within graphics memory (max_mob_pages) which means that modes with a final buffer size that would exceed graphics memory must be pruned otherwise creation will fail. Additionally llvmpipe requires its buffer height and width to be a multiple of its tile size which is 64. As a result we have to anticipate that llvmpipe will round up the mode size passed to it by the compositor when it creates buffers and filter modes where this rounding exceeds graphics memory. This fixes an issue where VMs with low graphics memory (< 64MiB) configured with high resolution mode boot to a black screen because surface creation fails. Fixes: d947d1b71deb ("drm/vmwgfx: Add and connect connector helper function") Signed-off-by: Ian Forbes Signed-off-by: Zack Rusin Link: https://patchwork.freedesktop.org/patch/msgid/20240521184720.767-2-ian.forbes@broadcom.com --- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 45 ++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 2041c4d48daa5..52e8ec3b23575 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -43,7 +43,14 @@ #define vmw_connector_to_stdu(x) \ container_of(x, struct vmw_screen_target_display_unit, base.connector) - +/* + * Some renderers such as llvmpipe will align the width and height of their + * buffers to match their tile size. We need to keep this in mind when exposing + * modes to userspace so that this possible over-allocation will not exceed + * graphics memory. 64x64 pixels seems to be a reasonable upper bound for the + * tile size of current renderers. + */ +#define GPU_TILE_SIZE 64 enum stdu_content_type { SAME_AS_DISPLAY = 0, @@ -829,7 +836,41 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector) vmw_stdu_destroy(vmw_connector_to_stdu(connector)); } +static enum drm_mode_status +vmw_stdu_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + enum drm_mode_status ret; + struct drm_device *dev = connector->dev; + struct vmw_private *dev_priv = vmw_priv(dev); + u64 assumed_cpp = dev_priv->assume_16bpp ? 
2 : 4; + /* Align width and height to account for GPU tile over-alignment */ + u64 required_mem = ALIGN(mode->hdisplay, GPU_TILE_SIZE) * + ALIGN(mode->vdisplay, GPU_TILE_SIZE) * + assumed_cpp; + required_mem = ALIGN(required_mem, PAGE_SIZE); + + ret = drm_mode_validate_size(mode, dev_priv->stdu_max_width, + dev_priv->stdu_max_height); + if (ret != MODE_OK) + return ret; + ret = drm_mode_validate_size(mode, dev_priv->texture_max_width, + dev_priv->texture_max_height); + if (ret != MODE_OK) + return ret; + + if (required_mem > dev_priv->max_primary_mem) + return MODE_MEM; + + if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE) + return MODE_MEM; + + if (required_mem > dev_priv->max_mob_size) + return MODE_MEM; + + return MODE_OK; +} static const struct drm_connector_funcs vmw_stdu_connector_funcs = { .dpms = vmw_du_connector_dpms, @@ -845,7 +886,7 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = { static const struct drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = { .get_modes = vmw_connector_get_modes, - .mode_valid = vmw_connector_mode_valid + .mode_valid = vmw_stdu_connector_mode_valid }; From fb5e19d2dd03eb995ccd468d599b2337f7f66555 Mon Sep 17 00:00:00 2001 From: Ian Forbes Date: Tue, 21 May 2024 13:47:18 -0500 Subject: [PATCH 332/537] drm/vmwgfx: 3D disabled should not effect STDU memory limits This limit became a hard cap starting with the change referenced below. Surface creation on the device will fail if the requested size is larger than this limit so altering the value arbitrarily will expose modes that are too large for the device's hard limits. Fixes: 7ebb47c9f9ab ("drm/vmwgfx: Read new register for GB memory when available") Signed-off-by: Ian Forbes Signed-off-by: Zack Rusin Link: https://patchwork.freedesktop.org/patch/msgid/20240521184720.767-3-ian.forbes@broadcom.com --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 8f1730aeacc9e..18a1f05c6fa85 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -960,13 +960,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) vmw_read(dev_priv, SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); - /* - * Workaround for low memory 2D VMs to compensate for the - * allocation taken by fbdev - */ - if (!(dev_priv->capabilities & SVGA_CAP_3D)) - mem_size *= 3; - dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; dev_priv->max_primary_mem = vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM); From dde1de06bd7248fd83c4ce5cf0dbe9e4e95bbb91 Mon Sep 17 00:00:00 2001 From: Ian Forbes Date: Tue, 21 May 2024 13:47:19 -0500 Subject: [PATCH 333/537] drm/vmwgfx: Remove STDU logic from generic mode_valid function STDU has its own mode_valid function now so this logic can be removed from the generic version. 
Fixes: 935f795045a6 ("drm/vmwgfx: Refactor drm connector probing for display modes") Signed-off-by: Ian Forbes Signed-off-by: Zack Rusin Link: https://patchwork.freedesktop.org/patch/msgid/20240521184720.767-4-ian.forbes@broadcom.com --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 3 --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 26 +++++++++----------------- 2 files changed, 9 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 4ecaea0026fcc..a1ce41e1c4684 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1043,9 +1043,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, int vmw_kms_write_svga(struct vmw_private *vmw_priv, unsigned width, unsigned height, unsigned pitch, unsigned bpp, unsigned depth); -bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, - uint32_t pitch, - uint32_t height); int vmw_kms_present(struct vmw_private *dev_priv, struct drm_file *file_priv, struct vmw_framebuffer *vfb, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 13b2820cae51d..9532258a0848c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2171,13 +2171,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv, return 0; } +static bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, - uint32_t pitch, - uint32_t height) + u64 pitch, + u64 height) { - return ((u64) pitch * (u64) height) < (u64) - ((dev_priv->active_display_unit == vmw_du_screen_target) ? - dev_priv->max_primary_mem : dev_priv->vram_size); + return (pitch * height) < (u64)dev_priv->vram_size; } /** @@ -2873,25 +2872,18 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update) enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + enum drm_mode_status ret; struct drm_device *dev = connector->dev; struct vmw_private *dev_priv = vmw_priv(dev); - u32 max_width = dev_priv->texture_max_width; - u32 max_height = dev_priv->texture_max_height; u32 assumed_cpp = 4; if (dev_priv->assume_16bpp) assumed_cpp = 2; - if (dev_priv->active_display_unit == vmw_du_screen_target) { - max_width = min(dev_priv->stdu_max_width, max_width); - max_height = min(dev_priv->stdu_max_height, max_height); - } - - if (max_width < mode->hdisplay) - return MODE_BAD_HVALUE; - - if (max_height < mode->vdisplay) - return MODE_BAD_VVALUE; + ret = drm_mode_validate_size(mode, dev_priv->texture_max_width, + dev_priv->texture_max_height); + if (ret != MODE_OK) + return ret; if (!vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * assumed_cpp, From a54a200f3dc710db0572aba45c5c06b12b74489a Mon Sep 17 00:00:00 2001 From: Ian Forbes Date: Tue, 21 May 2024 13:47:20 -0500 Subject: [PATCH 334/537] drm/vmwgfx: Standardize use of kibibytes when logging Use the same standard abbreviation KiB instead of incorrect variants. 
Signed-off-by: Ian Forbes Signed-off-by: Zack Rusin Link: https://patchwork.freedesktop.org/patch/msgid/20240521184720.767-5-ian.forbes@broadcom.com --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 12 ++++++------ drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 18a1f05c6fa85..823d8d2da17c3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -746,7 +746,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, dev->vram_size = pci_resource_len(pdev, 2); drm_info(&dev->drm, - "Register MMIO at 0x%pa size is %llu kiB\n", + "Register MMIO at 0x%pa size is %llu KiB\n", &rmmio_start, (uint64_t)rmmio_size / 1024); dev->rmmio = devm_ioremap(dev->drm.dev, rmmio_start, @@ -765,7 +765,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, fifo_size = pci_resource_len(pdev, 2); drm_info(&dev->drm, - "FIFO at %pa size is %llu kiB\n", + "FIFO at %pa size is %llu KiB\n", &fifo_start, (uint64_t)fifo_size / 1024); dev->fifo_mem = devm_memremap(dev->drm.dev, fifo_start, @@ -790,7 +790,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, * SVGA_REG_VRAM_SIZE. */ drm_info(&dev->drm, - "VRAM at %pa size is %llu kiB\n", + "VRAM at %pa size is %llu KiB\n", &dev->vram_start, (uint64_t)dev->vram_size / 1024); return 0; @@ -984,13 +984,13 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->max_primary_mem = dev_priv->vram_size; } drm_info(&dev_priv->drm, - "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n", + "Legacy memory limits: VRAM = %llu KiB, FIFO = %llu KiB, surface = %u KiB\n", (u64)dev_priv->vram_size / 1024, (u64)dev_priv->fifo_mem_size / 1024, dev_priv->memory_size / 1024); drm_info(&dev_priv->drm, - "MOB limits: max mob size = %u kB, max mob pages = %u\n", + "MOB limits: max mob size = %u KiB, max mob pages = %u\n", dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages); ret = vmw_dma_masks(dev_priv); @@ -1008,7 +1008,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) (unsigned)dev_priv->max_gmr_pages); } drm_info(&dev_priv->drm, - "Maximum display memory size is %llu kiB\n", + "Maximum display memory size is %llu KiB\n", (uint64_t)dev_priv->max_primary_mem / 1024); /* Need mmio memory to check for fifo pitchlock cap. 
*/ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index a0b47c9b33f55..5bd967fbcf554 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -94,14 +94,14 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, } else new_max_pages = gman->max_gmr_pages * 2; if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) { - DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n", + DRM_WARN("vmwgfx: increasing guest mob limits to %u KiB.\n", ((new_max_pages) << (PAGE_SHIFT - 10))); gman->max_gmr_pages = new_max_pages; } else { char buf[256]; snprintf(buf, sizeof(buf), - "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n", + "vmwgfx, error: guest graphics is out of memory (mob limit at: %u KiB).\n", ((gman->max_gmr_pages) << (PAGE_SHIFT - 10))); vmw_host_printf(buf); DRM_WARN("%s", buf); From 7ef91dcba172441582962602ff6899bfec6078b7 Mon Sep 17 00:00:00 2001 From: Ian Forbes Date: Fri, 31 May 2024 15:33:58 -0500 Subject: [PATCH 335/537] drm/vmwgfx: Don't destroy Screen Target when CRTC is enabled but inactive drm_crtc_helper_funcs::atomic_disable can be called even when the CRTC is still enabled. This can occur when the mode changes or the CRTC is set as inactive. In the case where the CRTC is being set as inactive we only want to blank the screen. The Screen Target should remain intact as long as the mode has not changed and CRTC is enabled. This fixes a bug with GDM where locking the screen results in a permanent black screen because the Screen Target is no longer defined. Fixes: 7b0062036c3b ("drm/vmwgfx: Implement virtual crc generation") Signed-off-by: Ian Forbes Reviewed-by: Martin Krastev Signed-off-by: Zack Rusin Link: https://patchwork.freedesktop.org/patch/msgid/20240531203358.26677-1-ian.forbes@broadcom.com --- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 52e8ec3b23575..afc0bc4ef4e1e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -421,6 +421,7 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc, { struct vmw_private *dev_priv; struct vmw_screen_target_display_unit *stdu; + struct drm_crtc_state *new_crtc_state; int ret; if (!crtc) { @@ -430,6 +431,7 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc, stdu = vmw_crtc_to_stdu(crtc); dev_priv = vmw_priv(crtc->dev); + new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (dev_priv->vkms_enabled) drm_crtc_vblank_off(crtc); @@ -441,6 +443,14 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc, (void) vmw_stdu_update_st(dev_priv, stdu); + /* Don't destroy the Screen Target if we are only setting the + * display as inactive + */ + if (new_crtc_state->enable && + !new_crtc_state->active && + !new_crtc_state->mode_changed) + return; + ret = vmw_stdu_destroy_st(dev_priv, stdu); if (ret) DRM_ERROR("Failed to destroy Screen Target\n"); From 0d648dd5c899f33154b98a6aef6e3dab0f4de613 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 23 May 2024 10:36:39 +0800 Subject: [PATCH 336/537] mm: drop the 'anon_' prefix for swap-out mTHP counters The mTHP swap related counters: 'anon_swpout' and 'anon_swpout_fallback' are confusing with an 'anon_' prefix, since the shmem can swap out non-anonymous pages. 
So drop the 'anon_' prefix to keep consistent with the old swap counter names. This is needed in 6.10-rcX to avoid having an inconsistent ABI out in the field. Link: https://lkml.kernel.org/r/7a8989c13299920d7589007a30065c3e2c19f0e0.1716431702.git.baolin.wang@linux.alibaba.com Fixes: d0f048ac39f6 ("mm: add per-order mTHP anon_swpout and anon_swpout_fallback counters") Fixes: 42248b9d34ea ("mm: add docs for per-order mTHP counters and transhuge_page ABI") Signed-off-by: Baolin Wang Suggested-by: "Huang, Ying" Acked-by: Barry Song Cc: David Hildenbrand Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Ryan Roberts Cc: Zi Yan Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/transhuge.rst | 4 ++-- include/linux/huge_mm.h | 4 ++-- mm/huge_memory.c | 8 ++++---- mm/page_io.c | 2 +- mm/vmscan.c | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 076443cc10a6c..d414d3f5592a8 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -467,11 +467,11 @@ anon_fault_fallback_charge instead falls back to using huge pages with lower orders or small pages even though the allocation was successful. -anon_swpout +swpout is incremented every time a huge page is swapped out in one piece without splitting. -anon_swpout_fallback +swpout_fallback is incremented if a huge page has to be split before swapout. Usually because failed to allocate some continuous swap space for the huge page. diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index c8d3ec116e291..8c72d37865838 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -269,8 +269,8 @@ enum mthp_stat_item { MTHP_STAT_ANON_FAULT_ALLOC, MTHP_STAT_ANON_FAULT_FALLBACK, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, - MTHP_STAT_ANON_SWPOUT, - MTHP_STAT_ANON_SWPOUT_FALLBACK, + MTHP_STAT_SWPOUT, + MTHP_STAT_SWPOUT_FALLBACK, __MTHP_STAT_COUNT }; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 317de2afd3718..89932fd0f62e8 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -558,15 +558,15 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC); DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK); DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); -DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT); -DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK); +DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT); +DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK); static struct attribute *stats_attrs[] = { &anon_fault_alloc_attr.attr, &anon_fault_fallback_attr.attr, &anon_fault_fallback_charge_attr.attr, - &anon_swpout_attr.attr, - &anon_swpout_fallback_attr.attr, + &swpout_attr.attr, + &swpout_fallback_attr.attr, NULL, }; diff --git a/mm/page_io.c b/mm/page_io.c index 46c603dddf043..0a150c240bf4f 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -217,7 +217,7 @@ static inline void count_swpout_vm_event(struct folio *folio) count_memcg_folio_events(folio, THP_SWPOUT, 1); count_vm_event(THP_SWPOUT); } - count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT); + count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT); #endif count_vm_events(PSWPOUT, folio_nr_pages(folio)); } diff --git a/mm/vmscan.c b/mm/vmscan.c index d55e8d07ffc4a..2e34de9cd0d4f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1227,7 
+1227,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, THP_SWPOUT_FALLBACK, 1); count_vm_event(THP_SWPOUT_FALLBACK); } - count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK); + count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK); #endif if (!add_to_swap(folio)) goto activate_locked_split; From 94d46bf17916965e918bd2f3d2eec057f7c5578d Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 24 May 2024 08:50:48 +1200 Subject: [PATCH 337/537] mm: huge_mm: fix undefined reference to `mthp_stats' for CONFIG_SYSFS=n if CONFIG_SYSFS is not enabled in config, we get the below error, All errors (new ones prefixed by >>): s390-linux-ld: mm/memory.o: in function `count_mthp_stat': >> include/linux/huge_mm.h:285:(.text+0x191c): undefined reference to `mthp_stats' s390-linux-ld: mm/huge_memory.o:(.rodata+0x10): undefined reference to `mthp_stats' vim +285 include/linux/huge_mm.h 279 280 static inline void count_mthp_stat(int order, enum mthp_stat_item item) 281 { 282 if (order <= 0 || order > PMD_ORDER) 283 return; 284 > 285 this_cpu_inc(mthp_stats.stats[order][item]); 286 } 287 Link: https://lkml.kernel.org/r/20240523210045.40444-1-21cnbao@gmail.com Fixes: ec33687c6749 ("mm: add per-order mTHP anon_fault_alloc and anon_fault_fallback counters") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202405231728.tCAogiSI-lkp@intel.com/ Signed-off-by: Barry Song Tested-by: Yujie Liu Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 8c72d37865838..2aa986a5cd1b5 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -278,6 +278,7 @@ struct mthp_stat { unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT]; }; +#ifdef CONFIG_SYSFS DECLARE_PER_CPU(struct mthp_stat, mthp_stats); static inline void count_mthp_stat(int order, enum mthp_stat_item item) @@ -287,6 +288,11 @@ static inline void count_mthp_stat(int order, enum mthp_stat_item item) this_cpu_inc(mthp_stats.stats[order][item]); } +#else +static inline void count_mthp_stat(int order, enum mthp_stat_item item) +{ +} +#endif #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ From 6434e69814b159608a23135ca2be36024f402717 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 24 May 2024 12:54:44 +1200 Subject: [PATCH 338/537] mm: arm64: fix the out-of-bounds issue in contpte_clear_young_dirty_ptes We are passing a huge nr to __clear_young_dirty_ptes() right now. While we should pass the number of pages, we are actually passing CONT_PTE_SIZE. This is causing lots of crashes of MADV_FREE, panic oops could vary everytime. 
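The mixup is purely one of units: nr counts pages, while the start and end of the range are byte addresses, so the end has to be computed as start + nr * PAGE_SIZE and the count handed back down has to be (end - start) / PAGE_SIZE. A tiny userspace sketch of that conversion follows, with made-up values for the page size and the contiguous-block size; it only models the unit handling, not the conditional widening the kernel does when the first or last PTE is part of a contiguous block.

/* Unit-conversion sketch: a range given as (addr, nr pages) is widened
 * to contiguous-block boundaries in bytes and converted back to a page
 * count. PAGE_SIZE and CONT_PTES are illustrative values only. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define CONT_PTES	16UL			/* pages per contiguous block */
#define CONT_SIZE	(CONT_PTES * PAGE_SIZE)	/* bytes per contiguous block */
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long addr = 0x400000UL + 3 * PAGE_SIZE;	/* arbitrary start */
	unsigned long nr = 5;					/* pages */

	unsigned long buggy_end = addr + nr;		/* pages added to bytes */
	unsigned long start = ALIGN_DOWN(addr, CONT_SIZE);
	unsigned long end = ALIGN_UP(addr + nr * PAGE_SIZE, CONT_SIZE);
	unsigned long pages = (end - start) / PAGE_SIZE;

	printf("buggy end: %#lx (short by a factor of PAGE_SIZE)\n", buggy_end);
	printf("fixed range: %#lx-%#lx -> %lu pages\n", start, end, pages);
	assert(pages % CONT_PTES == 0);	/* whole contiguous blocks only */
	return 0;
}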
Link: https://lkml.kernel.org/r/20240524005444.135417-1-21cnbao@gmail.com Fixes: 89e86854fb0a ("mm/arm64: override clear_young_dirty_ptes() batch helper") Signed-off-by: Barry Song Reviewed-by: Baolin Wang Acked-by: Lance Yang Acked-by: David Hildenbrand Acked-by: Chris Li Cc: Barry Song <21cnbao@gmail.com> Cc: Ryan Roberts Cc: Jeff Xie Cc: Kefeng Wang Cc: Michal Hocko Cc: Minchan Kim Cc: Muchun Song Cc: Peter Xu Cc: Yang Shi Cc: Yin Fengwei Cc: Zach O'Keefe Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/arm64/mm/contpte.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c index 9f9486de0004d..a3edced29ac13 100644 --- a/arch/arm64/mm/contpte.c +++ b/arch/arm64/mm/contpte.c @@ -376,7 +376,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma, * clearing access/dirty for the whole block. */ unsigned long start = addr; - unsigned long end = start + nr; + unsigned long end = start + nr * PAGE_SIZE; if (pte_cont(__ptep_get(ptep + nr - 1))) end = ALIGN(end, CONT_PTE_SIZE); @@ -386,7 +386,7 @@ void contpte_clear_young_dirty_ptes(struct vm_area_struct *vma, ptep = contpte_align_down(ptep); } - __clear_young_dirty_ptes(vma, start, ptep, end - start, flags); + __clear_young_dirty_ptes(vma, start, ptep, (end - start) / PAGE_SIZE, flags); } EXPORT_SYMBOL_GPL(contpte_clear_young_dirty_ptes); From 36eef400c2d571d05b6e41a9a61f874d4a7b82c5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 28 May 2024 16:13:41 +0200 Subject: [PATCH 339/537] memcg: remove the lockdep assert from __mod_objcg_mlstate() The assert was introduced in the commit cited below as an insurance that the semantic is the same after the local_irq_save() has been removed and the function has been made static. The original requirement to disable interrupt was due the modification of per-CPU counters which require interrupts to be disabled because the counter update operation is not atomic and some of the counters are updated from interrupt context. All callers of __mod_objcg_mlstate() acquire a lock (memcg_stock.stock_lock) which disables interrupts on !PREEMPT_RT and the lockdep assert is satisfied. On PREEMPT_RT the interrupts are not disabled and the assert triggers. The safety of the counter update is already ensured by VM_WARN_ON_IRQS_ENABLED() which is part of __mod_memcg_lruvec_state() and does not require yet another check. Remove the lockdep assert from __mod_objcg_mlstate(). 
Link: https://lkml.kernel.org/r/20240528141341.rz_rytN_@linutronix.de Fixes: 91882c1617c1 ("memcg: simple cleanup of stats update functions") Signed-off-by: Sebastian Andrzej Siewior Acked-by: Vlastimil Babka Acked-by: Shakeel Butt Acked-by: Michal Hocko Reviewed-by: Vlastimil Babka Cc: Johannes Weiner Cc: Muchun Song Cc: Roman Gushchin Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- mm/memcontrol.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7fad15b2290c7..36793e509f470 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3147,8 +3147,6 @@ static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg, struct mem_cgroup *memcg; struct lruvec *lruvec; - lockdep_assert_irqs_disabled(); - rcu_read_lock(); memcg = obj_cgroup_memcg(objcg); lruvec = mem_cgroup_lruvec(memcg, pgdat); From a4ca369ca221bb7e06c725792ac107f0e48e82e7 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Thu, 30 May 2024 23:15:56 +0900 Subject: [PATCH 340/537] nilfs2: fix potential kernel bug due to lack of writeback flag waiting Destructive writes to a block device on which nilfs2 is mounted can cause a kernel bug in the folio/page writeback start routine or writeback end routine (__folio_start_writeback in the log below): kernel BUG at mm/page-writeback.c:3070! Oops: invalid opcode: 0000 [#1] PREEMPT SMP KASAN PTI ... RIP: 0010:__folio_start_writeback+0xbaa/0x10e0 Code: 25 ff 0f 00 00 0f 84 18 01 00 00 e8 40 ca c6 ff e9 17 f6 ff ff e8 36 ca c6 ff 4c 89 f7 48 c7 c6 80 c0 12 84 e8 e7 b3 0f 00 90 <0f> 0b e8 1f ca c6 ff 4c 89 f7 48 c7 c6 a0 c6 12 84 e8 d0 b3 0f 00 ... Call Trace: nilfs_segctor_do_construct+0x4654/0x69d0 [nilfs2] nilfs_segctor_construct+0x181/0x6b0 [nilfs2] nilfs_segctor_thread+0x548/0x11c0 [nilfs2] kthread+0x2f0/0x390 ret_from_fork+0x4b/0x80 ret_from_fork_asm+0x1a/0x30 This is because when the log writer starts a writeback for segment summary blocks or a super root block that use the backing device's page cache, it does not wait for the ongoing folio/page writeback, resulting in an inconsistent writeback state. Fix this issue by waiting for ongoing writebacks when putting folios/pages on the backing device into writeback state. 
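The pattern of the fix (wait for an in-flight writeback under the folio lock before starting a new one, instead of tripping the BUG in __folio_start_writeback()) can be modelled in userspace with a mutex and a condition variable. This is only an analogy, a sketch under the assumption that the writeback flag behaves like a boolean guarded by a lock; all names are invented, build with -pthread.

/* Userspace analogy of the fix: never re-mark a buffer as "writeback in
 * flight" while a previous writeback is still pending; wait it out under
 * the lock first. The mutex plays the folio lock, the flag plays the
 * writeback bit. Names are illustrative, not kernel API. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct buf {
	pthread_mutex_t lock;
	pthread_cond_t done;
	bool under_writeback;
};

static void start_writeback(struct buf *b)
{
	pthread_mutex_lock(&b->lock);
	while (b->under_writeback)		/* the added wait */
		pthread_cond_wait(&b->done, &b->lock);
	b->under_writeback = true;		/* start-writeback analogue */
	pthread_mutex_unlock(&b->lock);
}

static void end_writeback(struct buf *b)
{
	pthread_mutex_lock(&b->lock);
	b->under_writeback = false;		/* I/O completion analogue */
	pthread_cond_broadcast(&b->done);
	pthread_mutex_unlock(&b->lock);
}

static void *io_completion(void *arg)
{
	usleep(1000);			/* pretend the device takes a while */
	end_writeback(arg);
	return NULL;
}

int main(void)
{
	struct buf b = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	start_writeback(&b);			/* first log write */
	pthread_create(&t, NULL, io_completion, &b);
	start_writeback(&b);			/* blocks until the first completes */
	printf("second writeback started only after the first finished\n");
	pthread_join(t, NULL);
	end_writeback(&b);
	return 0;
}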
Link: https://lkml.kernel.org/r/20240530141556.4411-1-konishi.ryusuke@gmail.com Fixes: 9ff05123e3bf ("nilfs2: segment constructor") Signed-off-by: Ryusuke Konishi Tested-by: Ryusuke Konishi Cc: Signed-off-by: Andrew Morton --- fs/nilfs2/segment.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 60d4f59f7665d..6ea81f1d50944 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1652,6 +1652,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) if (bh->b_folio != bd_folio) { if (bd_folio) { folio_lock(bd_folio); + folio_wait_writeback(bd_folio); folio_clear_dirty_for_io(bd_folio); folio_start_writeback(bd_folio); folio_unlock(bd_folio); @@ -1665,6 +1666,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) if (bh == segbuf->sb_super_root) { if (bh->b_folio != bd_folio) { folio_lock(bd_folio); + folio_wait_writeback(bd_folio); folio_clear_dirty_for_io(bd_folio); folio_start_writeback(bd_folio); folio_unlock(bd_folio); @@ -1681,6 +1683,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) } if (bd_folio) { folio_lock(bd_folio); + folio_wait_writeback(bd_folio); folio_clear_dirty_for_io(bd_folio); folio_start_writeback(bd_folio); folio_unlock(bd_folio); From 7cc5a5d65011983952a9c62f170f5b79e24b1239 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 29 May 2024 18:18:12 -0400 Subject: [PATCH 341/537] mm: page_alloc: fix highatomic typing in multi-block buddies Christoph reports a page allocator splat triggered by xfstests: generic/176 214s ... [ 1204.507931] run fstests generic/176 at 2024-05-27 12:52:30 XFS (nvme0n1): Mounting V5 Filesystem cd936307-415f-48a3-b99d-a2d52ae1f273 XFS (nvme0n1): Ending clean mount XFS (nvme1n1): Mounting V5 Filesystem ab3ee1a4-af62-4934-9a6a-6c2fde321850 XFS (nvme1n1): Ending clean mount XFS (nvme1n1): Unmounting Filesystem ab3ee1a4-af62-4934-9a6a-6c2fde321850 XFS (nvme1n1): Mounting V5 Filesystem 7099b02d-9c58-4d1d-be1d-2cc472d12cd9 XFS (nvme1n1): Ending clean mount ------------[ cut here ]------------ page type is 3, passed migratetype is 1 (nr=512) WARNING: CPU: 0 PID: 509870 at mm/page_alloc.c:645 expand+0x1c5/0x1f0 Modules linked in: i2c_i801 crc32_pclmul i2c_smbus [last unloaded: scsi_debug] CPU: 0 PID: 509870 Comm: xfs_io Not tainted 6.10.0-rc1+ #2437 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2 04/01/2014 RIP: 0010:expand+0x1c5/0x1f0 Code: 05 16 70 bf 02 01 e8 ca fc ff ff 8b 54 24 34 44 89 e1 48 c7 c7 80 a2 28 83 48 89 c6 b8 01 00 3 RSP: 0018:ffffc90003b2b968 EFLAGS: 00010082 RAX: 0000000000000000 RBX: ffffffff83fa9480 RCX: 0000000000000000 RDX: 0000000000000005 RSI: 0000000000000027 RDI: 00000000ffffffff RBP: 00000000001f2600 R08: 00000000fffeffff R09: 0000000000000001 R10: 0000000000000000 R11: ffffffff83676200 R12: 0000000000000009 R13: 0000000000000200 R14: 0000000000000001 R15: ffffea0007c98000 FS: 00007f72ca3d5780(0000) GS:ffff8881f9c00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f72ca1fff38 CR3: 00000001aa0c6002 CR4: 0000000000770ef0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff07f0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: ? __warn+0x7b/0x120 ? expand+0x1c5/0x1f0 ? report_bug+0x191/0x1c0 ? handle_bug+0x3c/0x80 ? exc_invalid_op+0x17/0x70 ? asm_exc_invalid_op+0x1a/0x20 ? expand+0x1c5/0x1f0 ? 
expand+0x1c5/0x1f0 __rmqueue_pcplist+0x3a9/0x730 get_page_from_freelist+0x7a0/0xf00 __alloc_pages_noprof+0x153/0x2e0 __folio_alloc_noprof+0x10/0xa0 __filemap_get_folio+0x16b/0x370 iomap_write_begin+0x496/0x680 While trying to service a movable allocation (page type 1), the page allocator runs into a two-pageblock buddy on the movable freelist whose second block is typed as highatomic (page type 3). This inconsistency is caused by the highatomic reservation system operating on single pageblocks, while MAX_ORDER can be bigger than that - in this configuration, pageblock_order is 9 while MAX_PAGE_ORDER is 10. The test case is observed to make several adjacent order-3 requests with __GFP_DIRECT_RECLAIM cleared, which marks the surrounding block as highatomic. Upon freeing, the blocks merge into an order-10 buddy. When the highatomic pool is drained later on, this order-10 buddy gets moved back to the movable list, but only the first pageblock is marked movable again. A subsequent expand() of this buddy warns about the tail being of a different type. This is a long-standing bug that's surfaced by the recent block type warnings added to the allocator. The consequences seem mostly benign, it just results in odd behavior: the highatomic tail blocks are not properly drained, instead they end up on the movable list first, then go back to the highatomic list after an alloc-free cycle. To fix this, make the highatomic reservation code aware that allocations/buddies can be larger than a pageblock. While it's an old quirk, the recently added type consistency warnings seem to be the most prominent consequence of it. Set the Fixes: tag accordingly to highlight this backporting dependency. Link: https://lkml.kernel.org/r/20240530114203.GA1222079@cmpxchg.org Fixes: e0932b6c1f94 ("mm: page_alloc: consolidate free page accounting") Signed-off-by: Johannes Weiner Reported-by: Christoph Hellwig Reviewed-by: Zi Yan Tested-by: Christoph Hellwig Cc: Andy Shevchenko Cc: Baolin Wang Cc: Mel Gorman Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/page_alloc.c | 50 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 34 insertions(+), 16 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2e22ce5675ca1..222299b5c0e6a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1955,10 +1955,12 @@ int find_suitable_fallback(struct free_area *area, unsigned int order, } /* - * Reserve a pageblock for exclusive use of high-order atomic allocations if - * there are no empty page blocks that contain a page with a suitable order + * Reserve the pageblock(s) surrounding an allocation request for + * exclusive use of high-order atomic allocations if there are no + * empty page blocks that contain a page with a suitable order */ -static void reserve_highatomic_pageblock(struct page *page, struct zone *zone) +static void reserve_highatomic_pageblock(struct page *page, int order, + struct zone *zone) { int mt; unsigned long max_managed, flags; @@ -1984,10 +1986,17 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone) /* Yoink! 
*/ mt = get_pageblock_migratetype(page); /* Only reserve normal pageblocks (i.e., they can merge with others) */ - if (migratetype_is_mergeable(mt)) - if (move_freepages_block(zone, page, mt, - MIGRATE_HIGHATOMIC) != -1) - zone->nr_reserved_highatomic += pageblock_nr_pages; + if (!migratetype_is_mergeable(mt)) + goto out_unlock; + + if (order < pageblock_order) { + if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) + goto out_unlock; + zone->nr_reserved_highatomic += pageblock_nr_pages; + } else { + change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); + zone->nr_reserved_highatomic += 1 << order; + } out_unlock: spin_unlock_irqrestore(&zone->lock, flags); @@ -1999,7 +2008,7 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone) * intense memory pressure but failed atomic allocations should be easier * to recover from than an OOM. * - * If @force is true, try to unreserve a pageblock even though highatomic + * If @force is true, try to unreserve pageblocks even though highatomic * pageblock is exhausted. */ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, @@ -2041,6 +2050,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, * adjust the count once. */ if (is_migrate_highatomic(mt)) { + unsigned long size; /* * It should never happen but changes to * locking could inadvertently allow a per-cpu @@ -2048,9 +2058,9 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, * while unreserving so be safe and watch for * underflows. */ - zone->nr_reserved_highatomic -= min( - pageblock_nr_pages, - zone->nr_reserved_highatomic); + size = max(pageblock_nr_pages, 1UL << order); + size = min(size, zone->nr_reserved_highatomic); + zone->nr_reserved_highatomic -= size; } /* @@ -2062,11 +2072,19 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, * of pageblocks that cannot be completely freed * may increase. */ - ret = move_freepages_block(zone, page, mt, - ac->migratetype); + if (order < pageblock_order) + ret = move_freepages_block(zone, page, mt, + ac->migratetype); + else { + move_to_free_list(page, zone, order, mt, + ac->migratetype); + change_pageblock_range(page, order, + ac->migratetype); + ret = 1; + } /* - * Reserving this block already succeeded, so this should - * not fail on zone boundaries. + * Reserving the block(s) already succeeded, + * so this should not fail on zone boundaries. */ WARN_ON_ONCE(ret == -1); if (ret > 0) { @@ -3406,7 +3424,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, * if the pageblock should be reserved for the future */ if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) - reserve_highatomic_pageblock(page, zone); + reserve_highatomic_pageblock(page, order, zone); return page; } else { From 0105eaabb27f31d9b8d340aca6fb6a3420cab30f Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Tue, 28 May 2024 09:08:38 -0700 Subject: [PATCH 342/537] vmalloc: check CONFIG_EXECMEM in is_vmalloc_or_module_addr() After commit 2c9e5d4a0082 ("bpf: remove CONFIG_BPF_JIT dependency on CONFIG_MODULES of") CONFIG_BPF_JIT does not depend on CONFIG_MODULES any more and bpf jit also uses the [MODULES_VADDR, MODULES_END] memory region. But is_vmalloc_or_module_addr() still checks CONFIG_MODULES, which then returns false for a bpf jit memory region when CONFIG_MODULES is not defined. It leads to the following kernel BUG: [ 1.567023] ------------[ cut here ]------------ [ 1.567883] kernel BUG at mm/vmalloc.c:745! 
[ 1.568477] Oops: invalid opcode: 0000 [#1] PREEMPT SMP KASAN NOPTI [ 1.569367] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 6.9.0+ #448 [ 1.570247] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.15.0-1 04/01/2014 [ 1.570786] RIP: 0010:vmalloc_to_page+0x48/0x1ec [ 1.570786] Code: 0f 00 00 e8 eb 1a 05 00 b8 37 00 00 00 48 ba fe ff ff ff ff 1f 00 00 4c 03 25 76 49 c6 02 48 c1 e0 28 48 01 e8 48 39 d0 76 02 <0f> 0b 4c 89 e7 e8 bf 1a 05 00 49 8b 04 24 48 a9 9f ff ff ff 0f 84 [ 1.570786] RSP: 0018:ffff888007787960 EFLAGS: 00010212 [ 1.570786] RAX: 000036ffa0000000 RBX: 0000000000000640 RCX: ffffffff8147e93c [ 1.570786] RDX: 00001ffffffffffe RSI: dffffc0000000000 RDI: ffffffff840e32c8 [ 1.570786] RBP: ffffffffa0000000 R08: 0000000000000000 R09: 0000000000000000 [ 1.570786] R10: ffff888007787a88 R11: ffffffff8475d8e7 R12: ffffffff83e80ff8 [ 1.570786] R13: 0000000000000640 R14: 0000000000000640 R15: 0000000000000640 [ 1.570786] FS: 0000000000000000(0000) GS:ffff88806cc00000(0000) knlGS:0000000000000000 [ 1.570786] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 1.570786] CR2: ffff888006a01000 CR3: 0000000003e80000 CR4: 0000000000350ef0 [ 1.570786] Call Trace: [ 1.570786] [ 1.570786] ? __die_body+0x1b/0x58 [ 1.570786] ? die+0x31/0x4b [ 1.570786] ? do_trap+0x9d/0x138 [ 1.570786] ? vmalloc_to_page+0x48/0x1ec [ 1.570786] ? do_error_trap+0xcd/0x102 [ 1.570786] ? vmalloc_to_page+0x48/0x1ec [ 1.570786] ? vmalloc_to_page+0x48/0x1ec [ 1.570786] ? handle_invalid_op+0x2f/0x38 [ 1.570786] ? vmalloc_to_page+0x48/0x1ec [ 1.570786] ? exc_invalid_op+0x2b/0x41 [ 1.570786] ? asm_exc_invalid_op+0x16/0x20 [ 1.570786] ? vmalloc_to_page+0x26/0x1ec [ 1.570786] ? vmalloc_to_page+0x48/0x1ec [ 1.570786] __text_poke+0xb6/0x458 [ 1.570786] ? __pfx_text_poke_memcpy+0x10/0x10 [ 1.570786] ? __pfx___mutex_lock+0x10/0x10 [ 1.570786] ? __pfx___text_poke+0x10/0x10 [ 1.570786] ? __pfx_get_random_u32+0x10/0x10 [ 1.570786] ? srso_return_thunk+0x5/0x5f [ 1.570786] text_poke_copy_locked+0x70/0x84 [ 1.570786] text_poke_copy+0x32/0x4f [ 1.570786] bpf_arch_text_copy+0xf/0x27 [ 1.570786] bpf_jit_binary_pack_finalize+0x26/0x5a [ 1.570786] bpf_int_jit_compile+0x576/0x8ad [ 1.570786] ? __pfx_bpf_int_jit_compile+0x10/0x10 [ 1.570786] ? srso_return_thunk+0x5/0x5f [ 1.570786] ? __kmalloc_node_track_caller+0x2b5/0x2e0 [ 1.570786] bpf_prog_select_runtime+0x7c/0x199 [ 1.570786] bpf_prepare_filter+0x1e9/0x25b [ 1.570786] ? __pfx_bpf_prepare_filter+0x10/0x10 [ 1.570786] ? srso_return_thunk+0x5/0x5f [ 1.570786] ? _find_next_bit+0x29/0x7e [ 1.570786] bpf_prog_create+0xb8/0xe0 [ 1.570786] ptp_classifier_init+0x75/0xa1 [ 1.570786] ? __pfx_ptp_classifier_init+0x10/0x10 [ 1.570786] ? srso_return_thunk+0x5/0x5f [ 1.570786] ? register_pernet_subsys+0x36/0x42 [ 1.570786] ? srso_return_thunk+0x5/0x5f [ 1.570786] sock_init+0x99/0xa3 [ 1.570786] ? __pfx_sock_init+0x10/0x10 [ 1.570786] do_one_initcall+0x104/0x2c4 [ 1.570786] ? __pfx_do_one_initcall+0x10/0x10 [ 1.570786] ? parameq+0x25/0x2d [ 1.570786] ? rcu_is_watching+0x1c/0x3c [ 1.570786] ? trace_kmalloc+0x81/0xb2 [ 1.570786] ? srso_return_thunk+0x5/0x5f [ 1.570786] ? __kmalloc+0x29c/0x2c7 [ 1.570786] ? srso_return_thunk+0x5/0x5f [ 1.570786] do_initcalls+0xf9/0x123 [ 1.570786] kernel_init_freeable+0x24f/0x289 [ 1.570786] ? __pfx_kernel_init+0x10/0x10 [ 1.570786] kernel_init+0x19/0x13a [ 1.570786] ret_from_fork+0x24/0x41 [ 1.570786] ? 
__pfx_kernel_init+0x10/0x10 [ 1.570786] ret_from_fork_asm+0x1a/0x30 [ 1.570786] [ 1.570819] ---[ end trace 0000000000000000 ]--- [ 1.571463] RIP: 0010:vmalloc_to_page+0x48/0x1ec [ 1.572111] Code: 0f 00 00 e8 eb 1a 05 00 b8 37 00 00 00 48 ba fe ff ff ff ff 1f 00 00 4c 03 25 76 49 c6 02 48 c1 e0 28 48 01 e8 48 39 d0 76 02 <0f> 0b 4c 89 e7 e8 bf 1a 05 00 49 8b 04 24 48 a9 9f ff ff ff 0f 84 [ 1.574632] RSP: 0018:ffff888007787960 EFLAGS: 00010212 [ 1.575129] RAX: 000036ffa0000000 RBX: 0000000000000640 RCX: ffffffff8147e93c [ 1.576097] RDX: 00001ffffffffffe RSI: dffffc0000000000 RDI: ffffffff840e32c8 [ 1.577084] RBP: ffffffffa0000000 R08: 0000000000000000 R09: 0000000000000000 [ 1.578077] R10: ffff888007787a88 R11: ffffffff8475d8e7 R12: ffffffff83e80ff8 [ 1.578810] R13: 0000000000000640 R14: 0000000000000640 R15: 0000000000000640 [ 1.579823] FS: 0000000000000000(0000) GS:ffff88806cc00000(0000) knlGS:0000000000000000 [ 1.580992] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 1.581869] CR2: ffff888006a01000 CR3: 0000000003e80000 CR4: 0000000000350ef0 [ 1.582800] Kernel panic - not syncing: Fatal exception [ 1.583765] ---[ end Kernel panic - not syncing: Fatal exception ]--- Fix this by checking CONFIG_EXECMEM instead. Link: https://lkml.kernel.org/r/20240528160838.102223-1-xiyou.wangcong@gmail.com Fixes: 2c9e5d4a0082 ("bpf: remove CONFIG_BPF_JIT dependency on CONFIG_MODULES of") Signed-off-by: Cong Wang Acked-by: Mike Rapoport (IBM) Cc: Luis Chamberlain Cc: Daniel Borkmann Signed-off-by: Andrew Morton --- mm/vmalloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 5d3aa2dc88a83..45e1506d58c3a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -722,7 +722,7 @@ int is_vmalloc_or_module_addr(const void *x) * and fall back on vmalloc() if that fails. Others * just put it in the vmalloc space. */ -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR) +#if defined(CONFIG_EXECMEM) && defined(MODULES_VADDR) unsigned long addr = (unsigned long)kasan_reset_tag(x); if (addr >= MODULES_VADDR && addr < MODULES_END) return 1; From 2ef3cec44c60ae171b287db7fc2aa341586d65ba Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Tue, 28 May 2024 12:48:06 +0200 Subject: [PATCH 343/537] kmsan: do not wipe out origin when doing partial unpoisoning As noticed by Brian, KMSAN should not be zeroing the origin when unpoisoning parts of a four-byte uninitialized value, e.g.: char a[4]; kmsan_unpoison_memory(a, 1); This led to false negatives, as certain poisoned values could receive zero origins, preventing those values from being reported. To fix the problem, check that kmsan_internal_set_shadow_origin() writes zero origins only to slots which have zero shadow. 
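The new rule can be seen in miniature in the stand-alone program below, which models the per-byte shadow and the 4-byte origin slots with plain arrays; it illustrates the policy only and is not the KMSAN runtime. A zero origin is stored only where the whole shadow slot is already zero, so a 4-byte value that is still partially poisoned keeps its origin and remains reportable.

/* Model of the guarded origin update: shadow is per byte, origin covers
 * ORIGIN_SIZE bytes, and a zero origin must not overwrite the origin of a
 * slot whose shadow is still non-zero. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ORIGIN_SIZE 4

static uint8_t  shadow[4] = { 0xff, 0xff, 0xff, 0xff };   /* all four bytes poisoned */
static uint32_t origin[1] = { 0xdeadbeef };               /* where they were poisoned */

static void set_shadow_origin(size_t off, size_t size, uint8_t sh, uint32_t org)
{
        memset(shadow + off, sh, size);

        for (size_t i = off / ORIGIN_SIZE;
             i < (off + size + ORIGIN_SIZE - 1) / ORIGIN_SIZE; i++) {
                uint32_t slot_shadow;

                memcpy(&slot_shadow, shadow + i * ORIGIN_SIZE, ORIGIN_SIZE);
                /* the added check: keep the old origin while any shadow
                 * byte in this slot is still non-zero */
                if (org || !slot_shadow)
                        origin[i] = org;
        }
}

int main(void)
{
        set_shadow_origin(0, 1, 0x00, 0);       /* unpoison only byte 0 */
        printf("after partial unpoison: origin = 0x%08" PRIx32 "\n", origin[0]);  /* 0xdeadbeef */

        set_shadow_origin(0, 4, 0x00, 0);       /* unpoison the whole value */
        printf("after full unpoison:    origin = 0x%08" PRIx32 "\n", origin[0]);  /* 0 */
        return 0;
}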
Link: https://lkml.kernel.org/r/20240528104807.738758-1-glider@google.com Fixes: f80be4571b19 ("kmsan: add KMSAN runtime core") Signed-off-by: Alexander Potapenko Reported-by: Brian Johannesmeyer Link: https://lore.kernel.org/lkml/20240524232804.1984355-1-bjohannesmeyer@gmail.com/T/ Reviewed-by: Marco Elver Tested-by: Brian Johannesmeyer Cc: Dmitry Vyukov Cc: Kees Cook Cc: Signed-off-by: Andrew Morton --- mm/kmsan/core.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c index cf2d70e9c9a5f..95f859e38c533 100644 --- a/mm/kmsan/core.c +++ b/mm/kmsan/core.c @@ -196,8 +196,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b, u32 origin, bool checked) { u64 address = (u64)addr; - void *shadow_start; - u32 *origin_start; + u32 *shadow_start, *origin_start; size_t pad = 0; KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size)); @@ -225,8 +224,16 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b, origin_start = (u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN); - for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) - origin_start[i] = origin; + /* + * If the new origin is non-zero, assume that the shadow byte is also non-zero, + * and unconditionally overwrite the old origin slot. + * If the new origin is zero, overwrite the old origin slot iff the + * corresponding shadow slot is zero. + */ + for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) { + if (origin || !shadow_start[i]) + origin_start[i] = origin; + } } struct page *kmsan_vmalloc_to_page_or_null(void *vaddr) From 730cdc2c72c6905a2eda2fccbbf67dcef1206590 Mon Sep 17 00:00:00 2001 From: Chengming Zhou Date: Tue, 28 May 2024 13:15:21 +0800 Subject: [PATCH 344/537] mm/ksm: fix ksm_pages_scanned accounting Patch series "mm/ksm: fix some accounting problems", v3. We encountered some abnormal ksm_pages_scanned and ksm_zero_pages during some random tests. 1. ksm_pages_scanned unchanged even ksmd scanning has progress. 2. ksm_zero_pages maybe -1 in some rare cases. This patch (of 2): During testing, I found ksm_pages_scanned is unchanged although the scan_get_next_rmap_item() did return valid rmap_item that is not NULL. The reason is the scan_get_next_rmap_item() will return NULL after a full scan, so ksm_do_scan() just return without accounting of the ksm_pages_scanned. Fix it by just putting ksm_pages_scanned accounting in that loop, and it will be accounted more timely if that loop would last for a long time. 
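The control-flow issue is easy to demonstrate in isolation. The program below is a model of the two accounting placements discussed above, not the KSM code itself: with the counter updated after the loop, the early return taken when the scanner runs out of items skips the update entirely, whereas updating inside the loop counts every processed page no matter how the loop exits.

/* Model: post-loop accounting is skipped by an early return, per-iteration
 * accounting is not. */
#include <stdio.h>

static unsigned long pages_scanned;
static int items_left;                  /* how many items the fake scanner returns */

static int get_next_item(void)
{
        return items_left-- > 0;
}

static void do_scan_post_loop(unsigned int npages)
{
        unsigned int done = 0;

        while (done < npages) {
                if (!get_next_item())
                        return;         /* scan finished early: line below never runs */
                done++;                 /* "scan" one page */
        }
        pages_scanned += done;
}

static void do_scan_per_iteration(unsigned int npages)
{
        while (npages--) {
                if (!get_next_item())
                        return;
                pages_scanned++;        /* counted as soon as the page is processed */
        }
}

int main(void)
{
        items_left = 3;
        do_scan_post_loop(100);
        printf("post-loop accounting:     %lu\n", pages_scanned);       /* 0 */

        items_left = 3;
        pages_scanned = 0;
        do_scan_per_iteration(100);
        printf("per-iteration accounting: %lu\n", pages_scanned);       /* 3 */
        return 0;
}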
Link: https://lkml.kernel.org/r/20240528-b4-ksm-counters-v3-0-34bb358fdc13@linux.dev Link: https://lkml.kernel.org/r/20240528-b4-ksm-counters-v3-1-34bb358fdc13@linux.dev Fixes: b348b5fe2b5f ("mm/ksm: add pages scanned metric") Signed-off-by: Chengming Zhou Acked-by: David Hildenbrand Reviewed-by: xu xin Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Ran Xiaokai Cc: Stefan Roesch Cc: Yang Yang Cc: Signed-off-by: Andrew Morton --- mm/ksm.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 452ac8346e6e2..9e99cb12d3300 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2754,18 +2754,16 @@ static void ksm_do_scan(unsigned int scan_npages) { struct ksm_rmap_item *rmap_item; struct page *page; - unsigned int npages = scan_npages; - while (npages-- && likely(!freezing(current))) { + while (scan_npages-- && likely(!freezing(current))) { cond_resched(); rmap_item = scan_get_next_rmap_item(&page); if (!rmap_item) return; cmp_and_merge_page(page, rmap_item); put_page(page); + ksm_pages_scanned++; } - - ksm_pages_scanned += scan_npages - npages; } static int ksmd_should_run(void) From c2dc78b86e0821ecf9a9d0c35dba2618279a5bb6 Mon Sep 17 00:00:00 2001 From: Chengming Zhou Date: Tue, 28 May 2024 13:15:22 +0800 Subject: [PATCH 345/537] mm/ksm: fix ksm_zero_pages accounting We normally ksm_zero_pages++ in ksmd when page is merged with zero page, but ksm_zero_pages-- is done from page tables side, where there is no any accessing protection of ksm_zero_pages. So we can read very exceptional value of ksm_zero_pages in rare cases, such as -1, which is very confusing to users. Fix it by changing to use atomic_long_t, and the same case with the mm->ksm_zero_pages. Link: https://lkml.kernel.org/r/20240528-b4-ksm-counters-v3-2-34bb358fdc13@linux.dev Fixes: e2942062e01d ("ksm: count all zero pages placed by KSM") Fixes: 6080d19f0704 ("ksm: add ksm zero pages for each process") Signed-off-by: Chengming Zhou Acked-by: David Hildenbrand Cc: Andrea Arcangeli Cc: Hugh Dickins Cc: Ran Xiaokai Cc: Stefan Roesch Cc: xu xin Cc: Yang Yang Cc: Signed-off-by: Andrew Morton --- fs/proc/base.c | 2 +- include/linux/ksm.h | 17 ++++++++++++++--- include/linux/mm_types.h | 2 +- mm/ksm.c | 11 +++++------ 4 files changed, 21 insertions(+), 11 deletions(-) diff --git a/fs/proc/base.c b/fs/proc/base.c index 18550c071d71c..72a1acd03675c 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -3214,7 +3214,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns, mm = get_task_mm(task); if (mm) { seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items); - seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages); + seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm)); seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages); seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm)); mmput(mm); diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 52c63a9c5a9cd..11690dacd9868 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -33,16 +33,27 @@ void __ksm_exit(struct mm_struct *mm); */ #define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte)) -extern unsigned long ksm_zero_pages; +extern atomic_long_t ksm_zero_pages; + +static inline void ksm_map_zero_page(struct mm_struct *mm) +{ + atomic_long_inc(&ksm_zero_pages); + atomic_long_inc(&mm->ksm_zero_pages); +} static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte) { if (is_ksm_zero_pte(pte)) { - ksm_zero_pages--; - mm->ksm_zero_pages--; + 
atomic_long_dec(&ksm_zero_pages); + atomic_long_dec(&mm->ksm_zero_pages); } } +static inline long mm_ksm_zero_pages(struct mm_struct *mm) +{ + return atomic_long_read(&mm->ksm_zero_pages); +} + static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) { if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 24323c7d0bd48..af3a0256fa93b 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -985,7 +985,7 @@ struct mm_struct { * Represent how many empty pages are merged with kernel zero * pages when enabling KSM use_zero_pages. */ - unsigned long ksm_zero_pages; + atomic_long_t ksm_zero_pages; #endif /* CONFIG_KSM */ #ifdef CONFIG_LRU_GEN_WALKS_MMU struct { diff --git a/mm/ksm.c b/mm/ksm.c index 9e99cb12d3300..34c4820e0d3df 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -296,7 +296,7 @@ static bool ksm_use_zero_pages __read_mostly; static bool ksm_smart_scan = true; /* The number of zero pages which is placed by KSM */ -unsigned long ksm_zero_pages; +atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0); /* The number of pages that have been skipped due to "smart scanning" */ static unsigned long ksm_pages_skipped; @@ -1429,8 +1429,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, * the dirty bit in zero page's PTE is set. */ newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot))); - ksm_zero_pages++; - mm->ksm_zero_pages++; + ksm_map_zero_page(mm); /* * We're replacing an anonymous page with a zero page, which is * not anonymous. We need to do proper accounting otherwise we @@ -3374,7 +3373,7 @@ static void wait_while_offlining(void) #ifdef CONFIG_PROC_FS long ksm_process_profit(struct mm_struct *mm) { - return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE - + return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE - mm->ksm_rmap_items * sizeof(struct ksm_rmap_item); } #endif /* CONFIG_PROC_FS */ @@ -3663,7 +3662,7 @@ KSM_ATTR_RO(pages_skipped); static ssize_t ksm_zero_pages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return sysfs_emit(buf, "%ld\n", ksm_zero_pages); + return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages)); } KSM_ATTR_RO(ksm_zero_pages); @@ -3672,7 +3671,7 @@ static ssize_t general_profit_show(struct kobject *kobj, { long general_profit; - general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE - + general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE - ksm_rmap_items * sizeof(struct ksm_rmap_item); return sysfs_emit(buf, "%ld\n", general_profit); From 8daf9c702ee7f825f0de8600abff764acfedea13 Mon Sep 17 00:00:00 2001 From: Oscar Salvador Date: Tue, 28 May 2024 22:53:23 +0200 Subject: [PATCH 346/537] mm/hugetlb: do not call vma_add_reservation upon ENOMEM sysbot reported a splat [1] on __unmap_hugepage_range(). This is because vma_needs_reservation() can return -ENOMEM if allocate_file_region_entries() fails to allocate the file_region struct for the reservation. Check for that and do not call vma_add_reservation() if that is the case, otherwise region_abort() and region_del() will see that we do not have any file_regions. If we detect that vma_needs_reservation() returned -ENOMEM, we clear the hugetlb_restore_reserve flag as if this reservation was still consumed, so free_huge_folio() will not increment the resv count. 
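The underlying hazard is using an errno-style return value as a plain boolean. The fragment below is a minimal stand-alone illustration with an invented helper, not the hugetlb code: the three cases (negative error, zero for nothing to do, positive for act) have to be told apart, which is what the check added in the hunk below does.

/* Tri-state return values must not be collapsed into a boolean test. */
#include <errno.h>
#include <stdio.h>

static int needs_reservation(int simulate_oom)
{
        return simulate_oom ? -ENOMEM : 1;
}

int main(void)
{
        int rc = needs_reservation(1);

        if (rc)                 /* buggy: -ENOMEM also takes this branch */
                printf("boolean test:   would call vma_add_reservation()\n");

        if (rc < 0)             /* fixed: handle the failure separately */
                printf("tri-state test: back out, treat the reserve as consumed\n");
        else if (rc)
                printf("tri-state test: add the reservation\n");
        return 0;
}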
[1] https://lore.kernel.org/linux-mm/0000000000004096100617c58d54@google.com/T/#ma5983bc1ab18a54910da83416b3f89f3c7ee43aa Link: https://lkml.kernel.org/r/20240528205323.20439-1-osalvador@suse.de Fixes: df7a6d1f6405 ("mm/hugetlb: restore the reservation if needed") Signed-off-by: Oscar Salvador Reported-and-tested-by: syzbot+d3fe2dc5ffe9380b714b@syzkaller.appspotmail.com Closes: https://lore.kernel.org/linux-mm/0000000000004096100617c58d54@google.com/ Cc: Breno Leitao Cc: Muchun Song Cc: Signed-off-by: Andrew Morton --- mm/hugetlb.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 6be78e7d4f6e0..f35abff8be60f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5768,8 +5768,20 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, * do_exit() will not see it, and will keep the reservation * forever. */ - if (adjust_reservation && vma_needs_reservation(h, vma, address)) - vma_add_reservation(h, vma, address); + if (adjust_reservation) { + int rc = vma_needs_reservation(h, vma, address); + + if (rc < 0) + /* Pressumably allocate_file_region_entries failed + * to allocate a file_region struct. Clear + * hugetlb_restore_reserve so that global reserve + * count will not be incremented by free_huge_folio. + * Act as if we consumed the reservation. + */ + folio_clear_hugetlb_restore_reserve(page_folio(page)); + else if (rc) + vma_add_reservation(h, vma, address); + } tlb_remove_page_size(tlb, page, huge_page_size(h)); /* From 3f0c44c8c21cfa3bb6b756b939491b7a60932cd1 Mon Sep 17 00:00:00 2001 From: Thadeu Lima de Souza Cascardo Date: Mon, 27 May 2024 15:30:06 -0300 Subject: [PATCH 347/537] codetag: avoid race at alloc_slab_obj_exts When CONFIG_MEM_ALLOC_PROFILING_DEBUG is enabled, the following warning may be noticed: [ 48.299584] ------------[ cut here ]------------ [ 48.300092] alloc_tag was not set [ 48.300528] WARNING: CPU: 2 PID: 1361 at include/linux/alloc_tag.h:130 alloc_tagging_slab_free_hook+0x84/0xc7 [ 48.301305] Modules linked in: [ 48.301553] CPU: 2 PID: 1361 Comm: systemd-udevd Not tainted 6.10.0-rc1-00003-gac8755535862 #176 [ 48.302196] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014 [ 48.302752] RIP: 0010:alloc_tagging_slab_free_hook+0x84/0xc7 [ 48.303169] Code: 8d 1c c4 48 85 db 74 4d 48 83 3b 00 75 1e 80 3d 65 02 86 04 00 75 15 48 c7 c7 11 48 1d 85 c6 05 55 02 86 04 01 e8 64 44 a5 ff <0f> 0b 48 8b 03 48 85 c0 74 21 48 83 f8 01 74 14 48 8b 50 20 48 f7 [ 48.304411] RSP: 0018:ffff8880111b7d40 EFLAGS: 00010282 [ 48.304916] RAX: 0000000000000000 RBX: ffff88800fcc9008 RCX: 0000000000000000 [ 48.305455] RDX: 0000000080000000 RSI: ffff888014060000 RDI: ffffed1002236f97 [ 48.305979] RBP: 0000000000001100 R08: fffffbfff0aa73a1 R09: 0000000000000000 [ 48.306473] R10: ffffffff814515e5 R11: 0000000000000003 R12: ffff88800fcc9000 [ 48.306943] R13: ffff88800b2e5cc0 R14: ffff8880111b7d90 R15: 0000000000000000 [ 48.307529] FS: 00007faf5d1908c0(0000) GS:ffff88806cf00000(0000) knlGS:0000000000000000 [ 48.308223] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 48.308710] CR2: 000058fb220c9118 CR3: 00000000110cc000 CR4: 0000000000750ef0 [ 48.309274] PKRU: 55555554 [ 48.309804] Call Trace: [ 48.310029] [ 48.310290] ? show_regs+0x84/0x8d [ 48.310722] ? alloc_tagging_slab_free_hook+0x84/0xc7 [ 48.311298] ? __warn+0x13b/0x2ff [ 48.311580] ? alloc_tagging_slab_free_hook+0x84/0xc7 [ 48.311987] ? report_bug+0x2ce/0x3ab [ 48.312292] ? handle_bug+0x8c/0x107 [ 48.312563] ? 
exc_invalid_op+0x34/0x6f [ 48.312842] ? asm_exc_invalid_op+0x1a/0x20 [ 48.313173] ? this_cpu_in_panic+0x1c/0x72 [ 48.313503] ? alloc_tagging_slab_free_hook+0x84/0xc7 [ 48.313880] ? putname+0x143/0x14e [ 48.314152] kmem_cache_free+0xe9/0x214 [ 48.314454] putname+0x143/0x14e [ 48.314712] do_unlinkat+0x413/0x45e [ 48.315001] ? __pfx_do_unlinkat+0x10/0x10 [ 48.315388] ? __check_object_size+0x4d7/0x525 [ 48.315744] ? __sanitizer_cov_trace_pc+0x20/0x4a [ 48.316167] ? __sanitizer_cov_trace_pc+0x20/0x4a [ 48.316757] ? getname_flags+0x4ed/0x500 [ 48.317261] __x64_sys_unlink+0x42/0x4a [ 48.317741] do_syscall_64+0xe2/0x149 [ 48.318171] entry_SYSCALL_64_after_hwframe+0x76/0x7e [ 48.318602] RIP: 0033:0x7faf5d8850ab [ 48.318891] Code: fd ff ff e8 27 dd 01 00 0f 1f 80 00 00 00 00 f3 0f 1e fa b8 5f 00 00 00 0f 05 c3 0f 1f 40 00 f3 0f 1e fa b8 57 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 05 c3 0f 1f 40 00 48 8b 15 41 2d 0e 00 f7 d8 [ 48.320649] RSP: 002b:00007ffc44982b38 EFLAGS: 00000246 ORIG_RAX: 0000000000000057 [ 48.321182] RAX: ffffffffffffffda RBX: 00005ba344a44680 RCX: 00007faf5d8850ab [ 48.321667] RDX: 0000000000000000 RSI: 00005ba344a44430 RDI: 00007ffc44982b40 [ 48.322139] RBP: 00007ffc44982c00 R08: 0000000000000000 R09: 0000000000000007 [ 48.322598] R10: 00005ba344a44430 R11: 0000000000000246 R12: 0000000000000000 [ 48.323071] R13: 00007ffc44982b40 R14: 0000000000000000 R15: 0000000000000000 [ 48.323596] This is due to a race when two objects are allocated from the same slab, which did not have an obj_exts allocated for. In such a case, the two threads will notice the NULL obj_exts and after one assigns slab->obj_exts, the second one will happily do the exchange if it reads this new assigned value. In order to avoid that, verify that the read obj_exts does not point to an allocated obj_exts before doing the exchange. Link: https://lkml.kernel.org/r/20240527183007.1595037-1-cascardo@igalia.com Fixes: 09c46563ff6d ("codetag: debug: introduce OBJEXTS_ALLOC_FAIL to mark failed slab_ext allocations") Signed-off-by: Thadeu Lima de Souza Cascardo Acked-by: Vlastimil Babka Cc: Suren Baghdasaryan Cc: Christoph Lameter Cc: David Rientjes Cc: Gustavo A. R. Silva Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Joonsoo Kim Cc: Kees Cook Cc: Pekka Enberg Cc: Roman Gushchin Cc: Thadeu Lima de Souza Cascardo Cc: Kent Overstreet Signed-off-by: Andrew Morton --- mm/slub.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index 0809760cf789d..1373ac365a46f 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1952,7 +1952,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, #ifdef CONFIG_MEMCG new_exts |= MEMCG_DATA_OBJEXTS; #endif - old_exts = slab->obj_exts; + old_exts = READ_ONCE(slab->obj_exts); handle_failed_objexts_alloc(old_exts, vec, objects); if (new_slab) { /* @@ -1961,7 +1961,8 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, * be simply assigned. */ slab->obj_exts = new_exts; - } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { + } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) || + cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { /* * If the slab is already in use, somebody can allocate and * assign slabobj_exts in parallel. 
In this case the existing From 9415983599413f847ec9f081e9f9e5ed6cdeb342 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 31 May 2024 13:53:50 -0700 Subject: [PATCH 348/537] mm: fix xyz_noprof functions calling profiled functions Grepping /proc/allocinfo for "noprof" reveals several xyz_noprof functions, which means internally they are calling profiled functions. This should never happen as such calls move allocation charge from a higher level location where it should be accounted for into these lower level helpers. Fix this by replacing profiled function calls with noprof ones. Link: https://lkml.kernel.org/r/20240531205350.3973009-1-surenb@google.com Fixes: b951aaff5035 ("mm: enable page allocation tagging") Fixes: e26d8769da6d ("mempool: hook up to memory allocation profiling") Fixes: 88ae5fb755b0 ("mm: vmalloc: enable memory allocation profiling") Signed-off-by: Suren Baghdasaryan Cc: Kent Overstreet Reviewed-by: Kees Cook Acked-by: Vlastimil Babka Cc: Pasha Tatashin Signed-off-by: Andrew Morton --- mm/filemap.c | 2 +- mm/mempool.c | 2 +- mm/util.c | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index 382c3d06bfb10..876cc64aadd7c 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1000,7 +1000,7 @@ struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) do { cpuset_mems_cookie = read_mems_allowed_begin(); n = cpuset_mem_spread_node(); - folio = __folio_alloc_node(gfp, order, n); + folio = __folio_alloc_node_noprof(gfp, order, n); } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie)); return folio; diff --git a/mm/mempool.c b/mm/mempool.c index 6ece63a00acf5..3223337135d0a 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -273,7 +273,7 @@ mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn, { mempool_t *pool; - pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id); + pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id); if (!pool) return NULL; diff --git a/mm/util.c b/mm/util.c index c9e519e6811f5..6c3e6710e4def 100644 --- a/mm/util.c +++ b/mm/util.c @@ -705,7 +705,7 @@ void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flag if (oldsize >= newsize) return (void *)p; - newp = kvmalloc(newsize, flags); + newp = kvmalloc_noprof(newsize, flags); if (!newp) return NULL; memcpy(newp, p, oldsize); @@ -726,7 +726,7 @@ void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; - return __vmalloc(bytes, flags); + return __vmalloc_noprof(bytes, flags); } EXPORT_SYMBOL(__vmalloc_array_noprof); @@ -737,7 +737,7 @@ EXPORT_SYMBOL(__vmalloc_array_noprof); */ void *vmalloc_array_noprof(size_t n, size_t size) { - return __vmalloc_array(n, size, GFP_KERNEL); + return __vmalloc_array_noprof(n, size, GFP_KERNEL); } EXPORT_SYMBOL(vmalloc_array_noprof); @@ -749,7 +749,7 @@ EXPORT_SYMBOL(vmalloc_array_noprof); */ void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) { - return __vmalloc_array(n, size, flags | __GFP_ZERO); + return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO); } EXPORT_SYMBOL(__vcalloc_noprof); @@ -760,7 +760,7 @@ EXPORT_SYMBOL(__vcalloc_noprof); */ void *vcalloc_noprof(size_t n, size_t size) { - return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO); + return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO); } EXPORT_SYMBOL(vcalloc_noprof); From 7373a51e7998b508af7136530f3a997b286ce81c Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Tue, 4 
Jun 2024 22:42:55 +0900 Subject: [PATCH 349/537] nilfs2: fix nilfs_empty_dir() misjudgment and long loop on I/O errors The error handling in nilfs_empty_dir() when a directory folio/page read fails is incorrect, as in the old ext2 implementation, and if the folio/page cannot be read or nilfs_check_folio() fails, it will falsely determine the directory as empty and corrupt the file system. In addition, since nilfs_empty_dir() does not immediately return on a failed folio/page read, but continues to loop, this can cause a long loop with I/O if i_size of the directory's inode is also corrupted, causing the log writer thread to wait and hang, as reported by syzbot. Fix these issues by making nilfs_empty_dir() immediately return a false value (0) if it fails to get a directory folio/page. Link: https://lkml.kernel.org/r/20240604134255.7165-1-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi Reported-by: syzbot+c8166c541d3971bf6c87@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=c8166c541d3971bf6c87 Fixes: 2ba466d74ed7 ("nilfs2: directory entry operations") Tested-by: Ryusuke Konishi Cc: Signed-off-by: Andrew Morton --- fs/nilfs2/dir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index a002a44ff1617..52e50b1b7f22d 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -607,7 +607,7 @@ int nilfs_empty_dir(struct inode *inode) kaddr = nilfs_get_folio(inode, i, &folio); if (IS_ERR(kaddr)) - continue; + return 0; de = (struct nilfs_dir_entry *)kaddr; kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1); From b91e05f1fcf755c9d2c4ca10907383e39def05bd Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Sat, 18 May 2024 00:28:58 +0100 Subject: [PATCH 350/537] drm/vmwgfx: remove unused struct 'vmw_stdu_dma' 'vmw_stdu_dma' is unused since commit 39985eea5a6d ("drm/vmwgfx: Abstract placement selection") Remove it. Signed-off-by: Dr. David Alan Gilbert Signed-off-by: Zack Rusin Link: https://patchwork.freedesktop.org/patch/msgid/20240517232858.230860-1-linux@treblig.org --- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index afc0bc4ef4e1e..a04e0736318da 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -92,11 +92,6 @@ struct vmw_stdu_update { SVGA3dCmdUpdateGBScreenTarget body; }; -struct vmw_stdu_dma { - SVGA3dCmdHeader header; - SVGA3dCmdSurfaceDMA body; -}; - struct vmw_stdu_surface_copy { SVGA3dCmdHeader header; SVGA3dCmdSurfaceCopy body; From 03e4a092be8ce3de7c1baa7ae14e68b64e3ea644 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 3 Jun 2024 14:42:30 -0700 Subject: [PATCH 351/537] ice: fix iteration of TLVs in Preserved Fields Area The ice_get_pfa_module_tlv() function iterates over the Type-Length-Value structures in the Preserved Fields Area (PFA) of the NVM. This is used by the driver to access data such as the Part Board Assembly identifier. The function uses simple logic to iterate over the PFA. First, the pointer to the PFA in the NVM is read. Then the total length of the PFA is read from the first word. A pointer to the first TLV is initialized, and a simple loop iterates over each TLV. The pointer is moved forward through the NVM until it exceeds the PFA area. The logic seems sound, but it is missing a key detail. The Preserved Fields Area length includes one additional final word. 
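The wraparound that the rest of this commit message describes can be reproduced in a few lines of stand-alone C. The offsets below are invented and only the 16-bit arithmetic matters; __builtin_add_overflow() is the GCC/Clang primitive that the kernel's check_add_overflow(), used in the fix, is built on.

/* Demo: an unchecked u16 offset update wraps back inside the PFA bounds,
 * a checked add reports the overflow instead. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t pfa_ptr = 0x0100;      /* example PFA start, in words */
        uint16_t pfa_len = 0x0200;      /* example PFA length, in words */
        uint16_t next_tlv = 0x02fe;     /* scanning near the end of the PFA */
        uint16_t bogus_len = 0xfff0;    /* word after the PFA misread as a TLV length */

        /* unchecked: the sum wraps and lands back below the end-of-PFA
         * bound, so the scan loop keeps going */
        next_tlv = (uint16_t)(next_tlv + bogus_len + 2);
        printf("unchecked: next_tlv = 0x%04x, bound = 0x%04x -> loop continues\n",
               (unsigned int)next_tlv, (unsigned int)(pfa_ptr + pfa_len));

        /* checked: the same update detects the overflow and can abort */
        next_tlv = 0x02fe;
        if (__builtin_add_overflow(next_tlv, 2, &next_tlv) ||
            __builtin_add_overflow(next_tlv, bogus_len, &next_tlv)) {
                printf("checked:   overflow detected, scan aborted\n");
                return 1;
        }
        return 0;
}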
This is documented in the device data sheet as a dummy word which contains 0xFFFF. All NVMs have this extra word. If the driver tries to scan for a TLV that is not in the PFA, it will read past the size of the PFA. It reads and interprets the last dummy word of the PFA as a TLV with type 0xFFFF. It then reads the word following the PFA as a length. The PFA resides within the Shadow RAM portion of the NVM, which is relatively small. All of its offsets are within a 16-bit size. The PFA pointer and TLV pointer are stored by the driver as 16-bit values. In almost all cases, the word following the PFA will be such that interpreting it as a length will result in 16-bit arithmetic overflow. Once overflowed, the new next_tlv value is now below the maximum offset of the PFA. Thus, the driver will continue to iterate the data as TLVs. In the worst case, the driver hits on a sequence of reads which loop back to reading the same offsets in an endless loop. To fix this, we need to correct the loop iteration check to account for this extra word at the end of the PFA. This alone is sufficient to resolve the known cases of this issue in the field. However, it is plausible that an NVM could be misconfigured or have corrupt data which results in the same kind of overflow. Protect against this by using check_add_overflow when calculating both the maximum offset of the TLVs, and when calculating the next_tlv offset at the end of each loop iteration. This ensures that the driver will not get stuck in an infinite loop when scanning the PFA. Fixes: e961b679fb0b ("ice: add board identifier info to devlink .info_get") Co-developed-by: Paul Greenwalt Signed-off-by: Paul Greenwalt Reviewed-by: Przemek Kitszel Tested-by: Pucha Himasekhar Reddy Signed-off-by: Jacob Keller Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-1-e3563aa89b0c@intel.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/ice/ice_nvm.c | 28 ++++++++++++++++++------ 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 84eab92dc03cf..ea7cbdf8492d6 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -440,8 +440,7 @@ int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type) { - u16 pfa_len, pfa_ptr; - u16 next_tlv; + u16 pfa_len, pfa_ptr, next_tlv, max_tlv; int status; status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); @@ -454,11 +453,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); return status; } + + /* The Preserved Fields Area contains a sequence of Type-Length-Value + * structures which define its contents. The PFA length includes all + * of the TLVs, plus the initial length word itself, *and* one final + * word at the end after all of the TLVs. + */ + if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) { + dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n", + pfa_ptr, pfa_len); + return -EINVAL; + } + /* Starting with first TLV after PFA length, iterate through the list * of TLVs to find the requested one. 
*/ next_tlv = pfa_ptr + 1; - while (next_tlv < pfa_ptr + pfa_len) { + while (next_tlv < max_tlv) { u16 tlv_sub_module_type; u16 tlv_len; @@ -482,10 +493,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, } return -EINVAL; } - /* Check next TLV, i.e. current TLV pointer + length + 2 words - * (for current TLV's type and length) - */ - next_tlv = next_tlv + tlv_len + 2; + + if (check_add_overflow(next_tlv, 2, &next_tlv) || + check_add_overflow(next_tlv, tlv_len, &next_tlv)) { + dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n", + tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len); + return -EINVAL; + } } /* Module does not exist */ return -ENOENT; From cfa747a66e5da34793ac08c26b814e7709613fab Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 3 Jun 2024 14:42:31 -0700 Subject: [PATCH 352/537] ice: fix reads from NVM Shadow RAM on E830 and E825-C devices The ice driver reads data from the Shadow RAM portion of the NVM during initialization, including data used to identify the NVM image and device, such as the ETRACK ID used to populate devlink dev info fw.bundle. Currently it is using a fixed offset defined by ICE_CSS_HEADER_LENGTH to compute the appropriate offset. This worked fine for E810 and E822 devices which both have CSS header length of 330 words. Other devices, including both E825-C and E830 devices have different sizes for their CSS header. The use of a hard coded value results in the driver reading from the wrong block in the NVM when attempting to access the Shadow RAM copy. This results in the driver reporting the fw.bundle as 0x0 in both the devlink dev info and ethtool -i output. The first E830 support was introduced by commit ba20ecb1d1bb ("ice: Hook up 4 E830 devices by adding their IDs") and the first E825-C support was introducted by commit f64e18944233 ("ice: introduce new E825C devices family") The NVM actually contains the CSS header length embedded in it. Remove the hard coded value and replace it with logic to read the length from the NVM directly. This is more resilient against all existing and future hardware, vs looking up the expected values from a table. It ensures the driver will read from the appropriate place when determining the ETRACK ID value used for populating the fw.bundle_id and for reporting in ethtool -i. The CSS header length for both the active and inactive flash bank is stored in the ice_bank_info structure to avoid unnecessary duplicate work when accessing multiple words of the Shadow RAM. Both banks are read in the unlikely event that the header length is different for the NVM in the inactive bank, rather than being different only by the overall device family. 
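As a worked example of the new offset computation, using the 330-word CSS header length quoted above for E810/E822 (the 161-dword figure below is derived here from that number and the 8-word authentication header, not read from a device), the Shadow RAM copy lands at word 352, the same offset the old hard-coded roundup(ICE_CSS_HEADER_LENGTH, 32) produced; other families simply feed a different header length into the same formula.

/* Stand-alone sketch of the offset math, not the driver code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define AUTH_HEADER_LEN_WORDS 8         /* ICE_NVM_AUTH_HEADER_LEN in the patch */

static uint32_t roundup_u32(uint32_t x, uint32_t to)
{
        return ((x + to - 1) / to) * to;
}

int main(void)
{
        uint32_t hdr_len_dwords = 161;  /* as if read from NVM words 0x02/0x03 */
        uint32_t hdr_len_words = hdr_len_dwords * 2 + AUTH_HEADER_LEN_WORDS;
        uint32_t sr_copy_offset = roundup_u32(hdr_len_words, 32);

        printf("CSS header: %" PRIu32 " words, Shadow RAM copy at word %" PRIu32 "\n",
               hdr_len_words, sr_copy_offset);  /* 330 and 352 */
        return 0;
}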
Fixes: ba20ecb1d1bb ("ice: Hook up 4 E830 devices by adding their IDs") Co-developed-by: Paul Greenwalt Signed-off-by: Paul Greenwalt Reviewed-by: Przemek Kitszel Tested-by: Pucha Himasekhar Reddy Signed-off-by: Jacob Keller Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-2-e3563aa89b0c@intel.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/ice/ice_nvm.c | 88 ++++++++++++++++++++++- drivers/net/ethernet/intel/ice/ice_type.h | 14 ++-- 2 files changed, 93 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index ea7cbdf8492d6..59e8879ac0598 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -374,11 +374,25 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1 * * Read the specified word from the copy of the Shadow RAM found in the * specified NVM module. + * + * Note that the Shadow RAM copy is always located after the CSS header, and + * is aligned to 64-byte (32-word) offsets. */ static int ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data); + u32 sr_copy; + + switch (bank) { + case ICE_ACTIVE_FLASH_BANK: + sr_copy = roundup(hw->flash.banks.active_css_hdr_len, 32); + break; + case ICE_INACTIVE_FLASH_BANK: + sr_copy = roundup(hw->flash.banks.inactive_css_hdr_len, 32); + break; + } + + return ice_read_nvm_module(hw, bank, sr_copy + offset, data); } /** @@ -1023,6 +1037,72 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw) return 0; } +/** + * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header + * @hw: pointer to the HW struct + * @bank: whether to read from the active or inactive flash bank + * @hdr_len: storage for header length in words + * + * Read the CSS header length from the NVM CSS header and add the Authentication + * header size, and then convert to words. + * + * Return: zero on success, or a negative error code on failure. + */ +static int +ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank, + u32 *hdr_len) +{ + u16 hdr_len_l, hdr_len_h; + u32 hdr_len_dword; + int status; + + status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L, + &hdr_len_l); + if (status) + return status; + + status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H, + &hdr_len_h); + if (status) + return status; + + /* CSS header length is in DWORD, so convert to words and add + * authentication header size + */ + hdr_len_dword = hdr_len_h << 16 | hdr_len_l; + *hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN; + + return 0; +} + +/** + * ice_determine_css_hdr_len - Discover CSS header length for the device + * @hw: pointer to the HW struct + * + * Determine the size of the CSS header at the start of the NVM module. This + * is useful for locating the Shadow RAM copy in the NVM, as the Shadow RAM is + * always located just after the CSS header. + * + * Return: zero on success, or a negative error code on failure. 
+ */ +static int ice_determine_css_hdr_len(struct ice_hw *hw) +{ + struct ice_bank_info *banks = &hw->flash.banks; + int status; + + status = ice_get_nvm_css_hdr_len(hw, ICE_ACTIVE_FLASH_BANK, + &banks->active_css_hdr_len); + if (status) + return status; + + status = ice_get_nvm_css_hdr_len(hw, ICE_INACTIVE_FLASH_BANK, + &banks->inactive_css_hdr_len); + if (status) + return status; + + return 0; +} + /** * ice_init_nvm - initializes NVM setting * @hw: pointer to the HW struct @@ -1069,6 +1149,12 @@ int ice_init_nvm(struct ice_hw *hw) return status; } + status = ice_determine_css_hdr_len(hw); + if (status) { + ice_debug(hw, ICE_DBG_NVM, "Failed to determine Shadow RAM copy offsets.\n"); + return status; + } + status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index f0796a93f4287..eef397e5baa07 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -482,6 +482,8 @@ struct ice_bank_info { u32 orom_size; /* Size of OROM bank */ u32 netlist_ptr; /* Pointer to 1st Netlist bank */ u32 netlist_size; /* Size of Netlist bank */ + u32 active_css_hdr_len; /* Active CSS header length */ + u32 inactive_css_hdr_len; /* Inactive CSS header length */ enum ice_flash_bank nvm_bank; /* Active NVM bank */ enum ice_flash_bank orom_bank; /* Active OROM bank */ enum ice_flash_bank netlist_bank; /* Active Netlist bank */ @@ -1087,17 +1089,13 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 /* CSS Header words */ +#define ICE_NVM_CSS_HDR_LEN_L 0x02 +#define ICE_NVM_CSS_HDR_LEN_H 0x03 #define ICE_NVM_CSS_SREV_L 0x14 #define ICE_NVM_CSS_SREV_H 0x15 -/* Length of CSS header section in words */ -#define ICE_CSS_HEADER_LENGTH 330 - -/* Offset of Shadow RAM copy in the NVM bank area. */ -#define ICE_NVM_SR_COPY_WORD_OFFSET roundup(ICE_CSS_HEADER_LENGTH, 32) - -/* Size in bytes of Option ROM trailer */ -#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH) +/* Length of Authentication header section in words */ +#define ICE_NVM_AUTH_HEADER_LEN 0x08 /* The Link Topology Netlist section is stored as a series of words. It is * stored in the NVM as a TLV, with the first two words containing the type From adbf5a42341f6ea038d3626cd4437d9f0ad0b2dd Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Mon, 3 Jun 2024 14:42:32 -0700 Subject: [PATCH 353/537] ice: remove af_xdp_zc_qps bitmap Referenced commit has introduced a bitmap to distinguish between ZC and copy-mode AF_XDP queues, because xsk_get_pool_from_qid() does not do this for us. The bitmap would be especially useful when restoring previous state after rebuild, if only it was not reallocated in the process. This leads to e.g. xdpsock dying after changing number of queues. Instead of preserving the bitmap during the rebuild, remove it completely and distinguish between ZC and copy-mode queues based on the presence of a device associated with the pool. 
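The design choice here is to derive the zero-copy property from the object that owns it instead of mirroring it in a separately allocated bitmap that has to survive every rebuild. The stand-alone sketch below uses invented names and is not the ice driver code; it only shows why the derived check stays correct after the mirror is reallocated and zeroed.

/* A mirror bitmap goes stale across a "rebuild"; deriving the answer from
 * the pool itself does not. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NUM_QUEUES 8

struct pool { bool has_dev; };                  /* stands in for xsk_buff_pool::dev */

static struct pool *pools[NUM_QUEUES];          /* source of truth, survives rebuild */
static unsigned char zc_bitmap[NUM_QUEUES];     /* mirror, reallocated on rebuild */

static bool queue_is_zc(int qid)
{
        return pools[qid] && pools[qid]->has_dev;
}

int main(void)
{
        static struct pool zc_pool = { .has_dev = true };

        pools[3] = &zc_pool;
        zc_bitmap[3] = 1;

        /* VSI rebuild: the mirror is thrown away and re-zeroed */
        memset(zc_bitmap, 0, sizeof(zc_bitmap));

        printf("bitmap says queue 3 is zero-copy: %d\n", zc_bitmap[3]);         /* 0, stale */
        printf("pool   says queue 3 is zero-copy: %d\n", (int)queue_is_zc(3));  /* 1, correct */
        return 0;
}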
Fixes: e102db780e1c ("ice: track AF_XDP ZC enabled queues in bitmap") Reviewed-by: Przemek Kitszel Signed-off-by: Larysa Zaremba Reviewed-by: Simon Horman Tested-by: Chandan Kumar Rout Signed-off-by: Jacob Keller Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-3-e3563aa89b0c@intel.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/ice/ice.h | 32 ++++++++++++++++-------- drivers/net/ethernet/intel/ice/ice_lib.c | 8 ------ drivers/net/ethernet/intel/ice/ice_xsk.c | 13 +++++----- 3 files changed, 27 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 6ad8002b22e14..5e51cb84b6006 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -409,7 +409,6 @@ struct ice_vsi { struct ice_tc_cfg tc_cfg; struct bpf_prog *xdp_prog; struct ice_tx_ring **xdp_rings; /* XDP ring array */ - unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ u16 num_xdp_txq; /* Used XDP queues */ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ @@ -746,6 +745,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring) ring->flags |= ICE_TX_FLAGS_RING_XDP; } +/** + * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID + * @vsi: pointer to VSI + * @qid: index of a queue to look at XSK buff pool presence + * + * Return: A pointer to xsk_buff_pool structure if there is a buffer pool + * attached and configured as zero-copy, NULL otherwise. + */ +static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi, + u16 qid) +{ + struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid); + + if (!ice_is_xdp_ena_vsi(vsi)) + return NULL; + + return (pool && pool->dev) ? pool : NULL; +} + /** * ice_xsk_pool - get XSK buffer pool bound to a ring * @ring: Rx ring to use @@ -758,10 +776,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring) struct ice_vsi *vsi = ring->vsi; u16 qid = ring->q_index; - if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) - return NULL; - - return xsk_get_pool_from_qid(vsi->netdev, qid); + return ice_get_xp_from_qid(vsi, qid); } /** @@ -786,12 +801,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid) if (!ring) return; - if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) { - ring->xsk_pool = NULL; - return; - } - - ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid); + ring->xsk_pool = ice_get_xp_from_qid(vsi, qid); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 5371e91f6bbb4..c0a7ff6c7e877 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -114,14 +114,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->q_vectors) goto err_vectors; - vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL); - if (!vsi->af_xdp_zc_qps) - goto err_zc_qps; - return 0; -err_zc_qps: - devm_kfree(dev, vsi->q_vectors); err_vectors: devm_kfree(dev, vsi->rxq_map); err_rxq_map: @@ -309,8 +303,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) dev = ice_pf_to_dev(pf); - bitmap_free(vsi->af_xdp_zc_qps); - vsi->af_xdp_zc_qps = NULL; /* free the ring and vector containers */ devm_kfree(dev, vsi->q_vectors); vsi->q_vectors = NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 7541f223bf4f6..a65955eb23c0b 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -269,7 +269,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid) if (!pool) return -EINVAL; - clear_bit(qid, vsi->af_xdp_zc_qps); xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR); return 0; @@ -300,8 +299,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) if (err) return err; - set_bit(qid, vsi->af_xdp_zc_qps); - return 0; } @@ -349,11 +346,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present) int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc) { struct ice_rx_ring *rx_ring; - unsigned long q; + uint i; + + ice_for_each_rxq(vsi, i) { + rx_ring = vsi->rx_rings[i]; + if (!rx_ring->xsk_pool) + continue; - for_each_set_bit(q, vsi->af_xdp_zc_qps, - max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) { - rx_ring = vsi->rx_rings[q]; if (ice_realloc_rx_xdp_bufs(rx_ring, zc)) return -ENOMEM; } From 744d197162c2070a6045a71e2666ed93a57cc65d Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Mon, 3 Jun 2024 14:42:33 -0700 Subject: [PATCH 354/537] ice: add flag to distinguish reset from .ndo_bpf in XDP rings config Commit 6624e780a577 ("ice: split ice_vsi_setup into smaller functions") has placed ice_vsi_free_q_vectors() after ice_destroy_xdp_rings() in the rebuild process. The behaviour of the XDP rings config functions is context-dependent, so the change of order has led to ice_destroy_xdp_rings() doing additional work and removing XDP prog, when it was supposed to be preserved. Also, dependency on the PF state reset flags creates an additional, fortunately less common problem: * PFR is requested e.g. by tx_timeout handler * .ndo_bpf() is asked to delete the program, calls ice_destroy_xdp_rings(), but reset flag is set, so rings are destroyed without deleting the program * ice_vsi_rebuild tries to delete non-existent XDP rings, because the program is still on the VSI * system crashes With a similar race, when requested to attach a program, ice_prepare_xdp_rings() can actually skip setting the program in the VSI and nevertheless report success. Instead of reverting to the old order of function calls, add an enum argument to both ice_prepare_xdp_rings() and ice_destroy_xdp_rings() in order to distinguish between calls from rebuild and .ndo_bpf(). 
Fixes: efc2214b6047 ("ice: Add support for XDP") Reviewed-by: Igor Bagnucki Signed-off-by: Larysa Zaremba Reviewed-by: Simon Horman Tested-by: Chandan Kumar Rout Signed-off-by: Jacob Keller Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-4-e3563aa89b0c@intel.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/ice/ice.h | 11 +++++++++-- drivers/net/ethernet/intel/ice/ice_lib.c | 5 +++-- drivers/net/ethernet/intel/ice/ice_main.c | 22 ++++++++++++---------- 3 files changed, 24 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 5e51cb84b6006..a5de6ef9c07ef 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -930,9 +930,16 @@ int ice_down(struct ice_vsi *vsi); int ice_down_up(struct ice_vsi *vsi); int ice_vsi_cfg_lan(struct ice_vsi *vsi); struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi); + +enum ice_xdp_cfg { + ICE_XDP_CFG_FULL, /* Fully apply new config in .ndo_bpf() */ + ICE_XDP_CFG_PART, /* Save/use part of config in VSI rebuild */ +}; + int ice_vsi_determine_xdp_res(struct ice_vsi *vsi); -int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog); -int ice_destroy_xdp_rings(struct ice_vsi *vsi); +int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, + enum ice_xdp_cfg cfg_type); +int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type); int ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index c0a7ff6c7e877..dd8b374823eec 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2285,7 +2285,8 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi) ret = ice_vsi_determine_xdp_res(vsi); if (ret) goto unroll_vector_base; - ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); + ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog, + ICE_XDP_CFG_PART); if (ret) goto unroll_vector_base; } @@ -2429,7 +2430,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi) /* return value check can be skipped here, it always returns * 0 if reset is in progress */ - ice_destroy_xdp_rings(vsi); + ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART); ice_vsi_clear_rings(vsi); ice_vsi_free_q_vectors(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f60c022f79609..2a270aacd24a9 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2711,10 +2711,12 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP * @vsi: VSI to bring up Tx rings used by XDP * @prog: bpf program that will be assigned to VSI + * @cfg_type: create from scratch or restore the existing configuration * * Return 0 on success and negative value on error */ -int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) +int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, + enum ice_xdp_cfg cfg_type) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; int xdp_rings_rem = vsi->num_xdp_txq; @@ -2790,7 +2792,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) * taken into account at the end of ice_vsi_rebuild, where * ice_cfg_vsi_lan is being called */ - if (ice_is_reset_in_progress(pf->state)) + if (cfg_type == ICE_XDP_CFG_PART) 
return 0; /* tell the Tx scheduler that right now we have @@ -2842,22 +2844,21 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) /** * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings * @vsi: VSI to remove XDP rings + * @cfg_type: disable XDP permanently or allow it to be restored later * * Detach XDP rings from irq vectors, clean up the PF bitmap and free * resources */ -int ice_destroy_xdp_rings(struct ice_vsi *vsi) +int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_pf *pf = vsi->back; int i, v_idx; /* q_vectors are freed in reset path so there's no point in detaching - * rings; in case of rebuild being triggered not from reset bits - * in pf->state won't be set, so additionally check first q_vector - * against NULL + * rings */ - if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) + if (cfg_type == ICE_XDP_CFG_PART) goto free_qmap; ice_for_each_q_vector(vsi, v_idx) { @@ -2898,7 +2899,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi) if (static_key_enabled(&ice_xdp_locking_key)) static_branch_dec(&ice_xdp_locking_key); - if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) + if (cfg_type == ICE_XDP_CFG_PART) return 0; ice_vsi_assign_bpf_prog(vsi, NULL); @@ -3009,7 +3010,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, if (xdp_ring_err) { NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP"); } else { - xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); + xdp_ring_err = ice_prepare_xdp_rings(vsi, prog, + ICE_XDP_CFG_FULL); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); } @@ -3020,7 +3022,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed"); } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { xdp_features_clear_redirect_target(vsi->netdev); - xdp_ring_err = ice_destroy_xdp_rings(vsi); + xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); /* reallocate Rx queues that were used for zero-copy */ From f3df4044254c98128890b512bf19cc05588f1fe5 Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Mon, 3 Jun 2024 14:42:34 -0700 Subject: [PATCH 355/537] ice: map XDP queues to vectors in ice_vsi_map_rings_to_vectors() ice_pf_dcb_recfg() re-maps queues to vectors with ice_vsi_map_rings_to_vectors(), which does not restore the previous state for XDP queues. This leads to no AF_XDP traffic after rebuild. Map XDP queues to vectors in ice_vsi_map_rings_to_vectors(). Also, move the code around, so XDP queues are mapped independently only through .ndo_bpf(). 
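As background, the distribution rule that the ring-to-vector mapping follows can be sketched as a small standalone program (illustrative only, mirroring the DIV_ROUND_UP loop rather than quoting the driver): the remaining rings are spread as evenly as possible over the remaining vectors.

#include <stdio.h>

static void map_rings_to_vectors(unsigned int nr_rings, unsigned int nr_vectors)
{
        unsigned int rem = nr_rings;
        unsigned int v, q;

        for (v = 0; v < nr_vectors; v++) {
                /* DIV_ROUND_UP(rem, nr_vectors - v) */
                unsigned int per_v = (rem + nr_vectors - v - 1) / (nr_vectors - v);
                unsigned int base = nr_rings - rem;

                for (q = base; q < base + per_v; q++)
                        printf("ring %u -> vector %u\n", q, v);

                rem -= per_v;
        }
}

int main(void)
{
        map_rings_to_vectors(8, 3);     /* e.g. 8 XDP rings over 3 vectors */
        return 0;
}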
Fixes: 6624e780a577 ("ice: split ice_vsi_setup into smaller functions") Reviewed-by: Przemek Kitszel Signed-off-by: Larysa Zaremba Reviewed-by: Simon Horman Tested-by: Chandan Kumar Rout Signed-off-by: Jacob Keller Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-5-e3563aa89b0c@intel.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/ice/ice.h | 1 + drivers/net/ethernet/intel/ice/ice_base.c | 3 + drivers/net/ethernet/intel/ice/ice_lib.c | 14 ++-- drivers/net/ethernet/intel/ice/ice_main.c | 96 ++++++++++++++--------- 4 files changed, 68 insertions(+), 46 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index a5de6ef9c07ef..99a75a59078ef 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -940,6 +940,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi); int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, enum ice_xdp_cfg cfg_type); int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type); +void ice_map_xdp_rings(struct ice_vsi *vsi); int ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 687f6cb2b917a..5d396c1a77314 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -842,6 +842,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) } rx_rings_rem -= rx_rings_per_v; } + + if (ice_is_xdp_ena_vsi(vsi)) + ice_map_xdp_rings(vsi); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index dd8b374823eec..7629b0190578b 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2274,13 +2274,6 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi) if (ret) goto unroll_vector_base; - ice_vsi_map_rings_to_vectors(vsi); - - /* Associate q_vector rings to napi */ - ice_vsi_set_napi_queues(vsi); - - vsi->stat_offsets_loaded = false; - if (ice_is_xdp_ena_vsi(vsi)) { ret = ice_vsi_determine_xdp_res(vsi); if (ret) @@ -2291,6 +2284,13 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi) goto unroll_vector_base; } + ice_vsi_map_rings_to_vectors(vsi); + + /* Associate q_vector rings to napi */ + ice_vsi_set_napi_queues(vsi); + + vsi->stat_offsets_loaded = false; + /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ if (vsi->type != ICE_VSI_CTRL) /* Do not exit if configuring RSS had an issue, at diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 2a270aacd24a9..1b61ca3a6eb6e 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2707,6 +2707,60 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) bpf_prog_put(old_prog); } +static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid) +{ + struct ice_q_vector *q_vector; + struct ice_tx_ring *ring; + + if (static_key_enabled(&ice_xdp_locking_key)) + return vsi->xdp_rings[qid % vsi->num_xdp_txq]; + + q_vector = vsi->rx_rings[qid]->q_vector; + ice_for_each_tx_ring(ring, q_vector->tx) + if (ice_ring_is_xdp(ring)) + return ring; + + return NULL; +} + +/** + * ice_map_xdp_rings - Map XDP rings to interrupt vectors + * @vsi: the VSI with XDP rings being configured + * + * Map XDP rings to interrupt vectors and perform the configuration steps + * dependent on 
the mapping. + */ +void ice_map_xdp_rings(struct ice_vsi *vsi) +{ + int xdp_rings_rem = vsi->num_xdp_txq; + int v_idx, q_idx; + + /* follow the logic from ice_vsi_map_rings_to_vectors */ + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + int xdp_rings_per_v, q_id, q_base; + + xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, + vsi->num_q_vectors - v_idx); + q_base = vsi->num_xdp_txq - xdp_rings_rem; + + for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { + struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; + + xdp_ring->q_vector = q_vector; + xdp_ring->next = q_vector->tx.tx_ring; + q_vector->tx.tx_ring = xdp_ring; + } + xdp_rings_rem -= xdp_rings_per_v; + } + + ice_for_each_rxq(vsi, q_idx) { + vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi, + q_idx); + ice_tx_xsk_pool(vsi, q_idx); + } +} + /** * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP * @vsi: VSI to bring up Tx rings used by XDP @@ -2719,7 +2773,6 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, enum ice_xdp_cfg cfg_type) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; - int xdp_rings_rem = vsi->num_xdp_txq; struct ice_pf *pf = vsi->back; struct ice_qs_cfg xdp_qs_cfg = { .qs_mutex = &pf->avail_q_mutex, @@ -2732,8 +2785,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, .mapping_mode = ICE_VSI_MAP_CONTIG }; struct device *dev; - int i, v_idx; - int status; + int status, i; dev = ice_pf_to_dev(pf); vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, @@ -2752,42 +2804,6 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, if (ice_xdp_alloc_setup_rings(vsi)) goto clear_xdp_rings; - /* follow the logic from ice_vsi_map_rings_to_vectors */ - ice_for_each_q_vector(vsi, v_idx) { - struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; - int xdp_rings_per_v, q_id, q_base; - - xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, - vsi->num_q_vectors - v_idx); - q_base = vsi->num_xdp_txq - xdp_rings_rem; - - for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { - struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; - - xdp_ring->q_vector = q_vector; - xdp_ring->next = q_vector->tx.tx_ring; - q_vector->tx.tx_ring = xdp_ring; - } - xdp_rings_rem -= xdp_rings_per_v; - } - - ice_for_each_rxq(vsi, i) { - if (static_key_enabled(&ice_xdp_locking_key)) { - vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq]; - } else { - struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector; - struct ice_tx_ring *ring; - - ice_for_each_tx_ring(ring, q_vector->tx) { - if (ice_ring_is_xdp(ring)) { - vsi->rx_rings[i]->xdp_ring = ring; - break; - } - } - } - ice_tx_xsk_pool(vsi, i); - } - /* omit the scheduler update if in reset path; XDP queues will be * taken into account at the end of ice_vsi_rebuild, where * ice_cfg_vsi_lan is being called @@ -2795,6 +2811,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, if (cfg_type == ICE_XDP_CFG_PART) return 0; + ice_map_xdp_rings(vsi); + /* tell the Tx scheduler that right now we have * additional queues */ From 7d67d11fbe194f71298263f48e33ae2afa38197e Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Mon, 3 Jun 2024 14:42:35 -0700 Subject: [PATCH 356/537] igc: Fix Energy Efficient Ethernet support declaration The commit 01cf893bf0f4 ("net: intel: i40e/igc: Remove setting Autoneg in EEE capabilities") removed SUPPORTED_Autoneg field but left inappropriate ethtool_keee structure initialization. 
When "ethtool --show " (get_eee) invoke, the 'ethtool_keee' structure was accidentally overridden. Remove the 'ethtool_keee' overriding and add EEE declaration as per IEEE specification that allows reporting Energy Efficient Ethernet capabilities. Examples: Before fix: ethtool --show-eee enp174s0 EEE settings for enp174s0: EEE status: not supported After fix: EEE settings for enp174s0: EEE status: disabled Tx LPI: disabled Supported EEE link modes: 100baseT/Full 1000baseT/Full 2500baseT/Full Fixes: 01cf893bf0f4 ("net: intel: i40e/igc: Remove setting Autoneg in EEE capabilities") Suggested-by: Dima Ruinskiy Signed-off-by: Sasha Neftin Tested-by: Naama Meir Signed-off-by: Jacob Keller Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-6-e3563aa89b0c@intel.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/igc/igc_ethtool.c | 9 +++++++-- drivers/net/ethernet/intel/igc/igc_main.c | 4 ++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index f2c4f1966bb04..0cd2bd695db1d 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1629,12 +1629,17 @@ static int igc_ethtool_get_eee(struct net_device *netdev, struct igc_hw *hw = &adapter->hw; u32 eeer; + linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + edata->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + edata->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + edata->supported); + if (hw->dev_spec._base.eee_enable) mii_eee_cap1_mod_linkmode_t(edata->advertised, adapter->eee_advert); - *edata = adapter->eee; - eeer = rd32(IGC_EEER); /* EEE status on negotiated link */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 12f004f46082c..305e05294a265 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -12,6 +12,7 @@ #include #include #include +#include #include @@ -4975,6 +4976,9 @@ void igc_up(struct igc_adapter *adapter) /* start the watchdog. */ hw->mac.get_link_status = true; schedule_work(&adapter->watchdog_task); + + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T | + MDIO_EEE_2_5GT; } /** From 5703fc058efdafcdd6b70776ee562478f0753acb Mon Sep 17 00:00:00 2001 From: Ian Forbes Date: Thu, 28 Mar 2024 14:07:16 -0500 Subject: [PATCH 357/537] drm/vmwgfx: Don't memcmp equivalent pointers These pointers are frequently the same and memcmp does not compare the pointers before comparing their contents so this was wasting cycles comparing 16 KiB of memory which will always be equal. 
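For illustration, the same short-circuit as a hedged standalone helper (not the vmwgfx code): memcmp() never checks whether both arguments point at the same buffer, so the caller has to skip the byte-by-byte compare itself.

#include <stdbool.h>
#include <string.h>

static bool images_differ(const void *old_img, const void *new_img, size_t size)
{
        if (old_img == new_img)
                return false;   /* same backing store, contents cannot differ */

        /* only compare when both images are actually present */
        return old_img && new_img && memcmp(old_img, new_img, size) != 0;
}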
Fixes: bb6780aa5a1d ("drm/vmwgfx: Diff cursors when using cmds") Signed-off-by: Ian Forbes Signed-off-by: Zack Rusin Link: https://patchwork.freedesktop.org/patch/msgid/20240328190716.27367-1-ian.forbes@broadcom.com --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 9532258a0848c..00c4ff6841301 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -224,7 +224,7 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps, new_image = vmw_du_cursor_plane_acquire_image(new_vps); changed = false; - if (old_image && new_image) + if (old_image && new_image && old_image != new_image) changed = memcmp(old_image, new_image, size) != 0; return changed; From b0c9a26435413b81799047a7be53255640432547 Mon Sep 17 00:00:00 2001 From: Aleksandr Mishin Date: Tue, 4 Jun 2024 11:25:00 +0300 Subject: [PATCH 358/537] net: wwan: iosm: Fix tainted pointer delete is case of region creation fail In case of region creation fail in ipc_devlink_create_region(), previously created regions delete process starts from tainted pointer which actually holds error code value. Fix this bug by decreasing region index before delete. Found by Linux Verification Center (linuxtesting.org) with SVACE. Fixes: 4dcd183fbd67 ("net: wwan: iosm: devlink registration") Signed-off-by: Aleksandr Mishin Acked-by: Sergey Ryazanov Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20240604082500.20769-1-amishin@t-argos.ru Signed-off-by: Paolo Abeni --- drivers/net/wwan/iosm/iosm_ipc_devlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c index bef6819986e93..33d6342124bc3 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c +++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c @@ -211,7 +211,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink) rc = PTR_ERR(devlink->cd_regions[i]); dev_err(devlink->dev, "Devlink region fail,err %d", rc); /* Delete previously created regions */ - for ( ; i >= 0; i--) + for (i--; i >= 0; i--) devlink_region_destroy(devlink->cd_regions[i]); goto region_create_fail; } From 26bfb8b57063f52b867f9b6c8d1742fcb5bd656c Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:27 -0700 Subject: [PATCH 359/537] af_unix: Set sk->sk_state under unix_state_lock() for truly disconencted peer. When a SOCK_DGRAM socket connect()s to another socket, the both sockets' sk->sk_state are changed to TCP_ESTABLISHED so that we can register them to BPF SOCKMAP. When the socket disconnects from the peer by connect(AF_UNSPEC), the state is set back to TCP_CLOSE. Then, the peer's state is also set to TCP_CLOSE, but the update is done locklessly and unconditionally. Let's say socket A connect()ed to B, B connect()ed to C, and A disconnects from B. After the first two connect()s, all three sockets' sk->sk_state are TCP_ESTABLISHED: $ ss -xa Netid State Recv-Q Send-Q Local Address:Port Peer Address:PortProcess u_dgr ESTAB 0 0 @A 641 * 642 u_dgr ESTAB 0 0 @B 642 * 643 u_dgr ESTAB 0 0 @C 643 * 0 And after the disconnect, B's state is TCP_CLOSE even though it's still connected to C and C's state is TCP_ESTABLISHED. 
$ ss -xa Netid State Recv-Q Send-Q Local Address:Port Peer Address:PortProcess u_dgr UNCONN 0 0 @A 641 * 0 u_dgr UNCONN 0 0 @B 642 * 643 u_dgr ESTAB 0 0 @C 643 * 0 In this case, we cannot register B to SOCKMAP. So, when a socket disconnects from the peer, we should not set TCP_CLOSE to the peer if the peer is connected to yet another socket, and this must be done under unix_state_lock(). Note that we use WRITE_ONCE() for sk->sk_state as there are many lockless readers. These data-races will be fixed in the following patches. Fixes: 83301b5367a9 ("af_unix: Set TCP_ESTABLISHED for datagram sockets too") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/af_unix.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 25b49efc0926b..b162164b7a420 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -570,7 +570,6 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other) sk_error_report(other); } } - other->sk_state = TCP_CLOSE; } static void unix_sock_destructor(struct sock *sk) @@ -1424,8 +1423,15 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, unix_state_double_unlock(sk, other); - if (other != old_peer) + if (other != old_peer) { unix_dgram_disconnected(sk, old_peer); + + unix_state_lock(old_peer); + if (!unix_peer(old_peer)) + WRITE_ONCE(old_peer->sk_state, TCP_CLOSE); + unix_state_unlock(old_peer); + } + sock_put(old_peer); } else { unix_peer(sk) = other; From 942238f9735a4a4ebf8274b218d9a910158941d1 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:28 -0700 Subject: [PATCH 360/537] af_unix: Annodate data-races around sk->sk_state for writers. sk->sk_state is changed under unix_state_lock(), but it's read locklessly in many places. This patch adds WRITE_ONCE() on the writer side. We will add READ_ONCE() to the lockless readers in the following patches. 
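As a minimal sketch of the annotation pattern being applied here (generic toy code, not the af_unix functions): the field is still modified only under the lock, but because some readers run locklessly, the store is marked with WRITE_ONCE() and the lockless load with READ_ONCE(), so the accesses cannot be torn and KCSAN treats the race as intentional.

#include <linux/compiler.h>
#include <linux/spinlock.h>

struct toy_sock {
        spinlock_t      lock;
        unsigned char   state;  /* written under ->lock, also read locklessly */
};

static void toy_set_state(struct toy_sock *ts, unsigned char new_state)
{
        spin_lock(&ts->lock);
        WRITE_ONCE(ts->state, new_state);       /* paired with READ_ONCE() below */
        spin_unlock(&ts->lock);
}

static unsigned char toy_peek_state(const struct toy_sock *ts)
{
        return READ_ONCE(ts->state);            /* lockless reader */
}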
Fixes: 83301b5367a9 ("af_unix: Set TCP_ESTABLISHED for datagram sockets too") Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/af_unix.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index b162164b7a420..424d021a4d7d7 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -616,7 +616,7 @@ static void unix_release_sock(struct sock *sk, int embrion) u->path.dentry = NULL; u->path.mnt = NULL; state = sk->sk_state; - sk->sk_state = TCP_CLOSE; + WRITE_ONCE(sk->sk_state, TCP_CLOSE); skpair = unix_peer(sk); unix_peer(sk) = NULL; @@ -738,7 +738,8 @@ static int unix_listen(struct socket *sock, int backlog) if (backlog > sk->sk_max_ack_backlog) wake_up_interruptible_all(&u->peer_wait); sk->sk_max_ack_backlog = backlog; - sk->sk_state = TCP_LISTEN; + WRITE_ONCE(sk->sk_state, TCP_LISTEN); + /* set credentials so connect can copy them */ init_peercred(sk); err = 0; @@ -1401,7 +1402,8 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, if (err) goto out_unlock; - sk->sk_state = other->sk_state = TCP_ESTABLISHED; + WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED); + WRITE_ONCE(other->sk_state, TCP_ESTABLISHED); } else { /* * 1003.1g breaking connected state with AF_UNSPEC @@ -1418,7 +1420,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, unix_peer(sk) = other; if (!other) - sk->sk_state = TCP_CLOSE; + WRITE_ONCE(sk->sk_state, TCP_CLOSE); unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer); unix_state_double_unlock(sk, other); @@ -1639,7 +1641,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, copy_peercred(sk, other); sock->state = SS_CONNECTED; - sk->sk_state = TCP_ESTABLISHED; + WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED); sock_hold(newsk); smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */ @@ -2050,7 +2052,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, unix_peer(sk) = NULL; unix_dgram_peer_wake_disconnect_wakeup(sk, other); - sk->sk_state = TCP_CLOSE; + WRITE_ONCE(sk->sk_state, TCP_CLOSE); unix_state_unlock(sk); unix_dgram_disconnected(sk, other); From 3a0f38eb285c8c2eead4b3230c7ac2983707599d Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:29 -0700 Subject: [PATCH 361/537] af_unix: Annotate data-race of sk->sk_state in unix_inq_len(). ioctl(SIOCINQ) calls unix_inq_len() that checks sk->sk_state first and returns -EINVAL if it's TCP_LISTEN. Then, for SOCK_STREAM sockets, unix_inq_len() returns the number of bytes in recvq. However, unix_inq_len() does not hold unix_state_lock(), and the concurrent listen() might change the state after checking sk->sk_state. If the race occurs, 0 is returned for the listener, instead of -EINVAL, because the length of skb with embryo is 0. We could hold unix_state_lock() in unix_inq_len(), but it's overkill given the result is true for pre-listen() TCP_CLOSE state. So, let's use READ_ONCE() for sk->sk_state in unix_inq_len(). 
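For context, a small userspace sketch of the interface involved (illustrative only): SIOCINQ on an AF_UNIX descriptor reports the number of unread bytes, and, as described above, a listening socket gets -EINVAL.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>      /* SIOCINQ, an alias of FIONREAD */

static int print_unread_bytes(int fd)
{
        int unread = 0;

        if (ioctl(fd, SIOCINQ, &unread) < 0) {
                perror("ioctl(SIOCINQ)");       /* EINVAL on a listener */
                return -1;
        }

        printf("unread bytes: %d\n", unread);
        return 0;
}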
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/af_unix.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 424d021a4d7d7..b37b53767b29b 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -3017,7 +3017,7 @@ long unix_inq_len(struct sock *sk) struct sk_buff *skb; long amount = 0; - if (sk->sk_state == TCP_LISTEN) + if (READ_ONCE(sk->sk_state) == TCP_LISTEN) return -EINVAL; spin_lock(&sk->sk_receive_queue.lock); From eb0718fb3e97ad0d6f4529b810103451c90adf94 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:30 -0700 Subject: [PATCH 362/537] af_unix: Annotate data-races around sk->sk_state in unix_write_space() and poll(). unix_poll() and unix_dgram_poll() read sk->sk_state locklessly and calls unix_writable() which also reads sk->sk_state without holding unix_state_lock(). Let's use READ_ONCE() in unix_poll() and unix_dgram_poll() and pass it to unix_writable(). While at it, we remove TCP_SYN_SENT check in unix_dgram_poll() as that state does not exist for AF_UNIX socket since the code was added. Fixes: 1586a5877db9 ("af_unix: do not report POLLOUT on listeners") Fixes: 3c73419c09a5 ("af_unix: fix 'poll for write'/ connected DGRAM sockets") Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/af_unix.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index b37b53767b29b..3bacf47cb94de 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -530,9 +530,9 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) return 0; } -static int unix_writable(const struct sock *sk) +static int unix_writable(const struct sock *sk, unsigned char state) { - return sk->sk_state != TCP_LISTEN && + return state != TCP_LISTEN && (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf; } @@ -541,7 +541,7 @@ static void unix_write_space(struct sock *sk) struct socket_wq *wq; rcu_read_lock(); - if (unix_writable(sk)) { + if (unix_writable(sk, READ_ONCE(sk->sk_state))) { wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, @@ -3129,12 +3129,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; + unsigned char state; __poll_t mask; u8 shutdown; sock_poll_wait(file, sock, wait); mask = 0; shutdown = READ_ONCE(sk->sk_shutdown); + state = READ_ONCE(sk->sk_state); /* exceptional events? */ if (READ_ONCE(sk->sk_err)) @@ -3156,14 +3158,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa /* Connection-based need to check for termination and startup */ if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && - sk->sk_state == TCP_CLOSE) + state == TCP_CLOSE) mask |= EPOLLHUP; /* * we set writable also when the other side has shut down the * connection. This prevents stuck sockets. 
*/ - if (unix_writable(sk)) + if (unix_writable(sk, state)) mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; return mask; @@ -3174,12 +3176,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, { struct sock *sk = sock->sk, *other; unsigned int writable; + unsigned char state; __poll_t mask; u8 shutdown; sock_poll_wait(file, sock, wait); mask = 0; shutdown = READ_ONCE(sk->sk_shutdown); + state = READ_ONCE(sk->sk_state); /* exceptional events? */ if (READ_ONCE(sk->sk_err) || @@ -3199,19 +3203,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, mask |= EPOLLIN | EPOLLRDNORM; /* Connection-based need to check for termination and startup */ - if (sk->sk_type == SOCK_SEQPACKET) { - if (sk->sk_state == TCP_CLOSE) - mask |= EPOLLHUP; - /* connection hasn't started yet? */ - if (sk->sk_state == TCP_SYN_SENT) - return mask; - } + if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE) + mask |= EPOLLHUP; /* No write status requested, avoid expensive OUT tests. */ if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT))) return mask; - writable = unix_writable(sk); + writable = unix_writable(sk, state); if (writable) { unix_state_lock(sk); From a9bf9c7dc6a5899c01cb8f6e773a66315a5cd4b7 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:31 -0700 Subject: [PATCH 363/537] af_unix: Annotate data-race of sk->sk_state in unix_stream_connect(). As small optimisation, unix_stream_connect() prefetches the client's sk->sk_state without unix_state_lock() and checks if it's TCP_CLOSE. Later, sk->sk_state is checked again under unix_state_lock(). Let's use READ_ONCE() for the first check and TCP_CLOSE directly for the second check. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/af_unix.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 3bacf47cb94de..84552826530df 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1481,7 +1481,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, struct sk_buff *skb = NULL; long timeo; int err; - int st; err = unix_validate_addr(sunaddr, addr_len); if (err) @@ -1571,9 +1570,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, Well, and we have to recheck the state after socket locked. */ - st = sk->sk_state; - - switch (st) { + switch (READ_ONCE(sk->sk_state)) { case TCP_CLOSE: /* This is ok... continue with connect */ break; @@ -1588,7 +1585,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, unix_state_lock_nested(sk, U_LOCK_SECOND); - if (sk->sk_state != st) { + if (sk->sk_state != TCP_CLOSE) { unix_state_unlock(sk); unix_state_unlock(other); sock_put(other); From 1b536948e805aab61a48c5aa5db10c9afee880bd Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:32 -0700 Subject: [PATCH 364/537] af_unix: Annotate data-race of sk->sk_state in unix_accept(). Once sk->sk_state is changed to TCP_LISTEN, it never changes. unix_accept() takes the advantage and reads sk->sk_state without holding unix_state_lock(). Let's use READ_ONCE() there. 
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/af_unix.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 84552826530df..4763c26ae480a 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1710,7 +1710,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, goto out; arg->err = -EINVAL; - if (sk->sk_state != TCP_LISTEN) + if (READ_ONCE(sk->sk_state) != TCP_LISTEN) goto out; /* If socket state is TCP_LISTEN it cannot change (for now...), From 8a34d4e8d9742a24f74998f45a6a98edd923319b Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:33 -0700 Subject: [PATCH 365/537] af_unix: Annotate data-races around sk->sk_state in sendmsg() and recvmsg(). The following functions read sk->sk_state locklessly and proceed only if the state is TCP_ESTABLISHED. * unix_stream_sendmsg * unix_stream_read_generic * unix_seqpacket_sendmsg * unix_seqpacket_recvmsg Let's use READ_ONCE() there. Fixes: a05d2ad1c1f3 ("af_unix: Only allow recv on connected seqpacket sockets.") Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/af_unix.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 4763c26ae480a..4ef9c21783a5a 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2226,7 +2226,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, } if (msg->msg_namelen) { - err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP; + err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP; goto out_err; } else { err = -ENOTCONN; @@ -2340,7 +2340,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg, if (err) return err; - if (sk->sk_state != TCP_ESTABLISHED) + if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED) return -ENOTCONN; if (msg->msg_namelen) @@ -2354,7 +2354,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg, { struct sock *sk = sock->sk; - if (sk->sk_state != TCP_ESTABLISHED) + if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED) return -ENOTCONN; return unix_dgram_recvmsg(sock, msg, size, flags); @@ -2683,7 +2683,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state, size_t size = state->size; unsigned int last_len; - if (unlikely(sk->sk_state != TCP_ESTABLISHED)) { + if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) { err = -EINVAL; goto out; } From af4c733b6b1aded4dc808fafece7dfe6e9d2ebb3 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:34 -0700 Subject: [PATCH 366/537] af_unix: Annotate data-race of sk->sk_state in unix_stream_read_skb(). unix_stream_read_skb() is called from sk->sk_data_ready() context where unix_state_lock() is not held. Let's use READ_ONCE() there. 
Fixes: 77462de14a43 ("af_unix: Add read_sock for stream socket types") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/af_unix.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 4ef9c21783a5a..e7b74207aa3bd 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2659,7 +2659,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk, static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor) { - if (unlikely(sk->sk_state != TCP_ESTABLISHED)) + if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) return -ENOTCONN; return unix_read_skb(sk, recv_actor); From 0aa3be7b3e1f8f997312cc4705f8165e02806f8f Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:35 -0700 Subject: [PATCH 367/537] af_unix: Annotate data-races around sk->sk_state in UNIX_DIAG. While dumping AF_UNIX sockets via UNIX_DIAG, sk->sk_state is read locklessly. Let's use READ_ONCE() there. Note that the result could be inconsistent if the socket is dumped during the state change. This is common for other SOCK_DIAG and similar interfaces. Fixes: c9da99e6475f ("unix_diag: Fixup RQLEN extension report") Fixes: 2aac7a2cb0d9 ("unix_diag: Pending connections IDs NLA") Fixes: 45a96b9be6ec ("unix_diag: Dumping all sockets core") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/diag.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/unix/diag.c b/net/unix/diag.c index ae39538c5042b..116cf508aea4a 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -65,7 +65,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb) u32 *buf; int i; - if (sk->sk_state == TCP_LISTEN) { + if (READ_ONCE(sk->sk_state) == TCP_LISTEN) { spin_lock(&sk->sk_receive_queue.lock); attr = nla_reserve(nlskb, UNIX_DIAG_ICONS, @@ -103,7 +103,7 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb) { struct unix_diag_rqlen rql; - if (sk->sk_state == TCP_LISTEN) { + if (READ_ONCE(sk->sk_state) == TCP_LISTEN) { rql.udiag_rqueue = sk->sk_receive_queue.qlen; rql.udiag_wqueue = sk->sk_max_ack_backlog; } else { @@ -136,7 +136,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r rep = nlmsg_data(nlh); rep->udiag_family = AF_UNIX; rep->udiag_type = sk->sk_type; - rep->udiag_state = sk->sk_state; + rep->udiag_state = READ_ONCE(sk->sk_state); rep->pad = 0; rep->udiag_ino = sk_ino; sock_diag_save_cookie(sk, rep->udiag_cookie); @@ -215,7 +215,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) sk_for_each(sk, &net->unx.table.buckets[slot]) { if (num < s_num) goto next; - if (!(req->udiag_states & (1 << sk->sk_state))) + if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state)))) goto next; if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk), NETLINK_CB(cb->skb).portid, From b0632e53e0da8054e36bc973f0eec69d30f1b7c6 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:36 -0700 Subject: [PATCH 368/537] af_unix: Annotate data-races around sk->sk_sndbuf. sk_setsockopt() changes sk->sk_sndbuf under lock_sock(), but it's not used in af_unix.c. Let's use READ_ONCE() to read sk->sk_sndbuf in unix_writable(), unix_dgram_sendmsg(), and unix_stream_sendmsg(). 
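For context, the knob on the other side of this race, as an illustrative userspace snippet: SO_SNDBUF can be updated at any moment from another thread, which is why the unlocked fast paths above now read sk->sk_sndbuf with READ_ONCE().

#include <stdio.h>
#include <sys/socket.h>

static int set_sndbuf(int fd, int bytes)
{
        /* the kernel doubles this value and enforces a lower bound */
        if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &bytes, sizeof(bytes)) < 0) {
                perror("setsockopt(SO_SNDBUF)");
                return -1;
        }

        return 0;
}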
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/af_unix.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index e7b74207aa3bd..de2a5e334a88e 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -533,7 +533,7 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) static int unix_writable(const struct sock *sk, unsigned char state) { return state != TCP_LISTEN && - (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf; + (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf); } static void unix_write_space(struct sock *sk) @@ -1967,7 +1967,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, } err = -EMSGSIZE; - if (len > sk->sk_sndbuf - 32) + if (len > READ_ONCE(sk->sk_sndbuf) - 32) goto out; if (len > SKB_MAX_ALLOC) { @@ -2247,7 +2247,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, &err, 0); } else { /* Keep two messages in the pipe so it schedules better */ - size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64); + size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64); /* allow fallback to order-0 allocations */ size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ); From bd9f2d05731f6a112d0c7391a0d537bfc588dbe6 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:37 -0700 Subject: [PATCH 369/537] af_unix: Annotate data-race of net->unx.sysctl_max_dgram_qlen. net->unx.sysctl_max_dgram_qlen is exposed as a sysctl knob and can be changed concurrently. Let's use READ_ONCE() in unix_create1(). Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/af_unix.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index de2a5e334a88e..623ee39657e23 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -976,7 +976,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, sk->sk_hash = unix_unbound_hash(sk); sk->sk_allocation = GFP_KERNEL_ACCOUNT; sk->sk_write_space = unix_write_space; - sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen; + sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen); sk->sk_destruct = unix_sock_destructor; u = unix_sk(sk); u->listener = NULL; From 45d872f0e65593176d880ec148f41ad7c02e40a7 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:38 -0700 Subject: [PATCH 370/537] af_unix: Use unix_recvq_full_lockless() in unix_stream_connect(). Once sk->sk_state is changed to TCP_LISTEN, it never changes. unix_accept() takes advantage of this characteristics; it does not hold the listener's unix_state_lock() and only acquires recvq lock to pop one skb. It means unix_state_lock() does not prevent the queue length from changing in unix_stream_connect(). Thus, we need to use unix_recvq_full_lockless() to avoid data-race. Now we remove unix_recvq_full() as no one uses it. 
Note that we can remove READ_ONCE() for sk->sk_max_ack_backlog in unix_recvq_full_lockless() because of the following reasons: (1) For SOCK_DGRAM, it is a written-once field in unix_create1() (2) For SOCK_STREAM and SOCK_SEQPACKET, it is changed under the listener's unix_state_lock() in unix_listen(), and we hold the lock in unix_stream_connect() Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/af_unix.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 623ee39657e23..eb3ba3448ed34 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -221,15 +221,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk) return unix_peer(osk) == NULL || unix_our_peer(sk, osk); } -static inline int unix_recvq_full(const struct sock *sk) -{ - return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; -} - static inline int unix_recvq_full_lockless(const struct sock *sk) { - return skb_queue_len_lockless(&sk->sk_receive_queue) > - READ_ONCE(sk->sk_max_ack_backlog); + return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; } struct sock *unix_peer_get(struct sock *s) @@ -1545,7 +1539,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, if (other->sk_shutdown & RCV_SHUTDOWN) goto out_unlock; - if (unix_recvq_full(other)) { + if (unix_recvq_full_lockless(other)) { err = -EAGAIN; if (!timeo) goto out_unlock; From 83690b82d228b3570565ebd0b41873933238b97f Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:39 -0700 Subject: [PATCH 371/537] af_unix: Use skb_queue_empty_lockless() in unix_release_sock(). If the socket type is SOCK_STREAM or SOCK_SEQPACKET, unix_release_sock() checks the length of the peer socket's recvq under unix_state_lock(). However, unix_stream_read_generic() calls skb_unlink() after releasing the lock. Also, for SOCK_SEQPACKET, __skb_try_recv_datagram() unlinks the skb without unix_state_lock(). Thus, unix_state_lock() does not protect qlen. Let's use skb_queue_empty_lockless() in unix_release_sock(). Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/af_unix.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index eb3ba3448ed34..80846279de9f3 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -631,7 +631,7 @@ static void unix_release_sock(struct sock *sk, int embrion) unix_state_lock(skpair); /* No more writes */ WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK); - if (!skb_queue_empty(&sk->sk_receive_queue) || embrion) + if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion) WRITE_ONCE(skpair->sk_err, ECONNRESET); unix_state_unlock(skpair); skpair->sk_state_change(skpair); From 5d915e584d8408211d4567c22685aae8820bfc55 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:40 -0700 Subject: [PATCH 372/537] af_unix: Use skb_queue_len_lockless() in sk_diag_show_rqlen(). We can dump the socket queue length via UNIX_DIAG by specifying UDIAG_SHOW_RQLEN. If sk->sk_state is TCP_LISTEN, we return the recv queue length, but here we do not hold the recvq lock. Let's use skb_queue_len_lockless() in sk_diag_show_rqlen(). 
Fixes: c9da99e6475f ("unix_diag: Fixup RQLEN extension report") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/diag.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/unix/diag.c b/net/unix/diag.c index 116cf508aea4a..321336f91a0af 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -104,7 +104,7 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb) struct unix_diag_rqlen rql; if (READ_ONCE(sk->sk_state) == TCP_LISTEN) { - rql.udiag_rqueue = sk->sk_receive_queue.qlen; + rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue); rql.udiag_wqueue = sk->sk_max_ack_backlog; } else { rql.udiag_rqueue = (u32) unix_inq_len(sk); From efaf24e30ec39ebbea9112227485805a48b0ceb1 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Tue, 4 Jun 2024 09:52:41 -0700 Subject: [PATCH 373/537] af_unix: Annotate data-race of sk->sk_shutdown in sk_diag_fill(). While dumping sockets via UNIX_DIAG, we do not hold unix_state_lock(). Let's use READ_ONCE() to read sk->sk_shutdown. Fixes: e4e541a84863 ("sock-diag: Report shutdown for inet and unix sockets (v2)") Signed-off-by: Kuniyuki Iwashima Signed-off-by: Paolo Abeni --- net/unix/diag.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/unix/diag.c b/net/unix/diag.c index 321336f91a0af..937edf4afed41 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -165,7 +165,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO)) goto out_nlmsg_trim; - if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown)) + if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown))) goto out_nlmsg_trim; if ((req->udiag_show & UDIAG_SHOW_UID) && From c181689bc479d3b2300f91fc4d53e089d7631898 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 2 Jun 2024 03:20:38 +0900 Subject: [PATCH 374/537] kconfig: remove unneeded code for user-supplied values being out of range This is a leftover from commit ce1fc9345a59 ("kconfig: do not clear SYMBOL_DEF_USER when the value is out of range"). This code is now redundant because if a user-supplied value is out of range, the value adjusted by sym_validate_range() differs, and conf_unsaved has already been incremented a few lines above. Signed-off-by: Masahiro Yamada --- scripts/kconfig/confdata.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 387503daf0f73..85b53069ba7a4 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -533,19 +533,6 @@ int conf_read(const char *name) */ if (sym->visible == no && !conf_unsaved) sym->flags &= ~SYMBOL_DEF_USER; - switch (sym->type) { - case S_STRING: - case S_INT: - case S_HEX: - /* Reset a string value if it's out of range */ - if (sym_string_within_range(sym, sym->def[S_DEF_USER].val)) - break; - sym->flags &= ~SYMBOL_VALID; - conf_unsaved++; - break; - default: - break; - } } } From 46edf4372e336ef3a61c3126e49518099d2e2e6d Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 2 Jun 2024 03:20:40 +0900 Subject: [PATCH 375/537] kconfig: gconf: give a proper initial state to the Save button Currently, the initial state of the "Save" button is always active. If none of the CONFIG options are changed while loading the .config file, the "Save" button should be greyed out. This can be fixed by calling conf_read() after widget initialization. 
Signed-off-by: Masahiro Yamada --- scripts/kconfig/gconf.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c index cc400ffe66150..e04dbafd3add9 100644 --- a/scripts/kconfig/gconf.c +++ b/scripts/kconfig/gconf.c @@ -1422,7 +1422,6 @@ int main(int ac, char *av[]) conf_parse(name); fixup_rootmenu(&rootmenu); - conf_read(NULL); /* Load the interface and connect signals */ init_main_window(glade_file); @@ -1430,6 +1429,8 @@ int main(int ac, char *av[]) init_left_tree(); init_right_tree(); + conf_read(NULL); + switch (view_mode) { case SINGLE_VIEW: display_tree_part(); From bf83266a1eef8251e2f126dba635039de069104a Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 4 Jun 2024 01:19:02 +0900 Subject: [PATCH 376/537] kconfig: doc: fix a typo in the note about 'imply' This sentence does not make sense due to a typo. Fix it. Fixes: def2fbffe62c ("kconfig: allow symbols implied by y to become m") Signed-off-by: Masahiro Yamada Reviewed-by: Randy Dunlap --- Documentation/kbuild/kconfig-language.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst index 555c2f8399697..86be5b857cc49 100644 --- a/Documentation/kbuild/kconfig-language.rst +++ b/Documentation/kbuild/kconfig-language.rst @@ -184,7 +184,7 @@ applicable everywhere (see syntax). ability to hook into a secondary subsystem while allowing the user to configure that subsystem out without also having to unset these drivers. - Note: If the combination of FOO=y and BAR=m causes a link error, + Note: If the combination of FOO=y and BAZ=m causes a link error, you can guard the function call with IS_REACHABLE():: foo_init() From 45c7f555bf5e716d9c6ffb737e97d4cc9b4c21ef Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 4 Jun 2024 01:19:03 +0900 Subject: [PATCH 377/537] kconfig: doc: document behavior of 'select' and 'imply' followed by 'if' Documentation/kbuild/kconfig-language.rst explains the behavior of 'select' as follows: reverse dependencies can be used to force a lower limit of another symbol. The value of the current menu symbol is used as the minimal value can be set to. This is not true when the 'select' property is followed by 'if'. [Test Code] config MODULES def_bool y modules config A def_tristate y select C if B config B def_tristate m config C tristate [Result] CONFIG_MODULES=y CONFIG_A=y CONFIG_B=m CONFIG_C=m If "the value of A is used as the minimal value C can be set to", C must be 'y'. The actual behavior is "C is selected by (A && B)". The lower limit of C is downgraded due to B being 'm'. This behavior is kind of weird, and this has arisen several times in the mailing list. I do not know whether it is a bug or intended behavior. Anyway, it is not feasible to change it now because many Kconfig files are written based on this behavior. The same applies to 'imply'. Document this (but reserve the possibility for a future change). Signed-off-by: Masahiro Yamada Reviewed-by: Randy Dunlap --- Documentation/kbuild/kconfig-language.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst index 86be5b857cc49..1fb3f5e6193c3 100644 --- a/Documentation/kbuild/kconfig-language.rst +++ b/Documentation/kbuild/kconfig-language.rst @@ -150,6 +150,12 @@ applicable everywhere (see syntax). 
That will limit the usefulness but on the other hand avoid the illegal configurations all over. + If "select" is followed by "if" , will be + selected by the logical AND of the value of the current menu symbol + and . This means, the lower limit can be downgraded due to the + presence of "if" . This behavior may seem weird, but we rely on + it. (The future of this behavior is undecided.) + - weak reverse dependencies: "imply" ["if" ] This is similar to "select" as it enforces a lower limit on another @@ -202,6 +208,10 @@ applicable everywhere (see syntax). imply BAR imply BAZ + Note: If "imply" is followed by "if" , the default of + will be the logical AND of the value of the current menu symbol and . + (The future of this behavior is undecided.) + - limiting menu display: "visible if" This attribute is only applicable to menu blocks, if the condition is From b01e1c030770ff3b4fe37fc7cc6bca03f594133f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 4 Jun 2024 19:35:49 +0000 Subject: [PATCH 378/537] ipv6: fix possible race in __fib6_drop_pcpu_from() syzbot found a race in __fib6_drop_pcpu_from() [1] If compiler reads more than once (*ppcpu_rt), second read could read NULL, if another cpu clears the value in rt6_get_pcpu_route(). Add a READ_ONCE() to prevent this race. Also add rcu_read_lock()/rcu_read_unlock() because we rely on RCU protection while dereferencing pcpu_rt. [1] Oops: general protection fault, probably for non-canonical address 0xdffffc0000000012: 0000 [#1] PREEMPT SMP KASAN PTI KASAN: null-ptr-deref in range [0x0000000000000090-0x0000000000000097] CPU: 0 PID: 7543 Comm: kworker/u8:17 Not tainted 6.10.0-rc1-syzkaller-00013-g2bfcfd584ff5 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/02/2024 Workqueue: netns cleanup_net RIP: 0010:__fib6_drop_pcpu_from.part.0+0x10a/0x370 net/ipv6/ip6_fib.c:984 Code: f8 48 c1 e8 03 80 3c 28 00 0f 85 16 02 00 00 4d 8b 3f 4d 85 ff 74 31 e8 74 a7 fa f7 49 8d bf 90 00 00 00 48 89 f8 48 c1 e8 03 <80> 3c 28 00 0f 85 1e 02 00 00 49 8b 87 90 00 00 00 48 8b 0c 24 48 RSP: 0018:ffffc900040df070 EFLAGS: 00010206 RAX: 0000000000000012 RBX: 0000000000000001 RCX: ffffffff89932e16 RDX: ffff888049dd1e00 RSI: ffffffff89932d7c RDI: 0000000000000091 RBP: dffffc0000000000 R08: 0000000000000005 R09: 0000000000000007 R10: 0000000000000001 R11: 0000000000000006 R12: ffff88807fa080b8 R13: fffffbfff1a9a07d R14: ffffed100ff41022 R15: 0000000000000001 FS: 0000000000000000(0000) GS:ffff8880b9200000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000001b32c26000 CR3: 000000005d56e000 CR4: 00000000003526f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: __fib6_drop_pcpu_from net/ipv6/ip6_fib.c:966 [inline] fib6_drop_pcpu_from net/ipv6/ip6_fib.c:1027 [inline] fib6_purge_rt+0x7f2/0x9f0 net/ipv6/ip6_fib.c:1038 fib6_del_route net/ipv6/ip6_fib.c:1998 [inline] fib6_del+0xa70/0x17b0 net/ipv6/ip6_fib.c:2043 fib6_clean_node+0x426/0x5b0 net/ipv6/ip6_fib.c:2205 fib6_walk_continue+0x44f/0x8d0 net/ipv6/ip6_fib.c:2127 fib6_walk+0x182/0x370 net/ipv6/ip6_fib.c:2175 fib6_clean_tree+0xd7/0x120 net/ipv6/ip6_fib.c:2255 __fib6_clean_all+0x100/0x2d0 net/ipv6/ip6_fib.c:2271 rt6_sync_down_dev net/ipv6/route.c:4906 [inline] rt6_disable_ip+0x7ed/0xa00 net/ipv6/route.c:4911 addrconf_ifdown.isra.0+0x117/0x1b40 net/ipv6/addrconf.c:3855 addrconf_notify+0x223/0x19e0 net/ipv6/addrconf.c:3778 notifier_call_chain+0xb9/0x410 
kernel/notifier.c:93 call_netdevice_notifiers_info+0xbe/0x140 net/core/dev.c:1992 call_netdevice_notifiers_extack net/core/dev.c:2030 [inline] call_netdevice_notifiers net/core/dev.c:2044 [inline] dev_close_many+0x333/0x6a0 net/core/dev.c:1585 unregister_netdevice_many_notify+0x46d/0x19f0 net/core/dev.c:11193 unregister_netdevice_many net/core/dev.c:11276 [inline] default_device_exit_batch+0x85b/0xae0 net/core/dev.c:11759 ops_exit_list+0x128/0x180 net/core/net_namespace.c:178 cleanup_net+0x5b7/0xbf0 net/core/net_namespace.c:640 process_one_work+0x9fb/0x1b60 kernel/workqueue.c:3231 process_scheduled_works kernel/workqueue.c:3312 [inline] worker_thread+0x6c8/0xf70 kernel/workqueue.c:3393 kthread+0x2c1/0x3a0 kernel/kthread.c:389 ret_from_fork+0x45/0x80 arch/x86/kernel/process.c:147 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244 Fixes: d52d3997f843 ("ipv6: Create percpu rt6_info") Signed-off-by: Eric Dumazet Cc: Martin KaFai Lau Link: https://lore.kernel.org/r/20240604193549.981839-1-edumazet@google.com Signed-off-by: Paolo Abeni --- net/ipv6/ip6_fib.c | 6 +++++- net/ipv6/route.c | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 31d77885bcae3..6e57c03e3255f 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -966,6 +966,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh, if (!fib6_nh->rt6i_pcpu) return; + rcu_read_lock(); /* release the reference to this fib entry from * all of its cached pcpu routes */ @@ -974,7 +975,9 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh, struct rt6_info *pcpu_rt; ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu); - pcpu_rt = *ppcpu_rt; + + /* Paired with xchg() in rt6_get_pcpu_route() */ + pcpu_rt = READ_ONCE(*ppcpu_rt); /* only dropping the 'from' reference if the cached route * is using 'match'. The cached pcpu_rt->from only changes @@ -988,6 +991,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh, fib6_info_release(from); } } + rcu_read_unlock(); } struct fib6_nh_pcpu_arg { diff --git a/net/ipv6/route.c b/net/ipv6/route.c index a504b88ec06b5..f083d9faba6b1 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1409,6 +1409,7 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res) struct rt6_info *prev, **p; p = this_cpu_ptr(res->nh->rt6i_pcpu); + /* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */ prev = xchg(p, NULL); if (prev) { dst_dev_put(&prev->dst); From 77a92660d8fe8d29503fae768d9f5eb529c88b36 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 4 Jun 2024 01:19:04 +0900 Subject: [PATCH 379/537] kconfig: remove wrong expr_trans_bool() expr_trans_bool() performs an incorrect transformation. [Test Code] config MODULES def_bool y modules config A def_bool y select C if B != n config B def_tristate m config C tristate [Result] CONFIG_MODULES=y CONFIG_A=y CONFIG_B=m CONFIG_C=m This output is incorrect because CONFIG_C=y is expected. Documentation/kbuild/kconfig-language.rst clearly explains the function of the '!=' operator: If the values of both symbols are equal, it returns 'n', otherwise 'y'. Therefore, the statement: select C if B != n should be equivalent to: select C if y Or, more simply: select C Hence, the symbol C should be selected by the value of A, which is 'y'. However, expr_trans_bool() wrongly transforms it to: select C if B Therefore, the symbol C is selected by (A && B), which is 'm'. 
The comment block of expr_trans_bool() correctly explains its intention: * bool FOO!=n => FOO ^^^^ If FOO is bool, FOO!=n can be simplified into FOO. This is correct. However, the actual code performs this transformation when FOO is tristate: if (e->left.sym->type == S_TRISTATE) { ^^^^^^^^^^ While it can be fixed to S_BOOLEAN, there is no point in doing so because expr_tranform() already transforms FOO!=n to FOO when FOO is bool. (see the "case E_UNEQUAL" part) expr_trans_bool() is wrong and unnecessary. Signed-off-by: Masahiro Yamada Acked-by: Randy Dunlap --- scripts/kconfig/expr.c | 29 ----------------------------- scripts/kconfig/expr.h | 1 - scripts/kconfig/menu.c | 2 -- 3 files changed, 32 deletions(-) diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c index 4d95fce5f9a7a..fcc190b67b6f6 100644 --- a/scripts/kconfig/expr.c +++ b/scripts/kconfig/expr.c @@ -396,35 +396,6 @@ static struct expr *expr_eliminate_yn(struct expr *e) return e; } -/* - * bool FOO!=n => FOO - */ -struct expr *expr_trans_bool(struct expr *e) -{ - if (!e) - return NULL; - switch (e->type) { - case E_AND: - case E_OR: - case E_NOT: - e->left.expr = expr_trans_bool(e->left.expr); - e->right.expr = expr_trans_bool(e->right.expr); - break; - case E_UNEQUAL: - // FOO!=n -> FOO - if (e->left.sym->type == S_TRISTATE) { - if (e->right.sym == &symbol_no) { - e->type = E_SYMBOL; - e->right.sym = NULL; - } - } - break; - default: - ; - } - return e; -} - /* * e1 || e2 -> ? */ diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h index fa50fc45622e3..7c0c242318bcd 100644 --- a/scripts/kconfig/expr.h +++ b/scripts/kconfig/expr.h @@ -284,7 +284,6 @@ void expr_free(struct expr *e); void expr_eliminate_eq(struct expr **ep1, struct expr **ep2); int expr_eq(struct expr *e1, struct expr *e2); tristate expr_calc_value(struct expr *e); -struct expr *expr_trans_bool(struct expr *e); struct expr *expr_eliminate_dups(struct expr *e); struct expr *expr_transform(struct expr *e); int expr_contains_symbol(struct expr *dep, struct symbol *sym); diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c index 53151c5a60281..eef9b63cdf115 100644 --- a/scripts/kconfig/menu.c +++ b/scripts/kconfig/menu.c @@ -398,8 +398,6 @@ static void _menu_finalize(struct menu *parent, bool inside_choice) dep = expr_transform(dep); dep = expr_alloc_and(expr_copy(basedep), dep); dep = expr_eliminate_dups(dep); - if (menu->sym && menu->sym->type != S_TRISTATE) - dep = expr_trans_bool(dep); prop->visible.expr = dep; /* From 0dcc53abf58d572d34c5313de85f607cd33fc691 Mon Sep 17 00:00:00 2001 From: Su Hui Date: Wed, 5 Jun 2024 11:47:43 +0800 Subject: [PATCH 380/537] net: ethtool: fix the error condition in ethtool_get_phy_stats_ethtool() Clang static checker (scan-build) warning: net/ethtool/ioctl.c:line 2233, column 2 Called function pointer is null (null dereference). Return '-EOPNOTSUPP' when 'ops->get_ethtool_phy_stats' is NULL to fix this typo error. 
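For readers less used to the ops-table convention, a reduced userspace sketch of the pattern being restored follows; the struct and function names here are hypothetical stand-ins, not the ethtool code. An optional callback has to be rejected with -EOPNOTSUPP when it is absent, because invoking a NULL member is an immediate crash:

  #include <errno.h>
  #include <stdio.h>

  struct dummy_ops {
      int (*get_sset_count)(void);
      int (*get_phy_stats)(int *out);   /* optional: may be NULL */
  };

  static int query_phy_stats(const struct dummy_ops *ops, int *out)
  {
      /* the one-character bug inverted the third test */
      if (!ops || !ops->get_sset_count || !ops->get_phy_stats)
          return -EOPNOTSUPP;
      return ops->get_phy_stats(out);
  }

  static int sset_count(void) { return 1; }

  int main(void)
  {
      struct dummy_ops partial = { .get_sset_count = sset_count };
      int v;

      /* callback missing: we get -EOPNOTSUPP instead of a NULL call */
      printf("%d\n", query_phy_stats(&partial, &v));
      return 0;
  }

With the inverted check, drivers that do implement the callback were refused with -EOPNOTSUPP, while drivers that do not would have had a NULL pointer called, which is exactly what the scan-build report points at.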
Fixes: 201ed315f967 ("net/ethtool/ioctl: split ethtool_get_phy_stats into multiple helpers") Signed-off-by: Su Hui Reviewed-by: Przemek Kitszel Reviewed-by: Hariprasad Kelam Link: https://lore.kernel.org/r/20240605034742.921751-1-suhui@nfschina.com Signed-off-by: Paolo Abeni --- net/ethtool/ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 5a55270aa86e8..e645d751a5e89 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -2220,7 +2220,7 @@ static int ethtool_get_phy_stats_ethtool(struct net_device *dev, const struct ethtool_ops *ops = dev->ethtool_ops; int n_stats, ret; - if (!ops || !ops->get_sset_count || ops->get_ethtool_phy_stats) + if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats) return -EOPNOTSUPP; n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS); From 09fe2bfa6b83f865126ce3964744863f69a4a030 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 6 Jun 2024 21:14:45 +1000 Subject: [PATCH 381/537] ata: pata_macio: Fix max_segment_size with PAGE_SIZE == 64K The pata_macio driver advertises a max_segment_size of 0xff00, because the hardware doesn't cope with requests >= 64K. However the SCSI core requires max_segment_size to be at least PAGE_SIZE, which is a problem for pata_macio when the kernel is built with 64K pages. In older kernels the SCSI core would just increase the segment size to be equal to PAGE_SIZE, however since the commit tagged below it causes a warning and the device fails to probe: WARNING: CPU: 0 PID: 26 at block/blk-settings.c:202 .blk_validate_limits+0x2f8/0x35c CPU: 0 PID: 26 Comm: kworker/u4:1 Not tainted 6.10.0-rc1 #1 Hardware name: PowerMac7,2 PPC970 0x390202 PowerMac ... NIP .blk_validate_limits+0x2f8/0x35c LR .blk_alloc_queue+0xc0/0x2f8 Call Trace: .blk_alloc_queue+0xc0/0x2f8 .blk_mq_alloc_queue+0x60/0xf8 .scsi_alloc_sdev+0x208/0x3c0 .scsi_probe_and_add_lun+0x314/0x52c .__scsi_add_device+0x170/0x1a4 .ata_scsi_scan_host+0x2bc/0x3e4 .async_port_probe+0x6c/0xa0 .async_run_entry_fn+0x60/0x1bc .process_one_work+0x228/0x510 .worker_thread+0x360/0x530 .kthread+0x134/0x13c .start_kernel_thread+0x10/0x14 ... scsi_alloc_sdev: Allocation failure during SCSI scanning, some SCSI devices might not be configured Although the hardware can't cope with a 64K segment, the driver already deals with that internally by splitting large requests in pata_macio_qc_prep(). That is how the driver has managed to function until now on 64K kernels. So fix the driver to advertise a max_segment_size of 64K, which avoids the warning and keeps the SCSI core happy. 
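To make the "driver splits large requests itself" point concrete, below is a minimal standalone sketch; the constants and names are illustrative only, not the pata_macio or block-layer code. The limit advertised to the core merely has to be acceptable to it (at least PAGE_SIZE), while the true per-descriptor limit is enforced where the DMA program is built:

  #include <stdio.h>

  #define HW_MAX_CHUNK   0xff00u    /* hardware copes with < 64K per descriptor */
  #define ADVERTISED_MAX 0x10000u   /* 64K, acceptable to a 64K PAGE_SIZE kernel */

  static unsigned int split_segment(unsigned int len)
  {
      unsigned int descriptors = 0;

      while (len) {
          unsigned int chunk = len > HW_MAX_CHUNK ? HW_MAX_CHUNK : len;

          /* a real driver would program one DMA descriptor per chunk here */
          descriptors++;
          len -= chunk;
      }
      return descriptors;
  }

  int main(void)
  {
      /* a full 64K segment needs two descriptors: 0xff00 + 0x100 bytes */
      printf("descriptors for a 64K segment: %u\n", split_segment(ADVERTISED_MAX));
      return 0;
  }

That is the same shape pata_macio_qc_prep() relies on, as described above.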
Fixes: afd53a3d8528 ("scsi: core: Initialize scsi midlayer limits before allocating the queue") Reported-by: Guenter Roeck Closes: https://lore.kernel.org/all/ce2bf6af-4382-4fe1-b392-cc6829f5ceb2@roeck-us.net/ Reported-by: Doru Iorgulescu Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218858 Signed-off-by: Michael Ellerman Reviewed-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: John Garry Signed-off-by: Niklas Cassel --- drivers/ata/pata_macio.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 817838e2f70e0..3cb455a32d926 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -915,10 +915,13 @@ static const struct scsi_host_template pata_macio_sht = { .sg_tablesize = MAX_DCMDS, /* We may not need that strict one */ .dma_boundary = ATA_DMA_BOUNDARY, - /* Not sure what the real max is but we know it's less than 64K, let's - * use 64K minus 256 + /* + * The SCSI core requires the segment size to cover at least a page, so + * for 64K page size kernels this must be at least 64K. However the + * hardware can't handle 64K, so pata_macio_qc_prep() will split large + * requests. */ - .max_segment_size = MAX_DBDMA_SEG, + .max_segment_size = SZ_64K, .device_configure = pata_macio_device_configure, .sdev_groups = ata_common_sdev_groups, .can_queue = ATA_DEF_QUEUE, From 41b02ea4c0adfcc6761fbfed42c3ce6b6412d881 Mon Sep 17 00:00:00 2001 From: "Matthieu Baerts (NGI0)" Date: Wed, 5 Jun 2024 11:21:16 +0200 Subject: [PATCH 382/537] selftests: net: lib: support errexit with busywait If errexit is enabled ('set -e'), loopy_wait -- or busywait and others using it -- will stop after the first failure. Note that if the returned status of loopy_wait is checked, and even if errexit is enabled, Bash will not stop at the first error. Fixes: 25ae948b4478 ("selftests/net: add lib.sh") Cc: stable@vger.kernel.org Acked-by: Geliang Tang Signed-off-by: Matthieu Baerts (NGI0) Reviewed-by: Hangbin Liu Link: https://lore.kernel.org/r/20240605-upstream-net-20240605-selftests-net-lib-fixes-v1-1-b3afadd368c9@kernel.org Signed-off-by: Jakub Kicinski --- tools/testing/selftests/net/lib.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh index edc030e81a464..a422e10d3d3a0 100644 --- a/tools/testing/selftests/net/lib.sh +++ b/tools/testing/selftests/net/lib.sh @@ -67,9 +67,7 @@ loopy_wait() while true do local out - out=$("$@") - local ret=$? - if ((!ret)); then + if out=$("$@"); then echo -n "$out" return 0 fi From 79322174bcc780b99795cb89d237b26006a8b94b Mon Sep 17 00:00:00 2001 From: "Matthieu Baerts (NGI0)" Date: Wed, 5 Jun 2024 11:21:17 +0200 Subject: [PATCH 383/537] selftests: net: lib: avoid error removing empty netns name If there is an error to create the first netns with 'setup_ns()', 'cleanup_ns()' will be called with an empty string as first parameter. The consequences is that 'cleanup_ns()' will try to delete an invalid netns, and wait 20 seconds if the netns list is empty. Instead of just checking if the name is not empty, convert the string separated by spaces to an array. Manipulating the array is cleaner, and calling 'cleanup_ns()' with an empty array will be a no-op. 
Fixes: 25ae948b4478 ("selftests/net: add lib.sh") Cc: stable@vger.kernel.org Acked-by: Geliang Tang Signed-off-by: Matthieu Baerts (NGI0) Reviewed-by: Petr Machata Reviewed-by: Hangbin Liu Link: https://lore.kernel.org/r/20240605-upstream-net-20240605-selftests-net-lib-fixes-v1-2-b3afadd368c9@kernel.org Signed-off-by: Jakub Kicinski --- tools/testing/selftests/net/lib.sh | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh index a422e10d3d3a0..e2f51102d7e1e 100644 --- a/tools/testing/selftests/net/lib.sh +++ b/tools/testing/selftests/net/lib.sh @@ -15,7 +15,7 @@ ksft_xfail=2 ksft_skip=4 # namespace list created by setup_ns -NS_LIST="" +NS_LIST=() ############################################################################## # Helpers @@ -137,6 +137,7 @@ cleanup_ns() fi for ns in "$@"; do + [ -z "${ns}" ] && continue ip netns delete "${ns}" &> /dev/null if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then echo "Warn: Failed to remove namespace $ns" @@ -150,7 +151,7 @@ cleanup_ns() cleanup_all_ns() { - cleanup_ns $NS_LIST + cleanup_ns "${NS_LIST[@]}" } # setup netns with given names as prefix. e.g @@ -159,7 +160,7 @@ setup_ns() { local ns="" local ns_name="" - local ns_list="" + local ns_list=() local ns_exist= for ns_name in "$@"; do # Some test may setup/remove same netns multi times @@ -175,13 +176,13 @@ setup_ns() if ! ip netns add "$ns"; then echo "Failed to create namespace $ns_name" - cleanup_ns "$ns_list" + cleanup_ns "${ns_list[@]}" return $ksft_skip fi ip -n "$ns" link set lo up - ! $ns_exist && ns_list="$ns_list $ns" + ! $ns_exist && ns_list+=("$ns") done - NS_LIST="$NS_LIST $ns_list" + NS_LIST+=("${ns_list[@]}") } tc_rule_stats_get() From 84a8bc3ec225b28067b168e9410e452c83d706da Mon Sep 17 00:00:00 2001 From: "Matthieu Baerts (NGI0)" Date: Wed, 5 Jun 2024 11:21:18 +0200 Subject: [PATCH 384/537] selftests: net: lib: set 'i' as local Without this, the 'i' variable declared before could be overridden by accident, e.g. for i in "${@}"; do __ksft_status_merge "${i}" ## 'i' has been modified foo "${i}" ## using 'i' with an unexpected value done After a quick look, it looks like 'i' is currently not used after having been modified in __ksft_status_merge(), but still, better be safe than sorry. I saw this while modifying the same file, not because I suspected an issue somewhere. 
Fixes: 596c8819cb78 ("selftests: forwarding: Have RET track kselftest framework constants") Acked-by: Geliang Tang Signed-off-by: Matthieu Baerts (NGI0) Reviewed-by: Hangbin Liu Link: https://lore.kernel.org/r/20240605-upstream-net-20240605-selftests-net-lib-fixes-v1-3-b3afadd368c9@kernel.org Signed-off-by: Jakub Kicinski --- tools/testing/selftests/net/lib.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh index e2f51102d7e1e..9155c914c064f 100644 --- a/tools/testing/selftests/net/lib.sh +++ b/tools/testing/selftests/net/lib.sh @@ -27,6 +27,7 @@ __ksft_status_merge() local -A weights local weight=0 + local i for i in "$@"; do weights[$i]=$((weight++)) done From 3ac36aa7307363b7247ccb6f6a804e11496b2b36 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 29 May 2024 09:42:05 +0200 Subject: [PATCH 385/537] x86/mm/numa: Use NUMA_NO_NODE when calling memblock_set_node() memblock_set_node() warns about using MAX_NUMNODES, see e0eec24e2e19 ("memblock: make memblock_set_node() also warn about use of MAX_NUMNODES") for details. Reported-by: Narasimhan V Signed-off-by: Jan Beulich Cc: stable@vger.kernel.org [bp: commit message] Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Mike Rapoport (IBM) Tested-by: Paul E. McKenney Link: https://lore.kernel.org/r/20240603141005.23261-1-bp@kernel.org Link: https://lore.kernel.org/r/abadb736-a239-49e4-ab42-ace7acdd4278@suse.com Signed-off-by: Mike Rapoport (IBM) --- arch/x86/mm/numa.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index ce84ba86e69e9..6ce10e3c62285 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -493,7 +493,7 @@ static void __init numa_clear_kernel_node_hotplug(void) for_each_reserved_mem_region(mb_region) { int nid = memblock_get_region_node(mb_region); - if (nid != MAX_NUMNODES) + if (nid != NUMA_NO_NODE) node_set(nid, reserved_nodemask); } @@ -614,9 +614,9 @@ static int __init numa_init(int (*init_func)(void)) nodes_clear(node_online_map); memset(&numa_meminfo, 0, sizeof(numa_meminfo)); WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory, - MAX_NUMNODES)); + NUMA_NO_NODE)); WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved, - MAX_NUMNODES)); + NUMA_NO_NODE)); /* In case that parsing SRAT failed. */ WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX)); numa_reset_distance(); From f3a5367c679d31473d3fbb391675055b4792c309 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Thu, 6 Jun 2024 11:01:51 +0930 Subject: [PATCH 386/537] btrfs: protect folio::private when attaching extent buffer folios MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [BUG] Since v6.8 there are rare kernel crashes reported by various people, the common factor is bad page status error messages like this: BUG: Bad page state in process kswapd0 pfn:d6e840 page: refcount:0 mapcount:0 mapping:000000007512f4f2 index:0x2796c2c7c pfn:0xd6e840 aops:btree_aops ino:1 flags: 0x17ffffe0000008(uptodate|node=0|zone=2|lastcpupid=0x3fffff) page_type: 0xffffffff() raw: 0017ffffe0000008 dead000000000100 dead000000000122 ffff88826d0be4c0 raw: 00000002796c2c7c 0000000000000000 00000000ffffffff 0000000000000000 page dumped because: non-NULL mapping [CAUSE] Commit 09e6cef19c9f ("btrfs: refactor alloc_extent_buffer() to allocate-then-attach method") changes the sequence when allocating a new extent buffer. 
Previously we always called grab_extent_buffer() under mapping->i_private_lock, to ensure the safety on modification on folio::private (which is a pointer to extent buffer for regular sectorsize). This can lead to the following race: Thread A is trying to allocate an extent buffer at bytenr X, with 4 4K pages, meanwhile thread B is trying to release the page at X + 4K (the second page of the extent buffer at X). Thread A | Thread B -----------------------------------+------------------------------------- | btree_release_folio() | | This is for the page at X + 4K, | | Not page X. | | alloc_extent_buffer() | |- release_extent_buffer() |- filemap_add_folio() for the | | |- atomic_dec_and_test(eb->refs) | page at bytenr X (the first | | | | page). | | | | Which returned -EEXIST. | | | | | | | |- filemap_lock_folio() | | | | Returned the first page locked. | | | | | | | |- grab_extent_buffer() | | | | |- atomic_inc_not_zero() | | | | | Returned false | | | | |- folio_detach_private() | | |- folio_detach_private() for X | |- folio_test_private() | | |- folio_test_private() | Returned true | | | Returned true |- folio_put() | |- folio_put() Now there are two puts on the same folio at folio X, leading to refcount underflow of the folio X, and eventually causing the BUG_ON() on the page->mapping. The condition is not that easy to hit: - The release must be triggered for the middle page of an eb If the release is on the same first page of an eb, page lock would kick in and prevent the race. - folio_detach_private() has a very small race window It's only between folio_test_private() and folio_clear_private(). That's exactly when mapping->i_private_lock is used to prevent such race, and commit 09e6cef19c9f ("btrfs: refactor alloc_extent_buffer() to allocate-then-attach method") screwed that up. At that time, I thought the page lock would kick in as filemap_release_folio() also requires the page to be locked, but forgot the filemap_release_folio() only locks one page, not all pages of an extent buffer. [FIX] Move all the code requiring i_private_lock into attach_eb_folio_to_filemap(), so that everything is done with proper lock protection. Furthermore to prevent future problems, add an extra lockdep_assert_locked() to ensure we're holding the proper lock. 
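The locking rule the fix enforces is easier to see on a reduced pthreads sketch (build with -pthread); the names below are stand-ins, not btrfs code. The "is anything attached yet?" test and the attach itself must sit in one critical section so a concurrent detach or second attach cannot interleave between them:

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t private_lock = PTHREAD_MUTEX_INITIALIZER;
  static void *folio_private;    /* stands in for folio->private */
  static int eb_a, eb_b;         /* stand-ins for two would-be extent buffers */

  /* attach only if nothing is attached yet; check + store under one lock */
  static void *attach_or_get(void *eb)
  {
      void *owner;

      pthread_mutex_lock(&private_lock);
      if (!folio_private)
          folio_private = eb;
      owner = folio_private;
      pthread_mutex_unlock(&private_lock);
      return owner;
  }

  static void *thread_fn(void *eb)
  {
      return attach_or_get(eb);
  }

  int main(void)
  {
      pthread_t t1, t2;
      void *r1, *r2;

      pthread_create(&t1, NULL, thread_fn, &eb_a);
      pthread_create(&t2, NULL, thread_fn, &eb_b);
      pthread_join(t1, &r1);
      pthread_join(t2, &r2);

      /* both racers agree on a single owner, so no double attach or double put */
      printf("same owner: %s\n", r1 == r2 ? "yes" : "no");
      return 0;
  }

In the kernel the same critical section is additionally documented with a lockdep assertion, so any future caller that forgets the lock is flagged immediately.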
To reproducer that is able to hit the race (takes a few minutes with instrumented code inserting delays to alloc_extent_buffer()): #!/bin/sh drop_caches () { while(true); do echo 3 > /proc/sys/vm/drop_caches echo 1 > /proc/sys/vm/compact_memory done } run_tar () { while(true); do for x in `seq 1 80` ; do tar cf /dev/zero /mnt > /dev/null & done wait done } mkfs.btrfs -f -d single -m single /dev/vda mount -o noatime /dev/vda /mnt # create 200,000 files, 1K each ./simoop -n 200000 -E -f 1k /mnt drop_caches & (run_tar) Reported-by: Linus Torvalds Link: https://lore.kernel.org/linux-btrfs/CAHk-=wgt362nGfScVOOii8cgKn2LVVHeOvOA7OBwg1OwbuJQcw@mail.gmail.com/ Reported-by: Mikhail Gavrilov Link: https://lore.kernel.org/lkml/CABXGCsPktcHQOvKTbPaTwegMExije=Gpgci5NW=hqORo-s7diA@mail.gmail.com/ Reported-by: Toralf Förster Link: https://lore.kernel.org/linux-btrfs/e8b3311c-9a75-4903-907f-fc0f7a3fe423@gmx.de/ Reported-by: syzbot+f80b066392366b4af85e@syzkaller.appspotmail.com Fixes: 09e6cef19c9f ("btrfs: refactor alloc_extent_buffer() to allocate-then-attach method") CC: stable@vger.kernel.org # 6.8+ CC: Chris Mason Reviewed-by: Filipe Manana Reviewed-by: Josef Bacik Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 60 +++++++++++++++++++++++--------------------- 1 file changed, 31 insertions(+), 29 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 597387e9f0400..f688fab55251e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3689,6 +3689,8 @@ static struct extent_buffer *grab_extent_buffer( struct folio *folio = page_folio(page); struct extent_buffer *exists; + lockdep_assert_held(&page->mapping->i_private_lock); + /* * For subpage case, we completely rely on radix tree to ensure we * don't try to insert two ebs for the same bytenr. So here we always @@ -3756,13 +3758,14 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start) * The caller needs to free the existing folios and retry using the same order. */ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i, + struct btrfs_subpage *prealloc, struct extent_buffer **found_eb_ret) { struct btrfs_fs_info *fs_info = eb->fs_info; struct address_space *mapping = fs_info->btree_inode->i_mapping; const unsigned long index = eb->start >> PAGE_SHIFT; - struct folio *existing_folio; + struct folio *existing_folio = NULL; int ret; ASSERT(found_eb_ret); @@ -3774,12 +3777,14 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i, ret = filemap_add_folio(mapping, eb->folios[i], index + i, GFP_NOFS | __GFP_NOFAIL); if (!ret) - return 0; + goto finish; existing_folio = filemap_lock_folio(mapping, index + i); /* The page cache only exists for a very short time, just retry. */ - if (IS_ERR(existing_folio)) + if (IS_ERR(existing_folio)) { + existing_folio = NULL; goto retry; + } /* For now, we should only have single-page folios for btree inode. */ ASSERT(folio_nr_pages(existing_folio) == 1); @@ -3790,14 +3795,13 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i, return -EAGAIN; } - if (fs_info->nodesize < PAGE_SIZE) { - /* - * We're going to reuse the existing page, can drop our page - * and subpage structure now. - */ +finish: + spin_lock(&mapping->i_private_lock); + if (existing_folio && fs_info->nodesize < PAGE_SIZE) { + /* We're going to reuse the existing page, can drop our folio now. 
*/ __free_page(folio_page(eb->folios[i], 0)); eb->folios[i] = existing_folio; - } else { + } else if (existing_folio) { struct extent_buffer *existing_eb; existing_eb = grab_extent_buffer(fs_info, @@ -3805,6 +3809,7 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i, if (existing_eb) { /* The extent buffer still exists, we can use it directly. */ *found_eb_ret = existing_eb; + spin_unlock(&mapping->i_private_lock); folio_unlock(existing_folio); folio_put(existing_folio); return 1; @@ -3813,6 +3818,22 @@ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i, __free_page(folio_page(eb->folios[i], 0)); eb->folios[i] = existing_folio; } + eb->folio_size = folio_size(eb->folios[i]); + eb->folio_shift = folio_shift(eb->folios[i]); + /* Should not fail, as we have preallocated the memory. */ + ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc); + ASSERT(!ret); + /* + * To inform we have an extra eb under allocation, so that + * detach_extent_buffer_page() won't release the folio private when the + * eb hasn't been inserted into radix tree yet. + * + * The ref will be decreased when the eb releases the page, in + * detach_extent_buffer_page(). Thus needs no special handling in the + * error path. + */ + btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]); + spin_unlock(&mapping->i_private_lock); return 0; } @@ -3824,7 +3845,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, int attached = 0; struct extent_buffer *eb; struct extent_buffer *existing_eb = NULL; - struct address_space *mapping = fs_info->btree_inode->i_mapping; struct btrfs_subpage *prealloc = NULL; u64 lockdep_owner = owner_root; bool page_contig = true; @@ -3890,7 +3910,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, for (int i = 0; i < num_folios; i++) { struct folio *folio; - ret = attach_eb_folio_to_filemap(eb, i, &existing_eb); + ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb); if (ret > 0) { ASSERT(existing_eb); goto out; @@ -3927,24 +3947,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, * and free the allocated page. */ folio = eb->folios[i]; - eb->folio_size = folio_size(folio); - eb->folio_shift = folio_shift(folio); - spin_lock(&mapping->i_private_lock); - /* Should not fail, as we have preallocated the memory */ - ret = attach_extent_buffer_folio(eb, folio, prealloc); - ASSERT(!ret); - /* - * To inform we have extra eb under allocation, so that - * detach_extent_buffer_page() won't release the folio private - * when the eb hasn't yet been inserted into radix tree. - * - * The ref will be decreased when the eb released the page, in - * detach_extent_buffer_page(). - * Thus needs no special handling in error path. - */ - btrfs_folio_inc_eb_refs(fs_info, folio); - spin_unlock(&mapping->i_private_lock); - WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len)); /* From bb93148ca831c30248270b4d81d3ccca23faa1bf Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Wed, 15 May 2024 19:22:22 +0300 Subject: [PATCH 387/537] MAINTAINERS: update Xe driver maintainers Because I left Intel, I'm removing myself from the list of Xe driver maintainers. 
Signed-off-by: Oded Gabbay Acked-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20240515162222.12958-3-ogabbay@kernel.org Signed-off-by: Lucas De Marchi (cherry picked from commit 8de6625dafd202da733188f632701e9109188035) Signed-off-by: Lucas De Marchi --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index d6c90161c7bfe..69f553485b9c2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11035,7 +11035,6 @@ F: include/uapi/drm/i915_drm.h INTEL DRM XE DRIVER (Lunar Lake and newer) M: Lucas De Marchi -M: Oded Gabbay M: Thomas Hellström L: intel-xe@lists.freedesktop.org S: Supported From a9f9b30e1748252d158f78a0c0affdc949671dd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= Date: Sun, 2 Jun 2024 21:09:59 +0200 Subject: [PATCH 388/537] MAINTAINERS: Update Xe driver maintainers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add Rodrigo Vivi as an Xe driver maintainer. v2: - Cc also Lucas De Marchi (Rodrigo vivi) - Remove a blank line in commit the commit message (Lucas De Marchi) Cc: David Airlie Cc: Daniel Vetter Cc: Rodrigo Vivi Cc: Lucas De Marchi Cc: dri-devel@lists.freedesktop.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Thomas Hellström Acked-by: Rodrigo Vivi Acked-by: Lucas De Marchi Acked-by: Jani Nikula Acked-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20240602190959.2981-1-thomas.hellstrom@linux.intel.com Signed-off-by: Lucas De Marchi --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 69f553485b9c2..c517af00df84d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11036,6 +11036,7 @@ F: include/uapi/drm/i915_drm.h INTEL DRM XE DRIVER (Lunar Lake and newer) M: Lucas De Marchi M: Thomas Hellström +M: Rodrigo Vivi L: intel-xe@lists.freedesktop.org S: Supported W: https://drm.pages.freedesktop.org/intel-docs/ From 96c965667b9dbbd713acdffa95ebab8c225f8595 Mon Sep 17 00:00:00 2001 From: Richard Acayan Date: Wed, 5 Jun 2024 13:58:09 -0400 Subject: [PATCH 389/537] kbuild: explicitly run mksysmap as sed script from link-vmlinux.sh In commit b18b047002b7 ("kbuild: change scripts/mksysmap into sed script"), the mksysmap script was transformed into a sed script, made directly executable with "#!/bin/sed -f". Apparently, the path to sed is different on NixOS. The shebang can't use the env command, otherwise the "sed -f" command would be treated as a single argument. This can be solved with the -S flag, but that is a GNU extension. Explicitly use sed instead of relying on the executable shebang to fix NixOS builds without breaking build environments using Busybox. 
Fixes: b18b047002b7 ("kbuild: change scripts/mksysmap into sed script") Reported-by: Kent Overstreet Signed-off-by: Richard Acayan Reviewed-by: Nathan Chancellor Tested-by: Dmitry Safonov <0x7f454c46@gmail.com> Signed-off-by: Masahiro Yamada --- scripts/link-vmlinux.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 46ce5d04dbeb1..518c70b8db507 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -193,7 +193,7 @@ kallsyms_step() mksysmap() { info NM ${2} - ${NM} -n "${1}" | "${srctree}/scripts/mksysmap" > "${2}" + ${NM} -n "${1}" | sed -f "${srctree}/scripts/mksysmap" > "${2}" } sorttable() From 9185afeac2a3dcce8300a5684291a43c2838cfd6 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 7 Jun 2024 03:36:12 +0900 Subject: [PATCH 390/537] modpost: do not warn about missing MODULE_DESCRIPTION() for vmlinux.o Building with W=1 incorrectly emits the following warning: WARNING: modpost: missing MODULE_DESCRIPTION() in vmlinux.o This check should apply only to modules. Fixes: 1fffe7a34c89 ("script: modpost: emit a warning when the description is missing") Signed-off-by: Masahiro Yamada Reviewed-by: Vincenzo Palazzo --- scripts/mod/modpost.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 937294ff164fc..f48d72d22dc2a 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -1647,10 +1647,11 @@ static void read_symbols(const char *modname) namespace = get_next_modinfo(&info, "import_ns", namespace); } + + if (extra_warn && !get_modinfo(&info, "description")) + warn("missing MODULE_DESCRIPTION() in %s\n", modname); } - if (extra_warn && !get_modinfo(&info, "description")) - warn("missing MODULE_DESCRIPTION() in %s\n", modname); for (sym = info.symtab_start; sym < info.symtab_stop; sym++) { symname = remove_dot(info.strtab + sym->st_name); From e96b2933152fd87b6a41765b2f58b158fde855b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cs=C3=B3k=C3=A1s=2C=20Bence?= Date: Wed, 5 Jun 2024 10:42:51 +0200 Subject: [PATCH 391/537] net: sfp: Always call `sfp_sm_mod_remove()` on remove MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the module is in SFP_MOD_ERROR, `sfp_sm_mod_remove()` will not be run. As a consequence, `sfp_hwmon_remove()` is not getting run either, leaving a stale `hwmon` device behind. `sfp_sm_mod_remove()` itself checks `sfp->sm_mod_state` anyways, so this check was not really needed in the first place. 
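The shape of that fix shows up well on a stripped-down state machine; the sketch below is generic, not the sfp driver itself. Handling the remove event up front, before the per-state switch, guarantees the teardown path runs even when the module sits in an error state:

  #include <stdio.h>

  enum mod_state { MOD_EMPTY, MOD_PROBE, MOD_PRESENT, MOD_ERROR };
  enum mod_event { EV_INSERT, EV_READY, EV_FAULT, EV_REMOVE };

  static enum mod_state state = MOD_EMPTY;

  static void mod_remove_cleanup(void)
  {
      /* undo everything probe may have set up (hwmon registration, etc.) */
      printf("cleanup ran, previous state %d\n", state);
  }

  static void handle_event(enum mod_event ev)
  {
      /* remove resets the machine from any state, including MOD_ERROR */
      if (ev == EV_REMOVE) {
          mod_remove_cleanup();
          state = MOD_EMPTY;
          return;
      }

      switch (state) {
      case MOD_EMPTY:
          if (ev == EV_INSERT)
              state = MOD_PROBE;
          break;
      case MOD_PROBE:
          if (ev == EV_READY)
              state = MOD_PRESENT;
          else if (ev == EV_FAULT)
              state = MOD_ERROR;
          break;
      case MOD_PRESENT:
          if (ev == EV_FAULT)
              state = MOD_ERROR;
          break;
      case MOD_ERROR:
          /* nothing more to do until the module is pulled */
          break;
      }
  }

  int main(void)
  {
      handle_event(EV_INSERT);
      handle_event(EV_FAULT);    /* module ends up in MOD_ERROR */
      handle_event(EV_REMOVE);   /* cleanup still runs */
      return 0;
  }

This mirrors why the extra sm_mod_state > SFP_MOD_PROBE guard was safe to drop: the remove handler already checks whatever state it needs internally.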
Fixes: d2e816c0293f ("net: sfp: handle module remove outside state machine") Signed-off-by: "Csókás, Bence" Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20240605084251.63502-1-csokas.bence@prolan.hu Signed-off-by: Jakub Kicinski --- drivers/net/phy/sfp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 3f9cbd797fd67..a5684ef5884bd 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -2429,8 +2429,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event) /* Handle remove event globally, it resets this state machine */ if (event == SFP_E_REMOVE) { - if (sfp->sm_mod_state > SFP_MOD_PROBE) - sfp_sm_mod_remove(sfp); + sfp_sm_mod_remove(sfp); sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0); return; } From 02c418774f76a0a36a6195c9dbf8971eb4130a15 Mon Sep 17 00:00:00 2001 From: Enzo Matsumiya Date: Thu, 6 Jun 2024 13:13:13 -0300 Subject: [PATCH 392/537] smb: client: fix deadlock in smb2_find_smb_tcon() Unlock cifs_tcp_ses_lock before calling cifs_put_smb_ses() to avoid such deadlock. Cc: stable@vger.kernel.org Signed-off-by: Enzo Matsumiya Reviewed-by: Shyam Prasad N Reviewed-by: Paulo Alcantara (Red Hat) Signed-off-by: Steve French --- fs/smb/client/smb2transport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c index 02135a6053051..1476c445cadcf 100644 --- a/fs/smb/client/smb2transport.c +++ b/fs/smb/client/smb2transport.c @@ -216,8 +216,8 @@ smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid) } tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid); if (!tcon) { - cifs_put_smb_ses(ses); spin_unlock(&cifs_tcp_ses_lock); + cifs_put_smb_ses(ses); return NULL; } spin_unlock(&cifs_tcp_ses_lock); From a88d60903696c01de577558080ec4fc738a70475 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 31 May 2024 15:53:42 +0100 Subject: [PATCH 393/537] cifs: Don't advance the I/O iterator before terminating subrequest There's now no need to make sure subreq->io_iter is advanced to match subreq->transferred before calling one of the netfs subrequest termination functions as the check has been removed netfslib and the iterator is reset prior to retrying a subreq. Fixes: 3ee1a1fc3981 ("cifs: Cut over to using netfslib") Signed-off-by: David Howells cc: Steve French cc: Paulo Alcantara cc: Shyam Prasad N cc: Rohith Surabattula cc: Jeff Layton cc: linux-cifs@vger.kernel.org cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org cc: linux-mm@kvack.org Signed-off-by: Steve French --- fs/smb/client/smb2pdu.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index 993ac36c3d585..38a06e8a0f90f 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -4577,8 +4577,6 @@ smb2_readv_callback(struct mid_q_entry *mid) if (rdata->subreq.start < rdata->subreq.rreq->i_size) rdata->result = 0; } - if (rdata->result == 0 || rdata->result == -EAGAIN) - iov_iter_advance(&rdata->subreq.io_iter, rdata->got_bytes); rdata->credits.value = 0; netfs_subreq_terminated(&rdata->subreq, (rdata->result == 0 || rdata->result == -EAGAIN) ? 
@@ -4789,7 +4787,6 @@ smb2_writev_callback(struct mid_q_entry *mid) wdata->result = -ENOSPC; else wdata->subreq.len = written; - iov_iter_advance(&wdata->subreq.io_iter, written); break; case MID_REQUEST_SUBMITTED: case MID_RETRY_NEEDED: From 64054eb716db52e4246527dc9414377c5bc5b01d Mon Sep 17 00:00:00 2001 From: Jeff Johnson Date: Thu, 6 Jun 2024 20:23:50 -0700 Subject: [PATCH 394/537] gpio: add missing MODULE_DESCRIPTION() macros On x86, make allmodconfig && make W=1 C=1 reports: WARNING: modpost: missing MODULE_DESCRIPTION() in drivers/gpio/gpio-gw-pld.o WARNING: modpost: missing MODULE_DESCRIPTION() in drivers/gpio/gpio-mc33880.o WARNING: modpost: missing MODULE_DESCRIPTION() in drivers/gpio/gpio-pcf857x.o Add the missing invocations of the MODULE_DESCRIPTION() macro, including the one missing in gpio-pl061.c, which is not built for x86. Signed-off-by: Jeff Johnson Link: https://lore.kernel.org/r/20240606-md-drivers-gpio-v1-1-cb42d240ca5c@quicinc.com Signed-off-by: Bartosz Golaszewski --- drivers/gpio/gpio-gw-pld.c | 1 + drivers/gpio/gpio-mc33880.c | 1 + drivers/gpio/gpio-pcf857x.c | 1 + drivers/gpio/gpio-pl061.c | 1 + 4 files changed, 4 insertions(+) diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c index 899335da93c78..7e29a2d8de1aa 100644 --- a/drivers/gpio/gpio-gw-pld.c +++ b/drivers/gpio/gpio-gw-pld.c @@ -130,5 +130,6 @@ static struct i2c_driver gw_pld_driver = { }; module_i2c_driver(gw_pld_driver); +MODULE_DESCRIPTION("Gateworks I2C PLD GPIO expander"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Linus Walleij "); diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c index cd9b16dbe1a97..94f6fefc011bf 100644 --- a/drivers/gpio/gpio-mc33880.c +++ b/drivers/gpio/gpio-mc33880.c @@ -168,5 +168,6 @@ static void __exit mc33880_exit(void) module_exit(mc33880_exit); MODULE_AUTHOR("Mocean Laboratories "); +MODULE_DESCRIPTION("MC33880 high-side/low-side switch GPIO driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index 53b69abe67871..7c57eaeb0afeb 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -438,5 +438,6 @@ static void __exit pcf857x_exit(void) } module_exit(pcf857x_exit); +MODULE_DESCRIPTION("Driver for pcf857x, pca857x, and pca967x I2C GPIO expanders"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Brownell"); diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index 9fc1f3dd4190d..a211a02d4b4a6 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c @@ -438,4 +438,5 @@ static struct amba_driver pl061_gpio_driver = { }; module_amba_driver(pl061_gpio_driver); +MODULE_DESCRIPTION("Driver for the ARM PrimeCell(tm) General Purpose Input/Output (PL061)"); MODULE_LICENSE("GPL v2"); From 89e1ee118d6f0ee6bd6e80d8fe08839875daa241 Mon Sep 17 00:00:00 2001 From: Andrew Ballance Date: Sun, 2 Jun 2024 03:50:23 -0500 Subject: [PATCH 395/537] hid: asus: asus_report_fixup: fix potential read out of bounds syzbot reported a potential read out of bounds in asus_report_fixup. 
this patch adds checks so that a read out of bounds will not occur Signed-off-by: Andrew Ballance Reported-by: Closes: https://syzkaller.appspot.com/bug?extid=07762f019fd03d01f04c Fixes: 59d2f5b7392e ("HID: asus: fix more n-key report descriptors if n-key quirked") Link: https://lore.kernel.org/r/20240602085023.1720492-1-andrewjballance@gmail.com Signed-off-by: Benjamin Tissoires --- drivers/hid/hid-asus.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 02de2bf4f7907..37e6d25593c21 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -1204,8 +1204,8 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, } /* match many more n-key devices */ - if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) { - for (int i = 0; i < *rsize + 1; i++) { + if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD && *rsize > 15) { + for (int i = 0; i < *rsize - 15; i++) { /* offset to the count from 0x5a report part always 14 */ if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a && rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) { From 0c7dd00de018ff70b3452c424901816e26366a8a Mon Sep 17 00:00:00 2001 From: Aseda Aboagye Date: Tue, 4 Jun 2024 23:10:47 +0000 Subject: [PATCH 396/537] input: Add event code for accessibility key HUTRR116 added support for a new usage titled "System Accessibility Binding" which toggles a system-wide bound accessibility UI or command. This commit simply adds a new event code for the usage. Signed-off-by: Aseda Aboagye Acked-by: Dmitry Torokhov Link: https://lore.kernel.org/r/Zl-e97O9nvudco5z@google.com Signed-off-by: Benjamin Tissoires --- drivers/hid/hid-debug.c | 1 + drivers/hid/hid-input.c | 1 + include/uapi/linux/input-event-codes.h | 1 + 3 files changed, 3 insertions(+) diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 87a961cae7756..322ed63a98abe 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -3366,6 +3366,7 @@ static const char *keys[KEY_MAX + 1] = { [KEY_CAMERA_ACCESS_ENABLE] = "CameraAccessEnable", [KEY_CAMERA_ACCESS_DISABLE] = "CameraAccessDisable", [KEY_CAMERA_ACCESS_TOGGLE] = "CameraAccessToggle", + [KEY_ACCESSIBILITY] = "Accessibility", [KEY_DICTATE] = "Dictate", [KEY_MICMUTE] = "MicrophoneMute", [KEY_BRIGHTNESS_MIN] = "BrightnessMin", diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index e03d300d2bac4..d5a6e89c3086e 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -836,6 +836,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel if ((usage->hid & 0xf0) == 0xa0) { /* SystemControl */ switch (usage->hid & 0xf) { case 0x9: map_key_clear(KEY_MICMUTE); break; + case 0xa: map_key_clear(KEY_ACCESSIBILITY); break; default: goto ignore; } break; diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 03edf2ccdf6c8..39f11ec676fae 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -618,6 +618,7 @@ #define KEY_CAMERA_ACCESS_ENABLE 0x24b /* Enables programmatic access to camera devices. (HUTRR72) */ #define KEY_CAMERA_ACCESS_DISABLE 0x24c /* Disables programmatic access to camera devices. (HUTRR72) */ #define KEY_CAMERA_ACCESS_TOGGLE 0x24d /* Toggles the current state of the camera access control. 
(HUTRR72) */ +#define KEY_ACCESSIBILITY 0x24e /* Toggles the system bound accessibility UI/command (HUTRR116) */ #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ From 22d6d060ac77955291deb43efc2f3f4f9632c6cb Mon Sep 17 00:00:00 2001 From: Aseda Aboagye Date: Tue, 4 Jun 2024 23:16:32 +0000 Subject: [PATCH 397/537] input: Add support for "Do Not Disturb" HUTRR94 added support for a new usage titled "System Do Not Disturb" which toggles a system-wide Do Not Disturb setting. This commit simply adds a new event code for the usage. Signed-off-by: Aseda Aboagye Acked-by: Dmitry Torokhov Link: https://lore.kernel.org/r/Zl-gUHE70s7wCAoB@google.com Signed-off-by: Benjamin Tissoires --- drivers/hid/hid-debug.c | 1 + drivers/hid/hid-input.c | 8 ++++++++ include/uapi/linux/input-event-codes.h | 1 + 3 files changed, 10 insertions(+) diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 322ed63a98abe..d5abfe652fb50 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -3367,6 +3367,7 @@ static const char *keys[KEY_MAX + 1] = { [KEY_CAMERA_ACCESS_DISABLE] = "CameraAccessDisable", [KEY_CAMERA_ACCESS_TOGGLE] = "CameraAccessToggle", [KEY_ACCESSIBILITY] = "Accessibility", + [KEY_DO_NOT_DISTURB] = "DoNotDisturb", [KEY_DICTATE] = "Dictate", [KEY_MICMUTE] = "MicrophoneMute", [KEY_BRIGHTNESS_MIN] = "BrightnessMin", diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index d5a6e89c3086e..8bb16e9b94aa5 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -833,6 +833,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel break; } + if ((usage->hid & 0xf0) == 0x90) { /* SystemControl*/ + switch (usage->hid & 0xf) { + case 0xb: map_key_clear(KEY_DO_NOT_DISTURB); break; + default: goto ignore; + } + break; + } + if ((usage->hid & 0xf0) == 0xa0) { /* SystemControl */ switch (usage->hid & 0xf) { case 0x9: map_key_clear(KEY_MICMUTE); break; diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 39f11ec676fae..a4206723f5033 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -619,6 +619,7 @@ #define KEY_CAMERA_ACCESS_DISABLE 0x24c /* Disables programmatic access to camera devices. (HUTRR72) */ #define KEY_CAMERA_ACCESS_TOGGLE 0x24d /* Toggles the current state of the camera access control. (HUTRR72) */ #define KEY_ACCESSIBILITY 0x24e /* Toggles the system bound accessibility UI/command (HUTRR116) */ +#define KEY_DO_NOT_DISTURB 0x24f /* Toggles the system-wide "Do Not Disturb" control (HUTRR94)*/ #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ From 8d3ae46c64336b538898af76b4f504dfbe89b886 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 7 May 2024 16:48:15 +0200 Subject: [PATCH 398/537] dt-bindings: HID: i2c-hid: add dedicated Ilitek ILI2901 schema The Ilitek ILI2901 touch screen controller was apparently incorrectly added to the Elan eKTH6915 schema simply because it also has a reset gpio and is currently managed by the Elan driver in Linux. The two controllers are not related even if an unfortunate wording in the commit message adding the Ilitek compatible made it sound like they were. 
Add a dedicated schema for the ILI2901 which does not specify the I2C address (which is likely 0x41 rather than 0x10 as for other Ilitek touch controllers) to avoid cluttering the Elan schema with unrelated devices and to make it easier to find the correct schema when adding further Ilitek controllers. Fixes: d74ac6f60a7e ("dt-bindings: HID: i2c-hid: elan: Introduce Ilitek ili2901") Cc: Zhengqiao Xia Reviewed-by: Krzysztof Kozlowski Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20240507144821.12275-2-johan+linaro@kernel.org Signed-off-by: Benjamin Tissoires --- .../bindings/input/elan,ekth6915.yaml | 1 - .../bindings/input/ilitek,ili2901.yaml | 66 +++++++++++++++++++ 2 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 Documentation/devicetree/bindings/input/ilitek,ili2901.yaml diff --git a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml index dc4ac41f24411..c1fcf8c90c24d 100644 --- a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml +++ b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml @@ -20,7 +20,6 @@ properties: compatible: enum: - elan,ekth6915 - - ilitek,ili2901 reg: const: 0x10 diff --git a/Documentation/devicetree/bindings/input/ilitek,ili2901.yaml b/Documentation/devicetree/bindings/input/ilitek,ili2901.yaml new file mode 100644 index 0000000000000..1abeec768d79c --- /dev/null +++ b/Documentation/devicetree/bindings/input/ilitek,ili2901.yaml @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/input/ilitek,ili2901.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Ilitek ILI2901 touchscreen controller + +maintainers: + - Jiri Kosina + +description: + Supports the Ilitek ILI2901 touchscreen controller. + This touchscreen controller uses the i2c-hid protocol with a reset GPIO. + +allOf: + - $ref: /schemas/input/touchscreen/touchscreen.yaml# + +properties: + compatible: + enum: + - ilitek,ili2901 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + panel: true + + reset-gpios: + maxItems: 1 + + vcc33-supply: true + + vccio-supply: true + +required: + - compatible + - reg + - interrupts + - vcc33-supply + +additionalProperties: false + +examples: + - | + #include + #include + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + touchscreen@41 { + compatible = "ilitek,ili2901"; + reg = <0x41>; + + interrupt-parent = <&tlmm>; + interrupts = <9 IRQ_TYPE_LEVEL_LOW>; + + reset-gpios = <&tlmm 8 GPIO_ACTIVE_LOW>; + vcc33-supply = <&pp3300_ts>; + }; + }; From 07fc16fa55522520f5e4b2e35c72650f11079c35 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 7 May 2024 16:48:16 +0200 Subject: [PATCH 399/537] dt-bindings: HID: i2c-hid: elan: add Elan eKTH5015M Add a compatible string for the Elan eKTH5015M touch controller. Judging from the current binding and commit bd3cba00dcc6 ("HID: i2c-hid: elan: Add support for Elan eKTH6915 i2c-hid touchscreens"), eKTH5015M appears to be compatible with eKTH6915. Notably the power-on sequence is the same. While at it, drop a redundant label from the example. 
Reviewed-by: Krzysztof Kozlowski Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20240507144821.12275-3-johan+linaro@kernel.org Signed-off-by: Benjamin Tissoires --- .../devicetree/bindings/input/elan,ekth6915.yaml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml index c1fcf8c90c24d..be84f7ed0abc9 100644 --- a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml +++ b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml @@ -18,8 +18,12 @@ allOf: properties: compatible: - enum: - - elan,ekth6915 + oneOf: + - items: + - enum: + - elan,ekth5015m + - const: elan,ekth6915 + - const: elan,ekth6915 reg: const: 0x10 @@ -57,8 +61,8 @@ examples: #address-cells = <1>; #size-cells = <0>; - ap_ts: touchscreen@10 { - compatible = "elan,ekth6915"; + touchscreen@10 { + compatible = "elan,ekth5015m", "elan,ekth6915"; reg = <0x10>; interrupt-parent = <&tlmm>; From e538d4b85b8f1e3534dfbb42c2273f18bbb59d6e Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 7 May 2024 16:48:17 +0200 Subject: [PATCH 400/537] dt-bindings: HID: i2c-hid: elan: add 'no-reset-on-power-off' property When the power supply is shared with other peripherals the reset line can be wired in such a way that it can remain deasserted regardless of whether the supply is on or not. This is important as it can be used to avoid holding the controller in reset for extended periods of time when it remains powered, something which can lead to increased power consumption. Leaving reset deasserted also avoids leaking current through the reset circuitry pull-up resistors. Add a new 'no-reset-on-power-off' devicetree property which can be used by the OS to determine when reset needs to be asserted on power down. Note that this property can also be used when the supply cannot be turned off by the OS at all. Signed-off-by: Johan Hovold Reviewed-by: Krzysztof Kozlowski Reviewed-by: Linus Walleij Link: https://lore.kernel.org/r/20240507144821.12275-4-johan+linaro@kernel.org Signed-off-by: Benjamin Tissoires --- Documentation/devicetree/bindings/input/elan,ekth6915.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml index be84f7ed0abc9..a62916d07a08c 100644 --- a/Documentation/devicetree/bindings/input/elan,ekth6915.yaml +++ b/Documentation/devicetree/bindings/input/elan,ekth6915.yaml @@ -36,6 +36,12 @@ properties: reset-gpios: description: Reset GPIO; not all touchscreens using eKTH6915 hook this up. + no-reset-on-power-off: + type: boolean + description: + Reset line is wired so that it can (and should) be left deasserted when + the power supply is off. + vcc33-supply: description: The 3.3V supply to the touchscreen. From 0eafc58f2194dbd01d4be40f99a697681171995b Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 7 May 2024 16:48:18 +0200 Subject: [PATCH 401/537] HID: i2c-hid: elan: fix reset suspend current leakage The Elan eKTH5015M touch controller found on the Lenovo ThinkPad X13s shares the VCC33 supply with other peripherals that may remain powered during suspend (e.g. when enabled as wakeup sources). The reset line is also wired so that it can be left deasserted when the supply is off. 
This is important as it avoids holding the controller in reset for extended periods of time when it remains powered, which can lead to increased power consumption, and also avoids leaking current through the X13s reset circuitry during suspend (and after driver unbind). Use the new 'no-reset-on-power-off' devicetree property to determine when reset needs to be asserted on power down. Notably this also avoids wasting power on machine variants without a touchscreen for which the driver would otherwise exit probe with reset asserted. Fixes: bd3cba00dcc6 ("HID: i2c-hid: elan: Add support for Elan eKTH6915 i2c-hid touchscreens") Cc: # 6.0 Cc: Douglas Anderson Tested-by: Steev Klimaszewski Signed-off-by: Johan Hovold Reviewed-by: Douglas Anderson Link: https://lore.kernel.org/r/20240507144821.12275-5-johan+linaro@kernel.org Signed-off-by: Benjamin Tissoires --- drivers/hid/i2c-hid/i2c-hid-of-elan.c | 59 +++++++++++++++++++++------ 1 file changed, 47 insertions(+), 12 deletions(-) diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c index 5b91fb106cfc3..091e37933225a 100644 --- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c +++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c @@ -31,6 +31,7 @@ struct i2c_hid_of_elan { struct regulator *vcc33; struct regulator *vccio; struct gpio_desc *reset_gpio; + bool no_reset_on_power_off; const struct elan_i2c_hid_chip_data *chip_data; }; @@ -40,17 +41,17 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops) container_of(ops, struct i2c_hid_of_elan, ops); int ret; + gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1); + if (ihid_elan->vcc33) { ret = regulator_enable(ihid_elan->vcc33); if (ret) - return ret; + goto err_deassert_reset; } ret = regulator_enable(ihid_elan->vccio); - if (ret) { - regulator_disable(ihid_elan->vcc33); - return ret; - } + if (ret) + goto err_disable_vcc33; if (ihid_elan->chip_data->post_power_delay_ms) msleep(ihid_elan->chip_data->post_power_delay_ms); @@ -60,6 +61,15 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops) msleep(ihid_elan->chip_data->post_gpio_reset_on_delay_ms); return 0; + +err_disable_vcc33: + if (ihid_elan->vcc33) + regulator_disable(ihid_elan->vcc33); +err_deassert_reset: + if (ihid_elan->no_reset_on_power_off) + gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0); + + return ret; } static void elan_i2c_hid_power_down(struct i2chid_ops *ops) @@ -67,7 +77,14 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops) struct i2c_hid_of_elan *ihid_elan = container_of(ops, struct i2c_hid_of_elan, ops); - gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1); + /* + * Do not assert reset when the hardware allows for it to remain + * deasserted regardless of the state of the (shared) power supply to + * avoid wasting power when the supply is left on. 
+ */ + if (!ihid_elan->no_reset_on_power_off) + gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1); + if (ihid_elan->chip_data->post_gpio_reset_off_delay_ms) msleep(ihid_elan->chip_data->post_gpio_reset_off_delay_ms); @@ -79,6 +96,7 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops) static int i2c_hid_of_elan_probe(struct i2c_client *client) { struct i2c_hid_of_elan *ihid_elan; + int ret; ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL); if (!ihid_elan) @@ -93,21 +111,38 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client) if (IS_ERR(ihid_elan->reset_gpio)) return PTR_ERR(ihid_elan->reset_gpio); + ihid_elan->no_reset_on_power_off = of_property_read_bool(client->dev.of_node, + "no-reset-on-power-off"); + ihid_elan->vccio = devm_regulator_get(&client->dev, "vccio"); - if (IS_ERR(ihid_elan->vccio)) - return PTR_ERR(ihid_elan->vccio); + if (IS_ERR(ihid_elan->vccio)) { + ret = PTR_ERR(ihid_elan->vccio); + goto err_deassert_reset; + } ihid_elan->chip_data = device_get_match_data(&client->dev); if (ihid_elan->chip_data->main_supply_name) { ihid_elan->vcc33 = devm_regulator_get(&client->dev, ihid_elan->chip_data->main_supply_name); - if (IS_ERR(ihid_elan->vcc33)) - return PTR_ERR(ihid_elan->vcc33); + if (IS_ERR(ihid_elan->vcc33)) { + ret = PTR_ERR(ihid_elan->vcc33); + goto err_deassert_reset; + } } - return i2c_hid_core_probe(client, &ihid_elan->ops, - ihid_elan->chip_data->hid_descriptor_address, 0); + ret = i2c_hid_core_probe(client, &ihid_elan->ops, + ihid_elan->chip_data->hid_descriptor_address, 0); + if (ret) + goto err_deassert_reset; + + return 0; + +err_deassert_reset: + if (ihid_elan->no_reset_on_power_off) + gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0); + + return ret; } static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = { From b472b996a43404a912c5cb4f27050022fdbce10c Mon Sep 17 00:00:00 2001 From: Udit Kumar Date: Fri, 31 May 2024 22:27:25 +0530 Subject: [PATCH 402/537] dt-bindings: net: dp8386x: Add MIT license along with GPL-2.0 Modify license to include dual licensing as GPL-2.0-only OR MIT license for TI specific phy header files. This allows for Linux kernel files to be used in other Operating System ecosystems such as Zephyr or FreeBSD. While at this, update the GPL-2.0 to be GPL-2.0-only to be in sync with latest SPDX conventions (GPL-2.0 is deprecated). While at this, update the TI copyright year to sync with current year to indicate license change. Cc: Thomas Gleixner Cc: Trent Piepho Cc: Wadim Egorov Cc: Kip Broadhurst Signed-off-by: Udit Kumar Acked-by: Wadim Egorov Acked-by: Rob Herring Signed-off-by: David S. Miller --- include/dt-bindings/net/ti-dp83867.h | 4 ++-- include/dt-bindings/net/ti-dp83869.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h index 6fc4b445d3a14..b8a4f3ff4a3b7 100644 --- a/include/dt-bindings/net/ti-dp83867.h +++ b/include/dt-bindings/net/ti-dp83867.h @@ -1,10 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ /* * Device Tree constants for the Texas Instruments DP83867 PHY * * Author: Dan Murphy * - * Copyright: (C) 2015 Texas Instruments, Inc. 
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef _DT_BINDINGS_TI_DP83867_H diff --git a/include/dt-bindings/net/ti-dp83869.h b/include/dt-bindings/net/ti-dp83869.h index 218b1a64e9750..917114aad7d0e 100644 --- a/include/dt-bindings/net/ti-dp83869.h +++ b/include/dt-bindings/net/ti-dp83869.h @@ -1,10 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ /* * Device Tree constants for the Texas Instruments DP83869 PHY * * Author: Dan Murphy * - * Copyright: (C) 2019 Texas Instruments, Inc. + * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef _DT_BINDINGS_TI_DP83869_H From 12cda920212a49fa22d9e8b9492ac4ea013310a4 Mon Sep 17 00:00:00 2001 From: Yonglong Liu Date: Wed, 5 Jun 2024 15:20:57 +0800 Subject: [PATCH 403/537] net: hns3: fix kernel crash problem in concurrent scenario When link status change, the nic driver need to notify the roce driver to handle this event, but at this time, the roce driver may uninit, then cause kernel crash. To fix the problem, when link status change, need to check whether the roce registered, and when uninit, need to wait link update finish. Fixes: 45e92b7e4e27 ("net: hns3: add calling roce callback function when link status change") Signed-off-by: Yonglong Liu Signed-off-by: Jijie Shao Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- .../hisilicon/hns3/hns3pf/hclge_main.c | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 43cc6ee4d87db..82574ce0194fb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3086,9 +3086,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev) static void hclge_update_link_status(struct hclge_dev *hdev) { - struct hnae3_handle *rhandle = &hdev->vport[0].roce; struct hnae3_handle *handle = &hdev->vport[0].nic; - struct hnae3_client *rclient = hdev->roce_client; struct hnae3_client *client = hdev->nic_client; int state; int ret; @@ -3112,8 +3110,15 @@ static void hclge_update_link_status(struct hclge_dev *hdev) client->ops->link_status_change(handle, state); hclge_config_mac_tnl_int(hdev, state); - if (rclient && rclient->ops->link_status_change) - rclient->ops->link_status_change(rhandle, state); + + if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) { + struct hnae3_handle *rhandle = &hdev->vport[0].roce; + struct hnae3_client *rclient = hdev->roce_client; + + if (rclient && rclient->ops->link_status_change) + rclient->ops->link_status_change(rhandle, + state); + } hclge_push_link_status(hdev); } @@ -11319,6 +11324,12 @@ static int hclge_init_client_instance(struct hnae3_client *client, return ret; } +static bool hclge_uninit_need_wait(struct hclge_dev *hdev) +{ + return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); +} + static void hclge_uninit_client_instance(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) { @@ -11327,7 +11338,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, if (hdev->roce_client) { clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); - while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + while (hclge_uninit_need_wait(hdev)) msleep(HCLGE_WAIT_RESET_DONE); hdev->roce_client->ops->uninit_instance(&vport->roce, 0); From 
968fde83841a8c23558dfbd0a0c69d636db52b55 Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Wed, 5 Jun 2024 15:20:58 +0800 Subject: [PATCH 404/537] net: hns3: add cond_resched() to hns3 ring buffer init process Currently hns3 ring buffer init process would hold cpu too long with big Tx/Rx ring depth. This could cause soft lockup. So this patch adds cond_resched() to the process. Then cpu can break to run other tasks instead of busy looping. Fixes: a723fb8efe29 ("net: hns3: refine for set ring parameters") Signed-off-by: Jie Wang Signed-off-by: Jijie Shao Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4 ++++ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2 ++ 2 files changed, 6 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index ff71fb1eced94..a5fc0209d628e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3535,6 +3535,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) ret = hns3_alloc_and_attach_buffer(ring, i); if (ret) goto out_buffer_fail; + + if (!(i % HNS3_RESCHED_BD_NUM)) + cond_resched(); } return 0; @@ -5107,6 +5110,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) } u64_stats_init(&priv->ring[i].syncp); + cond_resched(); } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index acd756b0c7c9a..d36c4ed16d8dd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -214,6 +214,8 @@ enum hns3_nic_state { #define HNS3_CQ_MODE_EQE 1U #define HNS3_CQ_MODE_CQE 0U +#define HNS3_RESCHED_BD_NUM 1024 + enum hns3_pkt_l2t_type { HNS3_L2_TYPE_UNICAST, HNS3_L2_TYPE_MULTICAST, From 1af89dedc8a58006d8e385b1e0d2cd24df8a3b69 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 6 Jun 2024 20:27:19 +0200 Subject: [PATCH 405/537] thermal: core: Do not fail cdev registration because of invalid initial state It is reported that commit 31a0fa0019b0 ("thermal/debugfs: Pass cooling device state to thermal_debug_cdev_add()") causes the ACPI fan driver to fail probing on some systems which turns out to be due to the _FST control method returning an invalid value until _FSL is first evaluated for the given fan. If this happens, the .get_cur_state() cooling device callback returns an error and __thermal_cooling_device_register() fails as uses that callback after commit 31a0fa0019b0. Arguably, _FST should not return an invalid value even if it is evaluated before _FSL, so this may be regarded as a platform firmware issue, but at the same time it is not a good enough reason for failing the cooling device registration where the initial cooling device state is only needed to initialize a thermal debug facility. Accordingly, modify __thermal_cooling_device_register() to avoid calling thermal_debug_cdev_add() instead of returning an error if the initial .get_cur_state() callback invocation fails. Fixes: 31a0fa0019b0 ("thermal/debugfs: Pass cooling device state to thermal_debug_cdev_add()") Closes: https://lore.kernel.org/linux-acpi/20240530153727.843378-1-laura.nao@collabora.com Reported-by: Laura Nao Signed-off-by: Rafael J. 
Wysocki Acked-by: Daniel Lezcano Tested-by: Laura Nao --- drivers/thermal/thermal_core.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 30567b4994551..d70e76dd3c943 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -999,9 +999,17 @@ __thermal_cooling_device_register(struct device_node *np, if (ret) goto out_cdev_type; + /* + * The cooling device's current state is only needed for debug + * initialization below, so a failure to get it does not cause + * the entire cooling device initialization to fail. However, + * the debug will not work for the device if its initial state + * cannot be determined and drivers are responsible for ensuring + * that this will not happen. + */ ret = cdev->ops->get_cur_state(cdev, ¤t_state); if (ret) - goto out_cdev_type; + current_state = ULONG_MAX; thermal_cooling_device_setup_sysfs(cdev); @@ -1016,7 +1024,8 @@ __thermal_cooling_device_register(struct device_node *np, return ERR_PTR(ret); } - thermal_debug_cdev_add(cdev, current_state); + if (current_state <= cdev->max_state) + thermal_debug_cdev_add(cdev, current_state); /* Add 'this' new cdev to the global cdev list */ mutex_lock(&thermal_list_lock); From 7f18bd49cb6b6a3ab6d860fefccdc94f2a247db0 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 6 Jun 2024 20:27:30 +0200 Subject: [PATCH 406/537] thermal: ACPI: Invalidate trip points with temperature of 0 or below It is reported that commit 950210887670 ("thermal: core: Drop trips_disabled bitmask") causes the maximum frequency of CPUs to drop further down with every system sleep-wake cycle on Intel Core i7-4710HQ. This turns out to be due to a trip point whose temperature is equal to 0 degrees Celsius which is acted on every time the system wakes from sleep. Before commit 950210887670 this trip point would be disabled wia the trips_disabled bitmask, but now it is treated as a valid one. Since ACPI thermal control is generally about protection against overheating, trip points with temperature of 0 centigrade or below are not particularly useful there, so initialize them all as invalid which fixes the problem at hand. Fixes: 950210887670 ("thermal: core: Drop trips_disabled bitmask") Closes: https://lore.kernel.org/linux-pm/3f71747b-f852-4ee0-b384-cf46b2aefa3f@gmx.com Reported-by: Tibor Billes Tested-by: Tibor Billes Cc: 6.7+ # 6.7+ Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/thermal.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index d67881b50bca2..a0cfc857fb553 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -168,11 +168,17 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz) static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k) { + int temp; + if (temp_deci_k == THERMAL_TEMP_INVALID) return THERMAL_TEMP_INVALID; - return deci_kelvin_to_millicelsius_with_offset(temp_deci_k, + temp = deci_kelvin_to_millicelsius_with_offset(temp_deci_k, tz->kelvin_offset); + if (temp <= 0) + return THERMAL_TEMP_INVALID; + + return temp; } static bool acpi_thermal_trip_valid(struct acpi_thermal_trip *acpi_trip) From c44711b78608c98a3e6b49ce91678cd0917d5349 Mon Sep 17 00:00:00 2001 From: Aleksandr Mishin Date: Wed, 5 Jun 2024 13:11:35 +0300 Subject: [PATCH 407/537] liquidio: Adjust a NULL pointer handling path in lio_vf_rep_copy_packet In lio_vf_rep_copy_packet() pg_info->page is compared to a NULL value, but then it is unconditionally passed to skb_add_rx_frag() which looks strange and could lead to null pointer dereference. lio_vf_rep_copy_packet() call trace looks like: octeon_droq_process_packets octeon_droq_fast_process_packets octeon_droq_dispatch_pkt octeon_create_recv_info ...search in the dispatch_list... ->disp_fn(rdisp->rinfo, ...) lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, ...) In this path there is no code which sets pg_info->page to NULL. So this check looks unneeded and doesn't solve potential problem. But I guess the author had reason to add a check and I have no such card and can't do real test. In addition, the code in the function liquidio_push_packet() in liquidio/lio_core.c does exactly the same. Based on this, I consider the most acceptable compromise solution to adjust this issue by moving skb_add_rx_frag() into conditional scope. Found by Linux Verification Center (linuxtesting.org) with SVACE. Fixes: 1f233f327913 ("liquidio: switchdev support for LiquidIO NIC") Signed-off-by: Aleksandr Mishin Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index 96c6ea12279f8..989b4ddae3426 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct, pg_info->page_offset; memcpy(skb->data, va, MIN_SKB_SIZE); skb_put(skb, MIN_SKB_SIZE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + pg_info->page, + pg_info->page_offset + MIN_SKB_SIZE, + len - MIN_SKB_SIZE, + LIO_RXBUFFER_SZ); } - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - pg_info->page, - pg_info->page_offset + MIN_SKB_SIZE, - len - MIN_SKB_SIZE, - LIO_RXBUFFER_SZ); } else { struct octeon_skb_page_info *pg_info = ((struct octeon_skb_page_info *)(skb->cb)); From 0c76053e3fec801e86aca73f80072b3da4e72849 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 16 May 2024 11:33:43 +0300 Subject: [PATCH 408/537] drm: have config DRM_WERROR depend on !WERROR If WERROR is already enabled, there's no point in enabling DRM_WERROR or asking users about it. 
Reported-by: Linus Torvalds Closes: https://lore.kernel.org/r/CAHk-=whxT8D_0j=bjtrvj-O=VEOjn6GW8GK4j2V+BiDUntZKAQ@mail.gmail.com Fixes: f89632a9e5fa ("drm: Add CONFIG_DRM_WERROR") Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20240516083343.1375687-1-jani.nikula@intel.com Signed-off-by: Jani Nikula --- drivers/gpu/drm/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 026444eeb5c60..d0aa277fc3bff 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -450,6 +450,7 @@ config DRM_PRIVACY_SCREEN config DRM_WERROR bool "Compile the drm subsystem with warnings as errors" depends on DRM && EXPERT + depends on !WERROR default n help A kernel build should not cause any compiler warnings, and this From a3a5a37efba11b7cf1a86abe7bccfbcdb521764e Mon Sep 17 00:00:00 2001 From: Louis Dalibard Date: Fri, 7 Jun 2024 16:53:43 +0200 Subject: [PATCH 409/537] HID: Ignore battery for ELAN touchscreens 2F2C and 4116 At least ASUS Zenbook 14 (2023) and ASUS Zenbook 14 Pro (2023) are affected. The touchscreen reports a battery status of 0% and jumps to 1% when a stylus is used. The device ID was added and the battery ignore quirk was enabled for it. [jkosina@suse.com: reformatted changelog a bit] Signed-off-by: Louis Dalibard Signed-off-by: Jiri Kosina --- drivers/hid/hid-ids.h | 2 ++ drivers/hid/hid-input.c | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 61d2a21affa26..72d56ee7ce1b9 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -423,6 +423,8 @@ #define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG 0x29DF #define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8 #define I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN 0x2C82 +#define I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN 0x2F2C +#define I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN 0x4116 #define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544 #define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706 #define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 8bb16e9b94aa5..c9094a4f281e9 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -377,6 +377,10 @@ static const struct hid_device_id hid_battery_quirks[] = { HID_BATTERY_QUIRK_IGNORE }, { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_GV301RA_TOUCHSCREEN), HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX3402_TOUCHSCREEN), + HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_UX6404_TOUCHSCREEN), + HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN), HID_BATTERY_QUIRK_IGNORE }, { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN), From e79a10652bbd320649da705ca1ea0c04351af403 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Thu, 9 May 2024 13:45:02 -0500 Subject: [PATCH 410/537] ACPI: x86: Force StorageD3Enable on more products A Rembrandt-based HP thin client is reported to have problems where the NVME disk isn't present after resume from s2idle. This is because the NVME disk wasn't put into D3 at suspend, and that happened because the StorageD3Enable _DSD was missing in the BIOS. As AMD's architecture requires that the NVME is in D3 for s2idle, adjust the criteria for force_storage_d3 to match *all* Zen SoCs when the FADT advertises low power idle support. 
This will ensure that any future products with this BIOS deficiency don't need to be added to the allow list of overrides. Cc: All applicable Signed-off-by: Mario Limonciello Acked-by: Hans de Goede Signed-off-by: Rafael J. Wysocki --- drivers/acpi/x86/utils.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 7dca73417e2bf..2fe0934dcd640 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -206,16 +206,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s } /* - * AMD systems from Renoir and Lucienne *require* that the NVME controller + * AMD systems from Renoir onwards *require* that the NVME controller * is put into D3 over a Modern Standby / suspend-to-idle cycle. * * This is "typically" accomplished using the `StorageD3Enable` * property in the _DSD that is checked via the `acpi_storage_d3` function - * but this property was introduced after many of these systems launched - * and most OEM systems don't have it in their BIOS. + * but some OEM systems still don't have it in their BIOS. * * The Microsoft documentation for StorageD3Enable mentioned that Windows has - * a hardcoded allowlist for D3 support, which was used for these platforms. + * a hardcoded allowlist for D3 support as well as a registry key to override + * the BIOS, which has been used for these cases. * * This allows quirking on Linux in a similar fashion. * @@ -228,19 +228,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s * https://bugzilla.kernel.org/show_bug.cgi?id=216773 * https://bugzilla.kernel.org/show_bug.cgi?id=217003 * 2) On at least one HP system StorageD3Enable is missing on the second NVME - disk in the system. + * disk in the system. + * 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only + * NVME device. */ -static const struct x86_cpu_id storage_d3_cpu_ids[] = { - X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */ - X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */ - X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */ - X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */ - {} -}; - bool force_storage_d3(void) { - return x86_match_cpu(storage_d3_cpu_ids); + if (!cpu_feature_enabled(X86_FEATURE_ZEN)) + return false; + return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0; } /* From 0579f27249047006a818e463ee66a6c314d04cea Mon Sep 17 00:00:00 2001 From: Sagar Cheluvegowda Date: Wed, 5 Jun 2024 11:57:18 -0700 Subject: [PATCH 411/537] net: stmmac: dwmac-qcom-ethqos: Configure host DMA width Commit 070246e4674b ("net: stmmac: Fix for mismatched host/device DMA address width") added support in the stmmac driver for platform drivers to indicate the host DMA width, but left it up to authors of the specific platforms to indicate if their width differed from the addr64 register read from the MAC itself. Qualcomm's EMAC4 integration supports only up to 36 bit width (as opposed to the addr64 register indicating 40 bit width). Let's indicate that in the platform driver to avoid a scenario where the driver will allocate descriptors of size that is supported by the CPU which in our case is 36 bit, but as the addr64 register is still capable of 40 bits the device will use two descriptors as one address. 
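As a rough sketch (not the actual stmmac core code), the intended precedence is simply that a
non-zero platform-provided width wins over whatever the MAC's addr64 register advertises:

	/* Illustrative only; the real handling lives in the stmmac core. */
	static unsigned int effective_dma_width(unsigned int host_dma_width,
						unsigned int hw_addr64_width)
	{
		/* Here: 36 from the platform data wins over the 40 reported by the MAC. */
		return host_dma_width ? host_dma_width : hw_addr64_width;
	}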
Fixes: 8c4d92e82d50 ("net: stmmac: dwmac-qcom-ethqos: add support for emac4 on sa8775p platforms") Signed-off-by: Sagar Cheluvegowda Reviewed-by: Simon Horman Reviewed-by: Andrew Halaney Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index e254b21fdb598..65d7370b47d57 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -93,6 +93,7 @@ struct ethqos_emac_driver_data { bool has_emac_ge_3; const char *link_clk_name; bool has_integrated_pcs; + u32 dma_addr_width; struct dwmac4_addrs dwmac4_addrs; }; @@ -276,6 +277,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = { .has_emac_ge_3 = true, .link_clk_name = "phyaux", .has_integrated_pcs = true, + .dma_addr_width = 36, .dwmac4_addrs = { .dma_chan = 0x00008100, .dma_chan_offset = 0x1000, @@ -845,6 +847,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev) plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI; if (data->has_integrated_pcs) plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS; + if (data->dma_addr_width) + plat_dat->host_dma_width = data->dma_addr_width; if (ethqos->serdes_phy) { plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup; From 83a7eefedc9b56fe7bfeff13b6c7356688ffa670 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 9 Jun 2024 14:19:43 -0700 Subject: [PATCH 412/537] Linux 6.10-rc3 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7f921ae547f11..925a75b8ba7de 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 10 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc3 NAME = Baby Opossum Posse # *DOCUMENTATION* From 799d4b392417ed6889030a5b2335ccb6dcf030ab Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Thu, 25 Apr 2024 11:48:51 +0200 Subject: [PATCH 413/537] drm/exynos: hdmi: report safe 640x480 mode as a fallback when no EDID found When reading EDID fails and driver reports no modes available, the DRM core adds an artificial 1024x786 mode to the connector. Unfortunately some variants of the Exynos HDMI (like the one in Exynos4 SoCs) are not able to drive such mode, so report a safe 640x480 mode instead of nothing in case of the EDID reading failure. 
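(For reference: drm_add_modes_noedid() fills the connector with the standard DMT modes up to the
requested size, so limiting it to 640x480 keeps the fallback within what these HDMI blocks can
actually drive.)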
This fixes the following issue observed on Trats2 board since commit 13d5b040363c ("drm/exynos: do not return negative values from .get_modes()"): [drm] Exynos DRM: using 11c00000.fimd device for DMA mapping operations exynos-drm exynos-drm: bound 11c00000.fimd (ops fimd_component_ops) exynos-drm exynos-drm: bound 12c10000.mixer (ops mixer_component_ops) exynos-dsi 11c80000.dsi: [drm:samsung_dsim_host_attach] Attached s6e8aa0 device (lanes:4 bpp:24 mode-flags:0x10b) exynos-drm exynos-drm: bound 11c80000.dsi (ops exynos_dsi_component_ops) exynos-drm exynos-drm: bound 12d00000.hdmi (ops hdmi_component_ops) [drm] Initialized exynos 1.1.0 20180330 for exynos-drm on minor 1 exynos-hdmi 12d00000.hdmi: [drm:hdmiphy_enable.part.0] *ERROR* PLL could not reach steady state panel-samsung-s6e8aa0 11c80000.dsi.0: ID: 0xa2, 0x20, 0x8c exynos-mixer 12c10000.mixer: timeout waiting for VSYNC ------------[ cut here ]------------ WARNING: CPU: 1 PID: 11 at drivers/gpu/drm/drm_atomic_helper.c:1682 drm_atomic_helper_wait_for_vblanks.part.0+0x2b0/0x2b8 [CRTC:70:crtc-1] vblank wait timed out Modules linked in: CPU: 1 PID: 11 Comm: kworker/u16:0 Not tainted 6.9.0-rc5-next-20240424 #14913 Hardware name: Samsung Exynos (Flattened Device Tree) Workqueue: events_unbound deferred_probe_work_func Call trace: unwind_backtrace from show_stack+0x10/0x14 show_stack from dump_stack_lvl+0x68/0x88 dump_stack_lvl from __warn+0x7c/0x1c4 __warn from warn_slowpath_fmt+0x11c/0x1a8 warn_slowpath_fmt from drm_atomic_helper_wait_for_vblanks.part.0+0x2b0/0x2b8 drm_atomic_helper_wait_for_vblanks.part.0 from drm_atomic_helper_commit_tail_rpm+0x7c/0x8c drm_atomic_helper_commit_tail_rpm from commit_tail+0x9c/0x184 commit_tail from drm_atomic_helper_commit+0x168/0x190 drm_atomic_helper_commit from drm_atomic_commit+0xb4/0xe0 drm_atomic_commit from drm_client_modeset_commit_atomic+0x23c/0x27c drm_client_modeset_commit_atomic from drm_client_modeset_commit_locked+0x60/0x1cc drm_client_modeset_commit_locked from drm_client_modeset_commit+0x24/0x40 drm_client_modeset_commit from __drm_fb_helper_restore_fbdev_mode_unlocked+0x9c/0xc4 __drm_fb_helper_restore_fbdev_mode_unlocked from drm_fb_helper_set_par+0x2c/0x3c drm_fb_helper_set_par from fbcon_init+0x3d8/0x550 fbcon_init from visual_init+0xc0/0x108 visual_init from do_bind_con_driver+0x1b8/0x3a4 do_bind_con_driver from do_take_over_console+0x140/0x1ec do_take_over_console from do_fbcon_takeover+0x70/0xd0 do_fbcon_takeover from fbcon_fb_registered+0x19c/0x1ac fbcon_fb_registered from register_framebuffer+0x190/0x21c register_framebuffer from __drm_fb_helper_initial_config_and_unlock+0x350/0x574 __drm_fb_helper_initial_config_and_unlock from exynos_drm_fbdev_client_hotplug+0x6c/0xb0 exynos_drm_fbdev_client_hotplug from drm_client_register+0x58/0x94 drm_client_register from exynos_drm_bind+0x160/0x190 exynos_drm_bind from try_to_bring_up_aggregate_device+0x200/0x2d8 try_to_bring_up_aggregate_device from __component_add+0xb0/0x170 __component_add from mixer_probe+0x74/0xcc mixer_probe from platform_probe+0x5c/0xb8 platform_probe from really_probe+0xe0/0x3d8 really_probe from __driver_probe_device+0x9c/0x1e4 __driver_probe_device from driver_probe_device+0x30/0xc0 driver_probe_device from __device_attach_driver+0xa8/0x120 __device_attach_driver from bus_for_each_drv+0x80/0xcc bus_for_each_drv from __device_attach+0xac/0x1fc __device_attach from bus_probe_device+0x8c/0x90 bus_probe_device from deferred_probe_work_func+0x98/0xe0 deferred_probe_work_func from process_one_work+0x240/0x6d0 
process_one_work from worker_thread+0x1a0/0x3f4 worker_thread from kthread+0x104/0x138 kthread from ret_from_fork+0x14/0x28 Exception stack(0xf0895fb0 to 0xf0895ff8) ... irq event stamp: 82357 hardirqs last enabled at (82363): [] vprintk_emit+0x308/0x33c hardirqs last disabled at (82368): [] vprintk_emit+0x2bc/0x33c softirqs last enabled at (81614): [] __do_softirq+0x320/0x500 softirqs last disabled at (81609): [] __irq_exit_rcu+0x130/0x184 ---[ end trace 0000000000000000 ]--- exynos-drm exynos-drm: [drm] *ERROR* flip_done timed out exynos-drm exynos-drm: [drm] *ERROR* [CRTC:70:crtc-1] commit wait timed out exynos-drm exynos-drm: [drm] *ERROR* flip_done timed out exynos-drm exynos-drm: [drm] *ERROR* [CONNECTOR:74:HDMI-A-1] commit wait timed out exynos-drm exynos-drm: [drm] *ERROR* flip_done timed out exynos-drm exynos-drm: [drm] *ERROR* [PLANE:56:plane-5] commit wait timed out exynos-mixer 12c10000.mixer: timeout waiting for VSYNC Cc: stable@vger.kernel.org Fixes: 13d5b040363c ("drm/exynos: do not return negative values from .get_modes()") Signed-off-by: Marek Szyprowski Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_hdmi.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index e968824a4c728..1e26cd4f83479 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector) int ret; if (!hdata->ddc_adpt) - return 0; + goto no_edid; edid = drm_get_edid(connector, hdata->ddc_adpt); if (!edid) - return 0; + goto no_edid; hdata->dvi_mode = !connector->display_info.is_hdmi; DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n", @@ -906,6 +906,9 @@ static int hdmi_get_modes(struct drm_connector *connector) kfree(edid); return ret; + +no_edid: + return drm_add_modes_noedid(connector, 640, 480); } static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) From 1f3512cdf8299f9edaea9046d53ea324a7730bab Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 4 Jun 2024 15:11:29 +0200 Subject: [PATCH 414/537] drm/exynos: dp: drop driver owner initialization Core in platform_driver_register() already sets the .owner, so driver does not need to. Whatever is set here will be anyway overwritten by main driver calling platform_driver_register(). Signed-off-by: Krzysztof Kozlowski Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_dp.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index f48c4343f4690..3e6d4c6aa877e 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -285,7 +285,6 @@ struct platform_driver dp_driver = { .remove_new = exynos_dp_remove, .driver = { .name = "exynos-dp", - .owner = THIS_MODULE, .pm = pm_ptr(&exynos_dp_pm_ops), .of_match_table = exynos_dp_match, }, From fe8a08973a0dea9757394c5adbdc3c0a03b0b432 Mon Sep 17 00:00:00 2001 From: Yazen Ghannam Date: Fri, 7 Jun 2024 16:32:59 -0500 Subject: [PATCH 415/537] RAS/AMD/ATL: Fix MI300 bank hash Apply the SID bits to the correct offset in the Bank value. Do this in the temporary value so they don't need to be masked off later. 
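For illustration, assuming NUM_BANK_BITS is 4 (which is what the new Bank[5:4] comment implies),
the value fed to the bank XOR hash is laid out as:

	/* Illustrative layout only, assuming NUM_BANK_BITS == 4. */
	u32 hash_in = bank | (sid << 4);	/* bits 3:0 = bank, bits 5:4 = SID[1:0] */

The old code shifted SID to Bank[6:5] and also left those bits set in 'bank' itself, which is why
it then had to mask them off again before the debug print; doing the OR only in the temporary
value avoids both problems.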
Fixes: 87a612375307 ("RAS/AMD/ATL: Add MI300 DRAM to normalized address translation support") Signed-off-by: Yazen Ghannam Signed-off-by: Borislav Petkov (AMD) Cc: Link: https://lore.kernel.org/r/20240607-mi300-dram-xl-fix-v1-1-2f11547a178c@amd.com --- drivers/ras/amd/atl/umc.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c index 59b6169093f77..5cb92330dc677 100644 --- a/drivers/ras/amd/atl/umc.c +++ b/drivers/ras/amd/atl/umc.c @@ -189,16 +189,11 @@ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr) /* Calculate hash for PC bit. */ if (addr_hash.pc.xor_enable) { - /* Bits SID[1:0] act as Bank[6:5] for PC hash, so apply them here. */ - bank |= sid << 5; - temp = bitwise_xor_bits(col & addr_hash.pc.col_xor); temp ^= bitwise_xor_bits(row & addr_hash.pc.row_xor); - temp ^= bitwise_xor_bits(bank & addr_hash.bank_xor); + /* Bits SID[1:0] act as Bank[5:4] for PC hash, so apply them here. */ + temp ^= bitwise_xor_bits((bank | sid << NUM_BANK_BITS) & addr_hash.bank_xor); pc ^= temp; - - /* Drop SID bits for the sake of debug printing later. */ - bank &= 0x1F; } /* Reconstruct the normalized address starting with NA[4:0] = 0 */ From 38e3825631b1f314b21e3ade00b5a4d737eb054e Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 30 May 2024 13:01:51 +0300 Subject: [PATCH 416/537] drm/exynos/vidi: fix memory leak in .get_modes() The duplicated EDID is never freed. Fix it. Cc: stable@vger.kernel.org Signed-off-by: Jani Nikula Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_vidi.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index fab135308b70f..11a720fef32b4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -309,6 +309,7 @@ static int vidi_get_modes(struct drm_connector *connector) struct vidi_context *ctx = ctx_from_connector(connector); struct edid *edid; int edid_len; + int count; /* * the edid data comes from user side and it would be set @@ -328,7 +329,11 @@ static int vidi_get_modes(struct drm_connector *connector) drm_connector_update_edid_property(connector, edid); - return drm_add_edid_modes(connector, edid); + count = drm_add_edid_modes(connector, edid); + + kfree(edid); + + return count; } static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = { From 58f880711f2ba53fd5e959875aff5b3bf6d5c32e Mon Sep 17 00:00:00 2001 From: Wengang Wang Date: Thu, 6 Jun 2024 11:11:57 -0700 Subject: [PATCH 417/537] xfs: make sure sb_fdblocks is non-negative A user with a completely full filesystem experienced an unexpected shutdown when the filesystem tried to write the superblock during runtime. kernel shows the following dmesg: [ 8.176281] XFS (dm-4): Metadata corruption detected at xfs_sb_write_verify+0x60/0x120 [xfs], xfs_sb block 0x0 [ 8.177417] XFS (dm-4): Unmount and run xfs_repair [ 8.178016] XFS (dm-4): First 128 bytes of corrupted metadata buffer: [ 8.178703] 00000000: 58 46 53 42 00 00 10 00 00 00 00 00 01 90 00 00 XFSB............ [ 8.179487] 00000010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ [ 8.180312] 00000020: cf 12 dc 89 ca 26 45 29 92 e6 e3 8d 3b b8 a2 c3 .....&E)....;... [ 8.181150] 00000030: 00 00 00 00 01 00 00 06 00 00 00 00 00 00 00 80 ................ [ 8.182003] 00000040: 00 00 00 00 00 00 00 81 00 00 00 00 00 00 00 82 ................ 
[ 8.182004] 00000050: 00 00 00 01 00 64 00 00 00 00 00 04 00 00 00 00 .....d.......... [ 8.182004] 00000060: 00 00 64 00 b4 a5 02 00 02 00 00 08 00 00 00 00 ..d............. [ 8.182005] 00000070: 00 00 00 00 00 00 00 00 0c 09 09 03 17 00 00 19 ................ [ 8.182008] XFS (dm-4): Corruption of in-memory data detected. Shutting down filesystem [ 8.182010] XFS (dm-4): Please unmount the filesystem and rectify the problem(s) When xfs_log_sb writes super block to disk, b_fdblocks is fetched from m_fdblocks without any lock. As m_fdblocks can experience a positive -> negative -> positive changing when the FS reaches fullness (see xfs_mod_fdblocks). So there is a chance that sb_fdblocks is negative, and because sb_fdblocks is type of unsigned long long, it reads super big. And sb_fdblocks being bigger than sb_dblocks is a problem during log recovery, xfs_validate_sb_write() complains. Fix: As sb_fdblocks will be re-calculated during mount when lazysbcount is enabled, We just need to make xfs_validate_sb_write() happy -- make sure sb_fdblocks is not nenative. This patch also takes care of other percpu counters in xfs_log_sb. Signed-off-by: Wengang Wang Reviewed-by: Darrick J. Wong Signed-off-by: Chandan Babu R --- fs/xfs/libxfs/xfs_sb.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index 09e4bf949bf88..6b56f0f6d4c1a 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c @@ -1038,11 +1038,12 @@ xfs_log_sb( * and hence we don't need have to update it here. */ if (xfs_has_lazysbcount(mp)) { - mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount); + mp->m_sb.sb_icount = percpu_counter_sum_positive(&mp->m_icount); mp->m_sb.sb_ifree = min_t(uint64_t, - percpu_counter_sum(&mp->m_ifree), + percpu_counter_sum_positive(&mp->m_ifree), mp->m_sb.sb_icount); - mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks); + mp->m_sb.sb_fdblocks = + percpu_counter_sum_positive(&mp->m_fdblocks); } xfs_sb_to_disk(bp->b_addr, &mp->m_sb); From f74fb5df429ebc6a614dc5aa9e44d7194d402e5a Mon Sep 17 00:00:00 2001 From: Tobias Jakobi Date: Sun, 10 Mar 2024 23:04:00 +0100 Subject: [PATCH 418/537] drm: panel-orientation-quirks: Add quirk for Aya Neo KUN Similar to the other Aya Neo devices this one features again a portrait screen, here with a native resolution of 1600x2560. 
Signed-off-by: Tobias Jakobi Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede Link: https://patchwork.freedesktop.org/patch/msgid/20240310220401.895591-1-tjakobi@math.uni-bielefeld.de --- drivers/gpu/drm/drm_panel_orientation_quirks.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index aa93129c3397e..2166208a961d6 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -202,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_MATCH(DMI_BOARD_NAME, "NEXT"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* AYA NEO KUN */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_MATCH(DMI_BOARD_NAME, "KUN"), + }, + .driver_data = (void *)&lcd1600x2560_rightside_up, }, { /* Chuwi HiBook (CWI514) */ .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"), From 5add2f7288468f35a374620dabf126c13baaea9c Mon Sep 17 00:00:00 2001 From: David Wei Date: Thu, 6 Jun 2024 07:59:08 -0700 Subject: [PATCH 419/537] netdevsim: fix backwards compatibility in nsim_get_iflink() The default ndo_get_iflink() implementation returns the current ifindex of the netdev. But the overridden nsim_get_iflink() returns 0 if the current nsim is not linked, breaking backwards compatibility for userspace that depend on this behaviour. Fix the problem by returning the current ifindex if not linked to a peer. Fixes: 8debcf5832c3 ("netdevsim: add ndo_get_iflink() implementation") Reported-by: Yu Watanabe Suggested-by: Yu Watanabe Signed-off-by: David Wei Signed-off-by: David S. Miller --- drivers/net/netdevsim/netdev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index c22897bf5509c..017a6102be0a2 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -324,7 +324,8 @@ static int nsim_get_iflink(const struct net_device *dev) rcu_read_lock(); peer = rcu_dereference(nsim->peer); - iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0; + iflink = peer ? READ_ONCE(peer->netdev->ifindex) : + READ_ONCE(dev->ifindex); rcu_read_unlock(); return iflink; From e3cf20e5c68df604315ab30bdbe15dc8a5da556b Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Tue, 4 Jun 2024 22:32:34 +0100 Subject: [PATCH 420/537] ARM: 9405/1: ftrace: Don't assume stack frames are contiguous in memory MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The frame pointer unwinder relies on a standard layout of the stack frame, consisting of (in downward order) Calling frame: PC <---------+ LR | SP | FP | .. locals .. | Callee frame: | PC | LR | SP | FP ----------+ where after storing its previous value on the stack, FP is made to point at the location of PC in the callee stack frame, using the canonical prologue: mov ip, sp stmdb sp!, {fp, ip, lr, pc} sub fp, ip, #4 The ftrace code assumes that this activation record is pushed first, and that any stack space for locals is allocated below this. Strict adherence to this would imply that the caller's value of SP at the time of the function call can always be obtained by adding 4 to FP (which points to PC in the callee frame). However, recent versions of GCC appear to deviate from this rule, and so the only reliable way to obtain the caller's value of SP is to read it from the activation record. 
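With the canonical prologue shown above, FP ends up pointing at the saved PC slot, so the saved
registers sit at fixed negative offsets from it:

	FP      -> saved PC
	FP - 4  -> saved LR
	FP - 8  -> saved SP   (the caller's SP that this code needs)
	FP - 12 -> saved FP

which is why the change below loads the word at 'frame_pointer - 8'.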
Since this involves a read from memory rather than simple arithmetic, we need to use the uaccess API here which protects against inadvertent data aborts resulting from attempts to dereference bogus FP values. The plain uaccess API is ftrace instrumented itself, so to avoid unbounded recursion, use the __get_kernel_nofault() primitive directly. Closes: https://lore.kernel.org/all/alp44tukzo6mvcwl4ke4ehhmojrqnv6xfcdeuliybxfjfvgd3e@gpjvwj33cc76 Closes: https://lore.kernel.org/all/d870c149-4363-43de-b0ea-7125dec5608e@broadcom.com/ Reported-by: Uwe Kleine-König Reported-by: Justin Chen Tested-by: Thorsten Scherer Reviewed-by: Linus Walleij Signed-off-by: Ard Biesheuvel Signed-off-by: Russell King (Oracle) --- arch/arm/kernel/ftrace.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index a0b6d1e3812fd..e61591f33a6cd 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -232,11 +232,24 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, unsigned long old; if (unlikely(atomic_read(¤t->tracing_graph_pause))) +err_out: return; if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) { - /* FP points one word below parent's top of stack */ - frame_pointer += 4; + /* + * Usually, the stack frames are contiguous in memory but cases + * have been observed where the next stack frame does not live + * at 'frame_pointer + 4' as this code used to assume. + * + * Instead, dereference the field in the stack frame that + * stores the SP of the calling frame: to avoid unbounded + * recursion, this cannot involve any ftrace instrumented + * functions, so use the __get_kernel_nofault() primitive + * directly. + */ + __get_kernel_nofault(&frame_pointer, + (unsigned long *)(frame_pointer - 8), + unsigned long, err_out); } else { struct stackframe frame = { .fp = frame_pointer, From b880018edd3a577e50366338194dee9b899947e0 Mon Sep 17 00:00:00 2001 From: Amjad Ouled-Ameur Date: Mon, 10 Jun 2024 11:20:56 +0100 Subject: [PATCH 421/537] drm/komeda: check for error-valued pointer komeda_pipeline_get_state() may return an error-valued pointer, thus check the pointer for negative or null value before dereferencing. 
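The underlying rule (not specific to komeda) is that a helper which may return a valid pointer,
NULL or ERR_PTR(-errno) has to be screened with IS_ERR_OR_NULL() before the first dereference; a
plain NULL check happily lets an error-encoded pointer through. A minimal sketch, with made-up
helper names:

	struct thing *t = lookup_thing();	/* hypothetical: returns a pointer, NULL or ERR_PTR() */

	if (IS_ERR_OR_NULL(t))
		return NULL;			/* or hand back PTR_ERR(t) where an errno is wanted */
	use(t);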
Fixes: 502932a03fce ("drm/komeda: Add the initial scaler support for CORE") Signed-off-by: Amjad Ouled-Ameur Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20240610102056.40406-1-amjad.ouled-ameur@arm.com --- drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c index f3e744172673c..f4e76b46ca327 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c @@ -259,7 +259,7 @@ komeda_component_get_avail_scaler(struct komeda_component *c, u32 avail_scalers; pipe_st = komeda_pipeline_get_state(c->pipeline, state); - if (!pipe_st) + if (IS_ERR_OR_NULL(pipe_st)) return NULL; avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^ From ce62600c4dbee8d43b02277669dd91785a9b81d9 Mon Sep 17 00:00:00 2001 From: Adam Miotk Date: Mon, 10 Jun 2024 11:27:39 +0100 Subject: [PATCH 422/537] drm/bridge/panel: Fix runtime warning on panel bridge release Device managed panel bridge wrappers are created by calling to drm_panel_bridge_add_typed() and registering a release handler for clean-up when the device gets unbound. Since the memory for this bridge is also managed and linked to the panel device, the release function should not try to free that memory. Moreover, the call to devm_kfree() inside drm_panel_bridge_remove() will fail in this case and emit a warning because the panel bridge resource is no longer on the device resources list (it has been removed from there before the call to release handlers). Fixes: 67022227ffb1 ("drm/bridge: Add a devm_ allocator for panel bridge.") Signed-off-by: Adam Miotk Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20240610102739.139852-1-adam.miotk@arm.com --- drivers/gpu/drm/bridge/panel.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 32506524d9a2d..fe5fb08c9fc42 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -360,9 +360,12 @@ EXPORT_SYMBOL(drm_panel_bridge_set_orientation); static void devm_drm_panel_bridge_release(struct device *dev, void *res) { - struct drm_bridge **bridge = res; + struct drm_bridge *bridge = *(struct drm_bridge **)res; - drm_panel_bridge_remove(*bridge); + if (!bridge) + return; + + drm_bridge_remove(bridge); } /** From d37fe4255abe8e7b419b90c5847e8ec2b8debb08 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 6 Jun 2024 15:46:51 +0000 Subject: [PATCH 423/537] tcp: fix race in tcp_v6_syn_recv_sock() tcp_v6_syn_recv_sock() calls ip6_dst_store() before inet_sk(newsk)->pinet6 has been set up. This means ip6_dst_store() writes over the parent (listener) np->dst_cookie. This is racy because multiple threads could share the same parent and their final np->dst_cookie could be wrong. Move ip6_dst_store() call after inet_sk(newsk)->pinet6 has been changed and after the copy of parent ipv6_pinfo. Fixes: e994b2f0fb92 ("tcp: do not lock listener to process SYN packets") Signed-off-by: Eric Dumazet Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- net/ipv6/tcp_ipv6.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 8c577b651bfcd..729faf8bd366a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1439,7 +1439,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * */ newsk->sk_gso_type = SKB_GSO_TCPV6; - ip6_dst_store(newsk, dst, NULL, NULL); inet6_sk_rx_dst_set(newsk, skb); inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk); @@ -1450,6 +1449,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * memcpy(newnp, np, sizeof(struct ipv6_pinfo)); + ip6_dst_store(newsk, dst, NULL, NULL); + newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr; newnp->saddr = ireq->ir_v6_loc_addr; newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr; From d029edefed39647c797c2710aedd9d31f84c069e Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 6 Jun 2024 19:13:03 +0300 Subject: [PATCH 424/537] net dsa: qca8k: fix usages of device_get_named_child_node() The documentation for device_get_named_child_node() mentions this important point: " The caller is responsible for calling fwnode_handle_put() on the returned fwnode pointer. " Add fwnode_handle_put() to avoid leaked references. Fixes: 1e264f9d2918 ("net: dsa: qca8k: add LEDs basic support") Reviewed-by: Simon Horman Signed-off-by: Andy Shevchenko Signed-off-by: David S. Miller --- drivers/net/dsa/qca/qca8k-leds.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c index 811ebeeff4ed7..43ac68052baf9 100644 --- a/drivers/net/dsa/qca/qca8k-leds.c +++ b/drivers/net/dsa/qca/qca8k-leds.c @@ -431,8 +431,11 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", priv->internal_mdio_bus->id, port_num); - if (!init_data.devicename) + if (!init_data.devicename) { + fwnode_handle_put(led); + fwnode_handle_put(leds); return -ENOMEM; + } ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data); if (ret) @@ -441,6 +444,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p kfree(init_data.devicename); } + fwnode_handle_put(leds); return 0; } @@ -471,9 +475,13 @@ qca8k_setup_led_ctrl(struct qca8k_priv *priv) * the correct port for LED setup. */ ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num)); - if (ret) + if (ret) { + fwnode_handle_put(port); + fwnode_handle_put(ports); return ret; + } } + fwnode_handle_put(ports); return 0; } From c6ae073f5903f6c6439d0ac855836a4da5c0a701 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 6 Jun 2024 23:32:48 +0300 Subject: [PATCH 425/537] geneve: Fix incorrect inner network header offset when innerprotoinherit is set When innerprotoinherit is set, the tunneled packets do not have an inner Ethernet header. Change 'maclen' to not always assume the header length is ETH_HLEN, as there might not be a MAC header. This resolves issues with drivers (e.g. mlx5, in mlx5e_tx_tunnel_accel()) who rely on the skb inner network header offset to be correct, and use it for TX offloads. Fixes: d8a6213d70ac ("geneve: fix header validation in geneve[6]_xmit_skb") Signed-off-by: Gal Pressman Signed-off-by: Tariq Toukan Reviewed-by: Wojciech Drewek Signed-off-by: David S. 
Miller --- drivers/net/geneve.c | 10 ++++++---- include/net/ip_tunnels.h | 5 +++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 51495cb4b9be4..838e85ddec671 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -815,6 +815,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve, const struct ip_tunnel_info *info) { + bool inner_proto_inherit = geneve->cfg.inner_proto_inherit; bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); const struct ip_tunnel_key *key = &info->key; @@ -826,7 +827,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - if (!skb_vlan_inet_prepare(skb)) + if (!skb_vlan_inet_prepare(skb, inner_proto_inherit)) return -EINVAL; if (!gs4) @@ -908,7 +909,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, } err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr), - geneve->cfg.inner_proto_inherit); + inner_proto_inherit); if (unlikely(err)) return err; @@ -925,6 +926,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve, const struct ip_tunnel_info *info) { + bool inner_proto_inherit = geneve->cfg.inner_proto_inherit; bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); const struct ip_tunnel_key *key = &info->key; @@ -935,7 +937,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - if (!skb_vlan_inet_prepare(skb)) + if (!skb_vlan_inet_prepare(skb, inner_proto_inherit)) return -EINVAL; if (!gs6) @@ -997,7 +999,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, ttl = ttl ? : ip6_dst_hoplimit(dst); } err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr), - geneve->cfg.inner_proto_inherit); + inner_proto_inherit); if (unlikely(err)) return err; diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 9a6a08ec77139..1db2417b8ff52 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -461,9 +461,10 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb) /* Variant of pskb_inet_may_pull(). */ -static inline bool skb_vlan_inet_prepare(struct sk_buff *skb) +static inline bool skb_vlan_inet_prepare(struct sk_buff *skb, + bool inner_proto_inherit) { - int nhlen = 0, maclen = ETH_HLEN; + int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN; __be16 type = skb->protocol; /* Essentially this is skb_protocol(skb, true) From 791b4089e326271424b78f2fae778b20e53d071b Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 6 Jun 2024 23:32:49 +0300 Subject: [PATCH 426/537] net/mlx5e: Fix features validation check for tunneled UDP (non-VXLAN) packets Move the vxlan_features_check() call to after we verified the packet is a tunneled VXLAN packet. Without this, tunneled UDP non-VXLAN packets (for ex. GENENVE) might wrongly not get offloaded. In some cases, it worked by chance as GENEVE header is the same size as VXLAN, but it is obviously incorrect. Fixes: e3cfc7e6b7bd ("net/mlx5e: TX, Add geneve tunnel stateless offload support") Signed-off-by: Gal Pressman Reviewed-by: Dragos Tatulea Signed-off-by: Tariq Toukan Reviewed-by: Wojciech Drewek Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c53c99dde5587..a605eae56685d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4875,7 +4875,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, /* Verify if UDP port is being offloaded by HW */ if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port)) - return features; + return vxlan_features_check(skb, features); #if IS_ENABLED(CONFIG_GENEVE) /* Support Geneve offload for default UDP port */ @@ -4901,7 +4901,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb, struct mlx5e_priv *priv = netdev_priv(netdev); features = vlan_features_check(skb, features); - features = vxlan_features_check(skb, features); /* Validate if the tunneled packet is being offloaded by HW */ if (skb->encapsulation && From 86fbd9f63a6b42b8f158361334f5a25762aea358 Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Wed, 15 May 2024 10:32:01 -0400 Subject: [PATCH 427/537] Bluetooth: hci_sync: Fix not using correct handle When setting up an advertisement the code shall always attempt to use the handle set by the instance since it may not be equal to the instance ID. Fixes: e77f43d531af ("Bluetooth: hci_core: Fix not handling hdev->le_num_of_adv_sets=1") Signed-off-by: Luiz Augusto von Dentz --- net/bluetooth/hci_sync.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 16daa79b7981f..a8a7d2b368701 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -1194,7 +1194,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) cp.own_addr_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; - cp.handle = instance; + cp.handle = adv ? adv->handle : instance; if (flags & MGMT_ADV_FLAG_SEC_2M) { cp.primary_phy = HCI_ADV_PHY_1M; From 806a5198c05987b748b50f3d0c0cfb3d417381a4 Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Mon, 20 May 2024 16:03:07 -0400 Subject: [PATCH 428/537] Bluetooth: L2CAP: Fix rejecting L2CAP_CONN_PARAM_UPDATE_REQ This removes the bogus check for max > hcon->le_conn_max_interval since the later is just the initial maximum conn interval not the maximum the stack could support which is really 3200=4000ms. 
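(Connection interval values are expressed in units of 1.25 ms, so the spec maximum of 3200 units
works out to 3200 * 1.25 ms = 4000 ms, i.e. 4 seconds.)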
In order to pass GAP/CONN/CPUP/BV-05-C one shall probably enter values of the following fields in IXIT that would cause hci_check_conn_params to fail: TSPX_conn_update_int_min TSPX_conn_update_int_max TSPX_conn_update_peripheral_latency TSPX_conn_update_supervision_timeout Link: https://github.com/bluez/bluez/issues/847 Fixes: e4b019515f95 ("Bluetooth: Enforce validation on max value of connection interval") Signed-off-by: Luiz Augusto von Dentz --- include/net/bluetooth/hci_core.h | 36 ++++++++++++++++++++++++++++---- net/bluetooth/l2cap_core.c | 8 +------ 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 9231396fe96fd..c43716edf2056 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -2113,18 +2113,46 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency, { u16 max_latency; - if (min > max || min < 6 || max > 3200) + if (min > max) { + BT_WARN("min %d > max %d", min, max); return -EINVAL; + } + + if (min < 6) { + BT_WARN("min %d < 6", min); + return -EINVAL; + } + + if (max > 3200) { + BT_WARN("max %d > 3200", max); + return -EINVAL; + } + + if (to_multiplier < 10) { + BT_WARN("to_multiplier %d < 10", to_multiplier); + return -EINVAL; + } - if (to_multiplier < 10 || to_multiplier > 3200) + if (to_multiplier > 3200) { + BT_WARN("to_multiplier %d > 3200", to_multiplier); return -EINVAL; + } - if (max >= to_multiplier * 8) + if (max >= to_multiplier * 8) { + BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier); return -EINVAL; + } max_latency = (to_multiplier * 4 / max) - 1; - if (latency > 499 || latency > max_latency) + if (latency > 499) { + BT_WARN("latency %d > 499", latency); return -EINVAL; + } + + if (latency > max_latency) { + BT_WARN("latency %d > max_latency %d", latency, max_latency); + return -EINVAL; + } return 0; } diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 5b509b7675572..c49e0d4b3c0dd 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -4647,13 +4647,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, memset(&rsp, 0, sizeof(rsp)); - if (max > hcon->le_conn_max_interval) { - BT_DBG("requested connection interval exceeds current bounds."); - err = -EINVAL; - } else { - err = hci_check_conn_params(min, max, latency, to_multiplier); - } - + err = hci_check_conn_params(min, max, latency, to_multiplier); if (err) rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); else From c695439d198d30e10553a3b98360c5efe77b6903 Mon Sep 17 00:00:00 2001 From: Pauli Virtanen Date: Sun, 9 Jun 2024 18:06:20 +0300 Subject: [PATCH 429/537] Bluetooth: fix connection setup in l2cap_connect The amp_id argument of l2cap_connect() was removed in commit 84a4bb6548a2 ("Bluetooth: HCI: Remove HCI_AMP support") It was always called with amp_id == 0, i.e. AMP_ID_BREDR == 0x00 (ie. non-AMP controller). In the above commit, the code path for amp_id != 0 was preserved, although it should have used the amp_id == 0 one. Restore the previous behavior of the non-AMP code path, to fix problems with L2CAP connections. 
Fixes: 84a4bb6548a2 ("Bluetooth: HCI: Remove HCI_AMP support") Signed-off-by: Pauli Virtanen Signed-off-by: Luiz Augusto von Dentz --- net/bluetooth/l2cap_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index c49e0d4b3c0dd..aed025734d047 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -4011,8 +4011,8 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, status = L2CAP_CS_AUTHOR_PEND; chan->ops->defer(chan); } else { - l2cap_state_change(chan, BT_CONNECT2); - result = L2CAP_CR_PEND; + l2cap_state_change(chan, BT_CONFIG); + result = L2CAP_CR_SUCCESS; status = L2CAP_CS_NO_INFO; } } else { From 161f73c2c7d061a78390388811e3a6d11e99ce9d Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Wed, 5 Jun 2024 11:08:20 -0400 Subject: [PATCH 430/537] bcachefs: Split out btree_write_submit_wq Split the workqueues for btree read completions and btree write submissions; we don't want concurrency control on btree read completions, but we do want concurrency control on write submissions, else blocking in submit_bio() will cause a ton of kworkers to be allocated. Signed-off-by: Kent Overstreet --- fs/bcachefs/bcachefs.h | 3 ++- fs/bcachefs/btree_io.c | 8 ++++---- fs/bcachefs/super.c | 10 +++++++--- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h index 2a538eb2af110..2992a644d822c 100644 --- a/fs/bcachefs/bcachefs.h +++ b/fs/bcachefs/bcachefs.h @@ -790,7 +790,8 @@ struct bch_fs { /* BTREE CACHE */ struct bio_set btree_bio; - struct workqueue_struct *io_complete_wq; + struct workqueue_struct *btree_read_complete_wq; + struct workqueue_struct *btree_write_submit_wq; struct btree_root btree_roots_known[BTREE_ID_NR]; DARRAY(struct btree_root) btree_roots_extra; diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index 829c1b91477d7..7bca15c604f53 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -1389,7 +1389,7 @@ static void btree_node_read_endio(struct bio *bio) bch2_latency_acct(ca, rb->start_time, READ); } - queue_work(c->io_complete_wq, &rb->work); + queue_work(c->btree_read_complete_wq, &rb->work); } struct btree_node_read_all { @@ -1656,7 +1656,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool btree_node_read_all_replicas_done(&ra->cl.work); } else { continue_at(&ra->cl, btree_node_read_all_replicas_done, - c->io_complete_wq); + c->btree_read_complete_wq); } return 0; @@ -1737,7 +1737,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b, if (sync) btree_node_read_work(&rb->work); else - queue_work(c->io_complete_wq, &rb->work); + queue_work(c->btree_read_complete_wq, &rb->work); } } @@ -2229,7 +2229,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags) atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes); INIT_WORK(&wbio->work, btree_write_submit); - queue_work(c->io_complete_wq, &wbio->work); + queue_work(c->btree_write_submit_wq, &wbio->work); return; err: set_btree_node_noevict(b); diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c index df2bea38e83f0..65e239d329157 100644 --- a/fs/bcachefs/super.c +++ b/fs/bcachefs/super.c @@ -582,8 +582,10 @@ static void __bch2_fs_free(struct bch_fs *c) if (c->write_ref_wq) destroy_workqueue(c->write_ref_wq); - if (c->io_complete_wq) - destroy_workqueue(c->io_complete_wq); + if (c->btree_write_submit_wq) + 
destroy_workqueue(c->btree_write_submit_wq); + if (c->btree_read_complete_wq) + destroy_workqueue(c->btree_read_complete_wq); if (c->copygc_wq) destroy_workqueue(c->copygc_wq); if (c->btree_io_complete_wq) @@ -878,8 +880,10 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) || !(c->copygc_wq = alloc_workqueue("bcachefs_copygc", WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) || - !(c->io_complete_wq = alloc_workqueue("bcachefs_io", + !(c->btree_read_complete_wq = alloc_workqueue("bcachefs_btree_read_complete", WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 512)) || + !(c->btree_write_submit_wq = alloc_workqueue("bcachefs_btree_write_sumit", + WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) || !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref", WQ_FREEZABLE, 0)) || #ifndef BCH_WRITE_REF_DEBUG From 1c8cc24eef4a0e824f75e38f82766e4baede24ca Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Wed, 5 Jun 2024 21:16:29 -0400 Subject: [PATCH 431/537] bcachefs: Fix incorrect error handling found_btree_node_is_readable() error handling here is slightly odd, which is why we were accidently calling evict() on an error pointer Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_node_scan.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c index 45cb8149d374c..2cb0442f6cc90 100644 --- a/fs/bcachefs/btree_node_scan.c +++ b/fs/bcachefs/btree_node_scan.c @@ -72,10 +72,11 @@ static bool found_btree_node_is_readable(struct btree_trans *trans, struct btree *b = bch2_btree_node_get_noiter(trans, &k.k, f->btree_id, f->level, false); bool ret = !IS_ERR_OR_NULL(b); - if (ret) { - f->sectors_written = b->written; - six_unlock_read(&b->c.lock); - } + if (!ret) + return ret; + + f->sectors_written = b->written; + six_unlock_read(&b->c.lock); /* * We might update this node's range; if that happens, we need the node From 04f635ede85b2e7457f3029b9179079a8ac42ff4 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 6 Jun 2024 14:02:11 -0400 Subject: [PATCH 432/537] bcachefs: Delete incorrect BTREE_ID_NR assertion for forwards compat we now explicitly allow mounting and using filesystems with unknown btrees, and we have to walk them for fsck. Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_iter.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index d3bcb4e4e2306..53b63be537e55 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -221,11 +221,8 @@ static void bch2_btree_path_verify(struct btree_trans *trans, struct btree_path *path) { struct bch_fs *c = trans->c; - unsigned i; - - EBUG_ON(path->btree_id >= BTREE_ID_NR); - for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) { + for (unsigned i = 0; i < (!path->cached ? 
BTREE_MAX_DEPTH : 1); i++) { if (!path->l[i].b) { BUG_ON(!path->cached && bch2_btree_id_root(c, path->btree_id)->b->c.level > i); @@ -251,8 +248,6 @@ static void bch2_btree_iter_verify(struct btree_iter *iter) { struct btree_trans *trans = iter->trans; - BUG_ON(iter->btree_id >= BTREE_ID_NR); - BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached); BUG_ON((iter->flags & BTREE_ITER_is_extents) && From dab1870439a1176969c5bf06247e088ad0a3551d Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Wed, 5 Jun 2024 21:45:24 -0400 Subject: [PATCH 433/537] bcachefs: fix stack frame size in fsck.c fsck.c always runs top of the stack so we're not too concerned here; noinline_for_stack is sufficient Signed-off-by: Kent Overstreet --- fs/bcachefs/fsck.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index fd277bd58ed34..921bcdb3e5e4e 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -1677,6 +1677,7 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w) trans_was_restarted(trans, restart_count); } +noinline_for_stack static int check_dirent_inode_dirent(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c_dirent d, @@ -1773,6 +1774,7 @@ static int check_dirent_inode_dirent(struct btree_trans *trans, return ret; } +noinline_for_stack static int check_dirent_target(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c_dirent d, @@ -1847,6 +1849,7 @@ static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *su return ret; } +noinline_for_stack static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c_dirent d) { From 26447d224a7f48f669bf95a98fa29c8f50da4d63 Mon Sep 17 00:00:00 2001 From: Hongbo Li Date: Mon, 3 Jun 2024 21:23:35 +0800 Subject: [PATCH 434/537] bcachefs: fix the display format for show-super There are three keys displayed in non-uniform format. Let's fix them. [Before] ``` Label: testbcachefs Version: 1.9: (unknown version) Version upgrade complete: 0.0: (unknown version) ``` [After] ``` Label: testbcachefs Version: 1.9: (unknown version) Version upgrade complete: 0.0: (unknown version) ``` Fixes: 7423330e30ab ("bcachefs: prt_printf() now respects \r\n\t") Signed-off-by: Hongbo Li Signed-off-by: Kent Overstreet --- fs/bcachefs/super-io.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c index d73a0222f7095..055478d21e9ef 100644 --- a/fs/bcachefs/super-io.c +++ b/fs/bcachefs/super-io.c @@ -1310,15 +1310,15 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb, prt_printf(out, "Device index:\t%u\n", sb->dev_idx); - prt_str(out, "Label:\t"); + prt_printf(out, "Label:\t"); prt_printf(out, "%.*s", (int) sizeof(sb->label), sb->label); prt_newline(out); - prt_str(out, "Version:\t"); + prt_printf(out, "Version:\t"); bch2_version_to_text(out, le16_to_cpu(sb->version)); prt_newline(out); - prt_str(out, "Version upgrade complete:\t"); + prt_printf(out, "Version upgrade complete:\t"); bch2_version_to_text(out, BCH_SB_VERSION_UPGRADE_COMPLETE(sb)); prt_newline(out); From 5ae67abcdfdfa49de84be00320ffe8a669ef674f Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 6 Jun 2024 18:56:59 -0400 Subject: [PATCH 435/537] bcachefs: Enable automatic shrinking for rhashtables Since the key cache shrinker walks the rhashtable, a mostly empty rhashtable leads to really nasty reclaim performance issues. 
Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_cache.c | 9 +++++---- fs/bcachefs/btree_key_cache.c | 9 +++++---- fs/bcachefs/io_read.c | 7 ++++--- fs/bcachefs/movinggc.c | 7 ++++--- 4 files changed, 18 insertions(+), 14 deletions(-) diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index 9e4ed75d36756..4f5e411771ba1 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -91,10 +91,11 @@ static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg, } static const struct rhashtable_params bch_btree_cache_params = { - .head_offset = offsetof(struct btree, hash), - .key_offset = offsetof(struct btree, hash_val), - .key_len = sizeof(u64), - .obj_cmpfn = bch2_btree_cache_cmp_fn, + .head_offset = offsetof(struct btree, hash), + .key_offset = offsetof(struct btree, hash_val), + .key_len = sizeof(u64), + .obj_cmpfn = bch2_btree_cache_cmp_fn, + .automatic_shrinking = true, }; static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp) diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index 34056aaece009..fb731d52b1ecc 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -32,10 +32,11 @@ static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg, } static const struct rhashtable_params bch2_btree_key_cache_params = { - .head_offset = offsetof(struct bkey_cached, hash), - .key_offset = offsetof(struct bkey_cached, key), - .key_len = sizeof(struct bkey_cached_key), - .obj_cmpfn = bch2_btree_key_cache_cmp_fn, + .head_offset = offsetof(struct bkey_cached, hash), + .key_offset = offsetof(struct bkey_cached, key), + .key_len = sizeof(struct bkey_cached_key), + .obj_cmpfn = bch2_btree_key_cache_cmp_fn, + .automatic_shrinking = true, }; __flatten diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c index f57486794484d..862b79f86b91e 100644 --- a/fs/bcachefs/io_read.c +++ b/fs/bcachefs/io_read.c @@ -84,9 +84,10 @@ struct promote_op { }; static const struct rhashtable_params bch_promote_params = { - .head_offset = offsetof(struct promote_op, hash), - .key_offset = offsetof(struct promote_op, pos), - .key_len = sizeof(struct bpos), + .head_offset = offsetof(struct promote_op, hash), + .key_offset = offsetof(struct promote_op, pos), + .key_len = sizeof(struct bpos), + .automatic_shrinking = true, }; static inline int should_promote(struct bch_fs *c, struct bkey_s_c k, diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c index 10bfb31c151b2..eb49dd045eff5 100644 --- a/fs/bcachefs/movinggc.c +++ b/fs/bcachefs/movinggc.c @@ -35,9 +35,10 @@ struct buckets_in_flight { }; static const struct rhashtable_params bch_move_bucket_params = { - .head_offset = offsetof(struct move_bucket_in_flight, hash), - .key_offset = offsetof(struct move_bucket_in_flight, bucket.k), - .key_len = sizeof(struct move_bucket_key), + .head_offset = offsetof(struct move_bucket_in_flight, hash), + .key_offset = offsetof(struct move_bucket_in_flight, bucket.k), + .key_len = sizeof(struct move_bucket_key), + .automatic_shrinking = true, }; static struct move_bucket_in_flight * From bc65e98e68dac2c0b588e67ea75ee8674c208fc7 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 6 Jun 2024 19:12:11 -0400 Subject: [PATCH 436/537] bcachefs: increase key cache shrinker batch size Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_key_cache.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index 
fb731d52b1ecc..eaf012ddca08c 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -1026,9 +1026,10 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc) if (!shrink) return -BCH_ERR_ENOMEM_fs_btree_cache_init; bc->shrink = shrink; - shrink->seeks = 0; shrink->count_objects = bch2_btree_key_cache_count; shrink->scan_objects = bch2_btree_key_cache_scan; + shrink->batch = 1 << 14; + shrink->seeks = 0; shrink->private_data = c; shrinker_register(shrink); return 0; From 9ac3e660cac3e29cfc817b6a23735b70f12bd16a Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 6 Jun 2024 19:30:41 -0400 Subject: [PATCH 437/537] bcachefs: set sb->s_shrinker->seeks = 0 inodes and dentries are still present in the btree node cache, in much more compact form Signed-off-by: Kent Overstreet --- fs/bcachefs/fs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index cd388f1702dc8..f6f1dbc1fe15c 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -1967,6 +1967,7 @@ static struct dentry *bch2_mount(struct file_system_type *fs_type, sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1; sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec); sb->s_uuid = c->sb.user_uuid; + sb->s_shrink->seeks = 0; c->vfs_sb = sb; strscpy(sb->s_id, c->name, sizeof(sb->s_id)); From 2760bfe38826f65b1806f1cc62744404b5917dea Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 6 Jun 2024 20:01:34 -0400 Subject: [PATCH 438/537] bcachefs: Fix reporting of freed objects from key cache shrinker We count objects as freed when we move them to the srcu-pending lists because we're doing the equivalent of a kfree_srcu(); the only difference is managing the pending list ourself means we can allocate from the pending list. 
Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_key_cache.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index eaf012ddca08c..2ad3d27c91e6f 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -841,7 +841,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink, six_lock_exit(&ck->c.lock); kmem_cache_free(bch2_key_cache, ck); atomic_long_dec(&bc->nr_freed); - freed++; bc->nr_freed_nonpcpu--; bc->freed++; } @@ -855,7 +854,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink, six_lock_exit(&ck->c.lock); kmem_cache_free(bch2_key_cache, ck); atomic_long_dec(&bc->nr_freed); - freed++; bc->nr_freed_pcpu--; bc->freed++; } @@ -877,23 +875,22 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink, if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) { bc->skipped_dirty++; - goto next; } else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) { clear_bit(BKEY_CACHED_ACCESSED, &ck->flags); bc->skipped_accessed++; - goto next; - } else if (bkey_cached_lock_for_evict(ck)) { + } else if (!bkey_cached_lock_for_evict(ck)) { + bc->skipped_lock_fail++; + } else { bkey_cached_evict(bc, ck); bkey_cached_free(bc, ck); bc->moved_to_freelist++; - } else { - bc->skipped_lock_fail++; + freed++; } scanned++; if (scanned >= nr) break; -next: + pos = next; } From bf2b356afdcafa18db1b409f7039059d1fd6f25f Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Fri, 7 Jun 2024 14:25:18 -0400 Subject: [PATCH 439/537] bcachefs: Leave a buffer in the btree key cache to avoid lock thrashing Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_key_cache.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index 2ad3d27c91e6f..2d3c0d45c37f9 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -915,6 +915,14 @@ static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink, long nr = atomic_long_read(&bc->nr_keys) - atomic_long_read(&bc->nr_dirty); + /* + * Avoid hammering our shrinker too much if it's nearly empty - the + * shrinker code doesn't take into account how big our cache is, if it's + * mostly empty but the system is under memory pressure it causes nasty + * lock contention: + */ + nr -= 128; + return max(0L, nr); } From f9035b0ce60cfaf8abd7e1cd5c55690c739aaaf6 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 6 Jun 2024 21:59:12 -0400 Subject: [PATCH 440/537] bcachefs: Fix refcount leak in check_fix_ptrs() fsck_err() does a goto fsck_err on error; factor out check_fix_ptr() so that our error label can drop our device ref. 
Signed-off-by: Kent Overstreet --- fs/bcachefs/buckets.c | 249 ++++++++++++++++++++++-------------------- 1 file changed, 133 insertions(+), 116 deletions(-) diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c index ed97712d0db1e..75a54ed977d72 100644 --- a/fs/bcachefs/buckets.c +++ b/fs/bcachefs/buckets.c @@ -465,143 +465,161 @@ int bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64 return bch2_update_replicas_list(trans, &r.e, sectors); } -int bch2_check_fix_ptrs(struct btree_trans *trans, - enum btree_id btree, unsigned level, struct bkey_s_c k, - enum btree_iter_update_trigger_flags flags) +static int bch2_check_fix_ptr(struct btree_trans *trans, + struct bkey_s_c k, + struct extent_ptr_decoded p, + const union bch_extent_entry *entry, + bool *do_update) { struct bch_fs *c = trans->c; - struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k); - const union bch_extent_entry *entry_c; - struct extent_ptr_decoded p = { 0 }; - bool do_update = false; struct printbuf buf = PRINTBUF; int ret = 0; - percpu_down_read(&c->mark_lock); + struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev); + if (!ca) { + if (fsck_err(c, ptr_to_invalid_device, + "pointer to missing device %u\n" + "while marking %s", + p.ptr.dev, + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) + *do_update = true; + return 0; + } - bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) { - struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev); - if (!ca) { - if (fsck_err(c, ptr_to_invalid_device, - "pointer to missing device %u\n" - "while marking %s", - p.ptr.dev, - (printbuf_reset(&buf), - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) - do_update = true; - continue; - } + struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr); + enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry); - struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr); - enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry_c); + if (fsck_err_on(!g->gen_valid, + c, ptr_to_missing_alloc_key, + "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n" + "while marking %s", + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), + bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), + p.ptr.gen, + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { + if (!p.ptr.cached) { + g->gen_valid = true; + g->gen = p.ptr.gen; + } else { + *do_update = true; + } + } - if (fsck_err_on(!g->gen_valid, - c, ptr_to_missing_alloc_key, - "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n" - "while marking %s", - p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), - bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), - p.ptr.gen, - (printbuf_reset(&buf), - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { - if (!p.ptr.cached) { - g->gen_valid = true; - g->gen = p.ptr.gen; - } else { - do_update = true; - } + if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0, + c, ptr_gen_newer_than_bucket_gen, + "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n" + "while marking %s", + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), + bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), + p.ptr.gen, g->gen, + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { + if (!p.ptr.cached && + (g->data_type != BCH_DATA_btree || + data_type == BCH_DATA_btree)) { + g->gen_valid = true; + g->gen = p.ptr.gen; + g->data_type = 0; + g->dirty_sectors = 0; + g->cached_sectors = 0; + } else { + *do_update = true; } + } - if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0, - c, ptr_gen_newer_than_bucket_gen, - "bucket %u:%zu data type %s ptr gen in the future: 
%u > %u\n" - "while marking %s", - p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), - bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), - p.ptr.gen, g->gen, - (printbuf_reset(&buf), - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { - if (!p.ptr.cached && - (g->data_type != BCH_DATA_btree || - data_type == BCH_DATA_btree)) { - g->gen_valid = true; - g->gen = p.ptr.gen; - g->data_type = 0; - g->dirty_sectors = 0; - g->cached_sectors = 0; - } else { - do_update = true; - } + if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, + c, ptr_gen_newer_than_bucket_gen, + "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n" + "while marking %s", + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, + bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), + p.ptr.gen, + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) + *do_update = true; + + if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0, + c, stale_dirty_ptr, + "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n" + "while marking %s", + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), + bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), + p.ptr.gen, g->gen, + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) + *do_update = true; + + if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen) + goto out; + + if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type), + c, ptr_bucket_data_type_mismatch, + "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n" + "while marking %s", + p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, + bch2_data_type_str(g->data_type), + bch2_data_type_str(data_type), + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { + if (data_type == BCH_DATA_btree) { + g->gen_valid = true; + g->gen = p.ptr.gen; + g->data_type = data_type; + g->dirty_sectors = 0; + g->cached_sectors = 0; + } else { + *do_update = true; } + } - if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX, - c, ptr_gen_newer_than_bucket_gen, - "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n" + if (p.has_ec) { + struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx); + + if (fsck_err_on(!m || !m->alive, c, + ptr_to_missing_stripe, + "pointer to nonexistent stripe %llu\n" "while marking %s", - p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, - bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), - p.ptr.gen, + (u64) p.ec.idx, (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, k), buf.buf))) - do_update = true; + *do_update = true; - if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0, - c, stale_dirty_ptr, - "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n" + if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c, + ptr_to_incorrect_stripe, + "pointer does not match stripe %llu\n" "while marking %s", - p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), - bch2_data_type_str(ptr_data_type(k.k, &p.ptr)), - p.ptr.gen, g->gen, + (u64) p.ec.idx, (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, k), buf.buf))) - do_update = true; + *do_update = true; + } +out: +fsck_err: + bch2_dev_put(ca); + printbuf_exit(&buf); + return ret; +} - if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen) - goto next; +int bch2_check_fix_ptrs(struct btree_trans *trans, + enum btree_id btree, unsigned level, struct bkey_s_c k, + enum btree_iter_update_trigger_flags flags) +{ + struct bch_fs *c = trans->c; + struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k); + const union bch_extent_entry *entry_c; + struct extent_ptr_decoded p = { 0 }; + bool do_update = false; + 
struct printbuf buf = PRINTBUF; + int ret = 0; - if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type), - c, ptr_bucket_data_type_mismatch, - "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n" - "while marking %s", - p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, - bch2_data_type_str(g->data_type), - bch2_data_type_str(data_type), - (printbuf_reset(&buf), - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { - if (data_type == BCH_DATA_btree) { - g->gen_valid = true; - g->gen = p.ptr.gen; - g->data_type = data_type; - g->dirty_sectors = 0; - g->cached_sectors = 0; - } else { - do_update = true; - } - } + percpu_down_read(&c->mark_lock); - if (p.has_ec) { - struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx); - - if (fsck_err_on(!m || !m->alive, c, - ptr_to_missing_stripe, - "pointer to nonexistent stripe %llu\n" - "while marking %s", - (u64) p.ec.idx, - (printbuf_reset(&buf), - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) - do_update = true; - - if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c, - ptr_to_incorrect_stripe, - "pointer does not match stripe %llu\n" - "while marking %s", - (u64) p.ec.idx, - (printbuf_reset(&buf), - bch2_bkey_val_to_text(&buf, c, k), buf.buf))) - do_update = true; - } -next: - bch2_dev_put(ca); + bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) { + ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update); + if (ret) + goto err; } if (do_update) { @@ -716,7 +734,6 @@ int bch2_check_fix_ptrs(struct btree_trans *trans, bch2_btree_node_update_key_early(trans, btree, level - 1, k, new); } err: -fsck_err: percpu_up_read(&c->mark_lock); printbuf_exit(&buf); return ret; From e0cb5722e112811d32d600ef750f9b39e6f684ca Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Fri, 7 Jun 2024 21:02:06 -0400 Subject: [PATCH 441/537] bcachefs: Fix snapshot_create_lock lock ordering ====================================================== WARNING: possible circular locking dependency detected 6.10.0-rc2-ktest-00018-gebd1d148b278 #144 Not tainted ------------------------------------------------------ fio/1345 is trying to acquire lock: ffff88813e200ab8 (&c->snapshot_create_lock){++++}-{3:3}, at: bch2_truncate+0x76/0xf0 but task is already holding lock: ffff888105a1fa38 (&sb->s_type->i_mutex_key#13){+.+.}-{3:3}, at: do_truncate+0x7b/0xc0 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #2 (&sb->s_type->i_mutex_key#13){+.+.}-{3:3}: down_write+0x3d/0xd0 bch2_write_iter+0x1c0/0x10f0 vfs_write+0x24a/0x560 __x64_sys_pwrite64+0x77/0xb0 x64_sys_call+0x17e5/0x1ab0 do_syscall_64+0x68/0x130 entry_SYSCALL_64_after_hwframe+0x4b/0x53 -> #1 (sb_writers#10){.+.+}-{0:0}: mnt_want_write+0x4a/0x1d0 filename_create+0x69/0x1a0 user_path_create+0x38/0x50 bch2_fs_file_ioctl+0x315/0xbf0 __x64_sys_ioctl+0x297/0xaf0 x64_sys_call+0x10cb/0x1ab0 do_syscall_64+0x68/0x130 entry_SYSCALL_64_after_hwframe+0x4b/0x53 -> #0 (&c->snapshot_create_lock){++++}-{3:3}: __lock_acquire+0x1445/0x25b0 lock_acquire+0xbd/0x2b0 down_read+0x40/0x180 bch2_truncate+0x76/0xf0 bchfs_truncate+0x240/0x3f0 bch2_setattr+0x7b/0xb0 notify_change+0x322/0x4b0 do_truncate+0x8b/0xc0 do_ftruncate+0x110/0x270 __x64_sys_ftruncate+0x43/0x80 x64_sys_call+0x1373/0x1ab0 do_syscall_64+0x68/0x130 entry_SYSCALL_64_after_hwframe+0x4b/0x53 other info that might help us debug this: Chain exists of: &c->snapshot_create_lock --> sb_writers#10 --> &sb->s_type->i_mutex_key#13 Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&sb->s_type->i_mutex_key#13); lock(sb_writers#10); lock(&sb->s_type->i_mutex_key#13); rlock(&c->snapshot_create_lock); *** DEADLOCK *** Signed-off-by: Kent Overstreet --- fs/bcachefs/fs-ioctl.c | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c index 205a323ffc6dd..3551a737181b2 100644 --- a/fs/bcachefs/fs-ioctl.c +++ b/fs/bcachefs/fs-ioctl.c @@ -308,8 +308,8 @@ static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg) return ret; } -static long __bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp, - struct bch_ioctl_subvolume arg) +static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp, + struct bch_ioctl_subvolume arg) { struct inode *dir; struct bch_inode_info *inode; @@ -406,9 +406,12 @@ static long __bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp, !arg.src_ptr) snapshot_src.subvol = inode_inum(to_bch_ei(dir)).subvol; + down_write(&c->snapshot_create_lock); inode = __bch2_create(file_mnt_idmap(filp), to_bch_ei(dir), dst_dentry, arg.mode|S_IFDIR, 0, snapshot_src, create_flags); + up_write(&c->snapshot_create_lock); + error = PTR_ERR_OR_ZERO(inode); if (error) goto err3; @@ -429,16 +432,6 @@ static long __bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp, return error; } -static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp, - struct bch_ioctl_subvolume arg) -{ - down_write(&c->snapshot_create_lock); - long ret = __bch2_ioctl_subvolume_create(c, filp, arg); - up_write(&c->snapshot_create_lock); - - return ret; -} - static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp, struct bch_ioctl_subvolume arg) { From 9c4acd19bbff5db4629c193366f82960e38d1c6f Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 6 Jun 2024 14:50:06 -0400 Subject: [PATCH 442/537] bcachefs: Replace bucket_valid() asserts in bucket lookup with proper checks The bucket_gens array and gc_buckets array known their own size; we should be using those members, and returning an error. 
Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_gc.c | 2 ++ fs/bcachefs/buckets.c | 2 ++ fs/bcachefs/buckets.h | 6 ++++-- fs/bcachefs/buckets_types.h | 2 ++ 4 files changed, 10 insertions(+), 2 deletions(-) diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c index dc97991bcd6ad..130a0131cd73e 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -990,6 +990,8 @@ static int bch2_gc_alloc_start(struct bch_fs *c) buckets->first_bucket = ca->mi.first_bucket; buckets->nbuckets = ca->mi.nbuckets; + buckets->nbuckets_minus_first = + buckets->nbuckets - buckets->first_bucket; rcu_assign_pointer(ca->buckets_gc, buckets); } diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c index 75a54ed977d72..99a7824d0de2d 100644 --- a/fs/bcachefs/buckets.c +++ b/fs/bcachefs/buckets.c @@ -1612,6 +1612,8 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) bucket_gens->first_bucket = ca->mi.first_bucket; bucket_gens->nbuckets = nbuckets; + bucket_gens->nbuckets_minus_first = + bucket_gens->nbuckets - bucket_gens->first_bucket; if (resize) { down_write(&c->gc_lock); diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h index 617ffde2fb7ad..e1a5e3082bbfb 100644 --- a/fs/bcachefs/buckets.h +++ b/fs/bcachefs/buckets.h @@ -93,7 +93,8 @@ static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b) { struct bucket_array *buckets = gc_bucket_array(ca); - BUG_ON(!bucket_valid(ca, b)); + if (b - buckets->first_bucket >= buckets->nbuckets_minus_first) + return NULL; return buckets->b + b; } @@ -110,7 +111,8 @@ static inline u8 *bucket_gen(struct bch_dev *ca, size_t b) { struct bucket_gens *gens = bucket_gens(ca); - BUG_ON(!bucket_valid(ca, b)); + if (b - gens->first_bucket >= gens->nbuckets_minus_first) + return NULL; return gens->b + b; } diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h index 6a31740222a71..f636e17c4cafe 100644 --- a/fs/bcachefs/buckets_types.h +++ b/fs/bcachefs/buckets_types.h @@ -22,6 +22,7 @@ struct bucket_array { struct rcu_head rcu; u16 first_bucket; size_t nbuckets; + size_t nbuckets_minus_first; struct bucket b[]; }; @@ -29,6 +30,7 @@ struct bucket_gens { struct rcu_head rcu; u16 first_bucket; size_t nbuckets; + size_t nbuckets_minus_first; u8 b[]; }; From 9432e90df1b8a544f220fd455b2fa39eed8a535d Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 6 Jun 2024 15:06:22 -0400 Subject: [PATCH 443/537] bcachefs: Check for invalid bucket from bucket_gen(), gc_bucket() Turn more asserts into proper recoverable error paths. 
Reported-by: syzbot+246b47da27f8e7e7d6fb@syzkaller.appspotmail.com Signed-off-by: Kent Overstreet --- fs/bcachefs/alloc_background.c | 22 +++++++++++++-- fs/bcachefs/btree_gc.c | 15 ++++++---- fs/bcachefs/buckets.c | 50 ++++++++++++++++++++++++---------- fs/bcachefs/buckets.h | 11 +++++--- fs/bcachefs/ec.c | 26 ++++++++++++++---- fs/bcachefs/extents.c | 9 ++++-- fs/bcachefs/io_read.c | 30 ++++++++++++++------ fs/bcachefs/io_write.c | 19 ++++++++++--- 8 files changed, 135 insertions(+), 47 deletions(-) diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c index 346cd91f91f99..c4b6601f5b748 100644 --- a/fs/bcachefs/alloc_background.c +++ b/fs/bcachefs/alloc_background.c @@ -741,6 +741,7 @@ int bch2_trigger_alloc(struct btree_trans *trans, enum btree_iter_update_trigger_flags flags) { struct bch_fs *c = trans->c; + struct printbuf buf = PRINTBUF; int ret = 0; struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p); @@ -860,8 +861,14 @@ int bch2_trigger_alloc(struct btree_trans *trans, } percpu_down_read(&c->mark_lock); - if (new_a->gen != old_a->gen) - *bucket_gen(ca, new.k->p.offset) = new_a->gen; + if (new_a->gen != old_a->gen) { + u8 *gen = bucket_gen(ca, new.k->p.offset); + if (unlikely(!gen)) { + percpu_up_read(&c->mark_lock); + goto invalid_bucket; + } + *gen = new_a->gen; + } bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false); percpu_up_read(&c->mark_lock); @@ -895,6 +902,11 @@ int bch2_trigger_alloc(struct btree_trans *trans, percpu_down_read(&c->mark_lock); struct bucket *g = gc_bucket(ca, new.k->p.offset); + if (unlikely(!g)) { + percpu_up_read(&c->mark_lock); + goto invalid_bucket; + } + g->gen_valid = 1; bucket_lock(g); @@ -910,8 +922,14 @@ int bch2_trigger_alloc(struct btree_trans *trans, percpu_up_read(&c->mark_lock); } err: + printbuf_exit(&buf); bch2_dev_put(ca); return ret; +invalid_bucket: + bch2_fs_inconsistent(c, "reference to invalid bucket\n %s", + (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf)); + ret = -EIO; + goto err; } /* diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c index 130a0131cd73e..0e477a926579a 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -874,6 +874,9 @@ static int bch2_alloc_write_key(struct btree_trans *trans, const struct bch_alloc_v4 *old; int ret; + if (!bucket_valid(ca, k.k->p.offset)) + return 0; + old = bch2_alloc_to_v4(k, &old_convert); gc = new = *old; @@ -1005,12 +1008,14 @@ static int bch2_gc_alloc_start(struct bch_fs *c) continue; } - struct bch_alloc_v4 a_convert; - const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert); + if (bucket_valid(ca, k.k->p.offset)) { + struct bch_alloc_v4 a_convert; + const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert); - struct bucket *g = gc_bucket(ca, k.k->p.offset); - g->gen_valid = 1; - g->gen = a->gen; + struct bucket *g = gc_bucket(ca, k.k->p.offset); + g->gen_valid = 1; + g->gen = a->gen; + } 0; }))); bch2_dev_put(ca); diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c index 99a7824d0de2d..743d57eba7607 100644 --- a/fs/bcachefs/buckets.c +++ b/fs/bcachefs/buckets.c @@ -488,6 +488,17 @@ static int bch2_check_fix_ptr(struct btree_trans *trans, } struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr); + if (!g) { + if (fsck_err(c, ptr_to_invalid_device, + "pointer to invalid bucket on device %u\n" + "while marking %s", + p.ptr.dev, + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) + *do_update = true; + goto out; + } + enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry); if 
(fsck_err_on(!g->gen_valid, @@ -577,8 +588,8 @@ static int bch2_check_fix_ptr(struct btree_trans *trans, if (p.has_ec) { struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx); - if (fsck_err_on(!m || !m->alive, c, - ptr_to_missing_stripe, + if (fsck_err_on(!m || !m->alive, + c, ptr_to_missing_stripe, "pointer to nonexistent stripe %llu\n" "while marking %s", (u64) p.ec.idx, @@ -586,8 +597,8 @@ static int bch2_check_fix_ptr(struct btree_trans *trans, bch2_bkey_val_to_text(&buf, c, k), buf.buf))) *do_update = true; - if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c, - ptr_to_incorrect_stripe, + if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), + c, ptr_to_incorrect_stripe, "pointer does not match stripe %llu\n" "while marking %s", (u64) p.ec.idx, @@ -1004,6 +1015,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans, enum btree_iter_update_trigger_flags flags) { bool insert = !(flags & BTREE_TRIGGER_overwrite); + struct printbuf buf = PRINTBUF; int ret = 0; struct bch_fs *c = trans->c; @@ -1036,6 +1048,13 @@ static int bch2_trigger_pointer(struct btree_trans *trans, if (flags & BTREE_TRIGGER_gc) { percpu_down_read(&c->mark_lock); struct bucket *g = gc_bucket(ca, bucket.offset); + if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s", + p.ptr.dev, + (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { + ret = -EIO; + goto err_unlock; + } + bucket_lock(g); struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old; ret = __mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &new); @@ -1044,10 +1063,12 @@ static int bch2_trigger_pointer(struct btree_trans *trans, bch2_dev_usage_update(c, ca, &old, &new, 0, true); } bucket_unlock(g); +err_unlock: percpu_up_read(&c->mark_lock); } err: bch2_dev_put(ca); + printbuf_exit(&buf); return ret; } @@ -1335,10 +1356,11 @@ static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b, enum bch_data_type data_type, unsigned sectors, enum btree_iter_update_trigger_flags flags) { - int ret = 0; - percpu_down_read(&c->mark_lock); struct bucket *g = gc_bucket(ca, b); + if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s", + ca->dev_idx, bch2_data_type_str(data_type))) + goto err_unlock; bucket_lock(g); struct bch_alloc_v4 old = bucket_m_to_alloc(*g); @@ -1347,29 +1369,27 @@ static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca, g->data_type != data_type, c, "different types of data in same bucket: %s, %s", bch2_data_type_str(g->data_type), - bch2_data_type_str(data_type))) { - ret = -EIO; + bch2_data_type_str(data_type))) goto err; - } if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c, "bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size", ca->dev_idx, b, g->gen, bch2_data_type_str(g->data_type ?: data_type), - g->dirty_sectors, sectors)) { - ret = -EIO; + g->dirty_sectors, sectors)) goto err; - } g->data_type = data_type; g->dirty_sectors += sectors; struct bch_alloc_v4 new = bucket_m_to_alloc(*g); + bch2_dev_usage_update(c, ca, &old, &new, 0, true); + percpu_up_read(&c->mark_lock); + return 0; err: bucket_unlock(g); - if (!ret) - bch2_dev_usage_update(c, ca, &old, &new, 0, true); +err_unlock: percpu_up_read(&c->mark_lock); - return ret; + return -EIO; } int bch2_trans_mark_metadata_bucket(struct btree_trans *trans, diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h index e1a5e3082bbfb..80ee0be9793e6 100644 --- 
a/fs/bcachefs/buckets.h +++ b/fs/bcachefs/buckets.h @@ -172,19 +172,22 @@ static inline int gen_after(u8 a, u8 b) return r > 0 ? r : 0; } -static inline u8 dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr) +static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr) { - return gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen); + u8 *gen = bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)); + if (!gen) + return -1; + return gen_after(*gen, ptr->gen); } /** * dev_ptr_stale() - check if a pointer points into a bucket that has been * invalidated. */ -static inline u8 dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr) +static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr) { rcu_read_lock(); - u8 ret = dev_ptr_stale_rcu(ca, ptr); + int ret = dev_ptr_stale_rcu(ca, ptr); rcu_read_unlock(); return ret; diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c index d8b9beca37762..83e279d41829d 100644 --- a/fs/bcachefs/ec.c +++ b/fs/bcachefs/ec.c @@ -268,6 +268,7 @@ static int mark_stripe_bucket(struct btree_trans *trans, { struct bch_fs *c = trans->c; const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx; + struct printbuf buf = PRINTBUF; int ret = 0; struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev); @@ -289,6 +290,13 @@ static int mark_stripe_bucket(struct btree_trans *trans, if (flags & BTREE_TRIGGER_gc) { percpu_down_read(&c->mark_lock); struct bucket *g = gc_bucket(ca, bucket.offset); + if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s", + ptr->dev, + (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { + ret = -EIO; + goto err_unlock; + } + bucket_lock(g); struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old; ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags); @@ -297,10 +305,12 @@ static int mark_stripe_bucket(struct btree_trans *trans, bch2_dev_usage_update(c, ca, &old, &new, 0, true); } bucket_unlock(g); +err_unlock: percpu_up_read(&c->mark_lock); } err: bch2_dev_put(ca); + printbuf_exit(&buf); return ret; } @@ -714,10 +724,12 @@ static void ec_block_endio(struct bio *bio) bch2_blk_status_to_str(bio->bi_status))) clear_bit(ec_bio->idx, ec_bio->buf->valid); - if (dev_ptr_stale(ca, ptr)) { + int stale = dev_ptr_stale(ca, ptr); + if (stale) { bch_err_ratelimited(ca->fs, - "error %s stripe: stale pointer after io", - bio_data_dir(bio) == READ ? "reading from" : "writing to"); + "error %s stripe: stale/invalid pointer (%i) after io", + bio_data_dir(bio) == READ ? "reading from" : "writing to", + stale); clear_bit(ec_bio->idx, ec_bio->buf->valid); } @@ -743,10 +755,12 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf, return; } - if (dev_ptr_stale(ca, ptr)) { + int stale = dev_ptr_stale(ca, ptr); + if (stale) { bch_err_ratelimited(c, - "error %s stripe: stale pointer", - rw == READ ? "reading from" : "writing to"); + "error %s stripe: stale pointer (%i)", + rw == READ ? "reading from" : "writing to", + stale); clear_bit(idx, buf->valid); return; } diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c index 469037929685b..410b8bd81b5a6 100644 --- a/fs/bcachefs/extents.c +++ b/fs/bcachefs/extents.c @@ -137,7 +137,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k, struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev); - if (p.ptr.cached && (!ca || dev_ptr_stale(ca, &p.ptr))) + if (p.ptr.cached && (!ca || dev_ptr_stale_rcu(ca, &p.ptr))) continue; f = failed ? 
dev_io_failures(failed, p.ptr.dev) : NULL; @@ -999,7 +999,7 @@ bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k) bch2_bkey_drop_ptrs(k, ptr, ptr->cached && (ca = bch2_dev_rcu(c, ptr->dev)) && - dev_ptr_stale_rcu(ca, ptr)); + dev_ptr_stale_rcu(ca, ptr) > 0); rcu_read_unlock(); return bkey_deleted(k.k); @@ -1024,8 +1024,11 @@ void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struc prt_str(out, " cached"); if (ptr->unwritten) prt_str(out, " unwritten"); - if (bucket_valid(ca, b) && dev_ptr_stale_rcu(ca, ptr)) + int stale = dev_ptr_stale_rcu(ca, ptr); + if (stale > 0) prt_printf(out, " stale"); + else if (stale) + prt_printf(out, " invalid"); } rcu_read_unlock(); --out->atomic; diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c index 862b79f86b91e..c97fa7002b06e 100644 --- a/fs/bcachefs/io_read.c +++ b/fs/bcachefs/io_read.c @@ -777,18 +777,32 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, PTR_BUCKET_POS(ca, &ptr), BTREE_ITER_cached); - prt_printf(&buf, "Attempting to read from stale dirty pointer:\n"); - printbuf_indent_add(&buf, 2); + u8 *gen = bucket_gen(ca, iter.pos.offset); + if (gen) { - bch2_bkey_val_to_text(&buf, c, k); - prt_newline(&buf); + prt_printf(&buf, "Attempting to read from stale dirty pointer:\n"); + printbuf_indent_add(&buf, 2); - prt_printf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset)); - - ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter))); - if (!ret) { + bch2_bkey_val_to_text(&buf, c, k); prt_newline(&buf); + + prt_printf(&buf, "memory gen: %u", *gen); + + ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter))); + if (!ret) { + prt_newline(&buf); + bch2_bkey_val_to_text(&buf, c, k); + } + } else { + prt_printf(&buf, "Attempting to read from invalid bucket %llu:%llu:\n", + iter.pos.inode, iter.pos.offset); + printbuf_indent_add(&buf, 2); + + prt_printf(&buf, "first bucket %u nbuckets %llu\n", + ca->mi.first_bucket, ca->mi.nbuckets); + bch2_bkey_val_to_text(&buf, c, k); + prt_newline(&buf); } bch2_fs_inconsistent(c, "%s", buf.buf); diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c index 9401d13e31bb6..05e0cbef420bc 100644 --- a/fs/bcachefs/io_write.c +++ b/fs/bcachefs/io_write.c @@ -1220,7 +1220,7 @@ static void bch2_nocow_write(struct bch_write_op *op) DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets; u32 snapshot; struct bucket_to_lock *stale_at; - int ret; + int stale, ret; if (op->flags & BCH_WRITE_MOVE) return; @@ -1299,7 +1299,8 @@ static void bch2_nocow_write(struct bch_write_op *op) BUCKET_NOCOW_LOCK_UPDATE); rcu_read_lock(); - bool stale = gen_after(*bucket_gen(ca, i->b.offset), i->gen); + u8 *gen = bucket_gen(ca, i->b.offset); + stale = !gen ? 
-1 : gen_after(*gen, i->gen); rcu_read_unlock(); if (unlikely(stale)) { @@ -1380,8 +1381,18 @@ static void bch2_nocow_write(struct bch_write_op *op) break; } - /* We can retry this: */ - ret = -BCH_ERR_transaction_restart; + struct printbuf buf = PRINTBUF; + if (bch2_fs_inconsistent_on(stale < 0, c, + "pointer to invalid bucket in nocow path on device %llu\n %s", + stale_at->b.inode, + (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { + ret = -EIO; + } else { + /* We can retry this: */ + ret = -BCH_ERR_transaction_restart; + } + printbuf_exit(&buf); + goto err_get_ioref; } From b79922009214e6ab23c07db32a5606a45710f86e Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Sat, 8 Jun 2024 17:36:24 -0400 Subject: [PATCH 444/537] bcachefs: Add missing synchronize_srcu_expedited() call when shutting down We use the polling interface to srcu for tracking pending frees; when shutting down we don't need to wait for an srcu barrier to free them, but SRCU still gets confused if we shutdown with an outstanding grace period. Reported-by: syzbot+6a038377f0a594d7d44e@syzkaller.appspotmail.com Reported-by: syzbot+0ece6edfd05ed20e32d9@syzkaller.appspotmail.com Signed-off-by: Kent Overstreet --- fs/bcachefs/btree_iter.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index 53b63be537e55..3694c600a3add 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -3401,8 +3401,10 @@ void bch2_fs_btree_iter_exit(struct bch_fs *c) bch2_time_stats_exit(&s->lock_hold_times); } - if (c->btree_trans_barrier_initialized) + if (c->btree_trans_barrier_initialized) { + synchronize_srcu_expedited(&c->btree_trans_barrier); cleanup_srcu_struct(&c->btree_trans_barrier); + } mempool_exit(&c->btree_trans_mem_pool); mempool_exit(&c->btree_trans_pool); } From 31849bf07e0fb3e7d050c086b77ebdb6cec89167 Mon Sep 17 00:00:00 2001 From: Arunpravin Paneer Selvam Date: Mon, 10 Jun 2024 23:34:01 +0530 Subject: [PATCH 445/537] drm/amdgpu: Fix the BO release clear memory warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This happens when the amdgpu_bo_release_notify running before amdgpu_ttm_set_buffer_funcs_status set the buffer funcs to enabled. check the buffer funcs enablement before calling the fill buffer memory. v2:(Christian) - Apply it only for GEM buffers and since GEM buffers are only allocated/freed while the driver is loaded we never run into the issue to clear with buffer funcs disabled. v3:(Mario) - drop the stable tag as this will presumably go into a -fixes PR for 6.10 Log snip: *ERROR* Trying to clear memory with ring turned off. 
RIP: 0010:amdgpu_bo_release_notify+0x201/0x220 [amdgpu] Fixes: a68c7eaa7a8f ("drm/amdgpu: Enable clear page functionality") Signed-off-by: Arunpravin Paneer Selvam Reviewed-by: Christian König Tested-by: Mikhail Gavrilov Tested-by: Richard Gong Suggested-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20240610180401.9540-1-Arunpravin.PaneerSelvam@amd.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 67c234bcf89ff..3adaa46701036 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -108,6 +108,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, memset(&bp, 0, sizeof(bp)); *obj = NULL; + flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; bp.size = size; bp.byte_align = alignment; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 8d8c39be61295..c556c8b653fa4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -604,8 +604,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev, if (!amdgpu_bo_support_uswc(bo->flags)) bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; - bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; - bo->tbo.bdev = &adev->mman.bdev; if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA | AMDGPU_GEM_DOMAIN_GDS)) From 07c54cc5988f19c9642fd463c2dbdac7fc52f777 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 28 May 2024 14:20:19 +0200 Subject: [PATCH 446/537] tick/nohz_full: Don't abuse smp_call_function_single() in tick_setup_device() After the recent commit 5097cbcb38e6 ("sched/isolation: Prevent boot crash when the boot CPU is nohz_full") the kernel no longer crashes, but there is another problem. In this case tick_setup_device() calls tick_take_do_timer_from_boot() to update tick_do_timer_cpu and this triggers the WARN_ON_ONCE(irqs_disabled) in smp_call_function_single(). Kill tick_take_do_timer_from_boot() and just use WRITE_ONCE(), the new comment explains why this is safe (thanks Thomas!). 
Fixes: 08ae95f4fd3b ("nohz_full: Allow the boot CPU to be nohz_full") Signed-off-by: Oleg Nesterov Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20240528122019.GA28794@redhat.com Link: https://lore.kernel.org/all/20240522151742.GA10400@redhat.com --- kernel/time/tick-common.c | 42 +++++++++++++-------------------------- 1 file changed, 14 insertions(+), 28 deletions(-) diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index d88b13076b794..a47bcf71defcf 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -178,26 +178,6 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) } } -#ifdef CONFIG_NO_HZ_FULL -static void giveup_do_timer(void *info) -{ - int cpu = *(unsigned int *)info; - - WARN_ON(tick_do_timer_cpu != smp_processor_id()); - - tick_do_timer_cpu = cpu; -} - -static void tick_take_do_timer_from_boot(void) -{ - int cpu = smp_processor_id(); - int from = tick_do_timer_boot_cpu; - - if (from >= 0 && from != cpu) - smp_call_function_single(from, giveup_do_timer, &cpu, 1); -} -#endif - /* * Setup the tick device */ @@ -221,19 +201,25 @@ static void tick_setup_device(struct tick_device *td, tick_next_period = ktime_get(); #ifdef CONFIG_NO_HZ_FULL /* - * The boot CPU may be nohz_full, in which case set - * tick_do_timer_boot_cpu so the first housekeeping - * secondary that comes up will take do_timer from - * us. + * The boot CPU may be nohz_full, in which case the + * first housekeeping secondary will take do_timer() + * from it. */ if (tick_nohz_full_cpu(cpu)) tick_do_timer_boot_cpu = cpu; - } else if (tick_do_timer_boot_cpu != -1 && - !tick_nohz_full_cpu(cpu)) { - tick_take_do_timer_from_boot(); + } else if (tick_do_timer_boot_cpu != -1 && !tick_nohz_full_cpu(cpu)) { tick_do_timer_boot_cpu = -1; - WARN_ON(READ_ONCE(tick_do_timer_cpu) != cpu); + /* + * The boot CPU will stay in periodic (NOHZ disabled) + * mode until clocksource_done_booting() called after + * smp_init() selects a high resolution clocksource and + * timekeeping_notify() kicks the NOHZ stuff alive. + * + * So this WRITE_ONCE can only race with the READ_ONCE + * check in tick_periodic() but this race is harmless. + */ + WRITE_ONCE(tick_do_timer_cpu, cpu); #endif } From 7124a8982b621e1a8af81c17f44b90587cdd161c Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Mon, 10 Jun 2024 20:48:41 -0400 Subject: [PATCH 447/537] bcachefs: Add missing bch_inode_info.ei_flags init Signed-off-by: Kent Overstreet --- fs/bcachefs/fs.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index f6f1dbc1fe15c..77126992dba8c 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -227,7 +227,9 @@ static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c) mutex_init(&inode->ei_update_lock); two_state_lock_init(&inode->ei_pagecache_lock); INIT_LIST_HEAD(&inode->ei_vfs_inode_list); + inode->ei_flags = 0; mutex_init(&inode->ei_quota_lock); + memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush)); inode->v.i_state = 0; if (unlikely(inode_init_always(c->vfs_sb, &inode->v))) { From 44180feaccf266d9b0b28cc4ceaac019817deb5c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 7 Jun 2024 17:53:32 +0200 Subject: [PATCH 448/537] net/sched: initialize noop_qdisc owner When the noop_qdisc owner isn't initialized, then it will be 0, so packets will erroneously be regarded as having been subject to recursion as long as only CPU 0 queues them. For non-SMP, that's all packets, of course. 
This causes a change in what's reported to userspace, normally noop_qdisc would drop packets silently, but with this change the syscall returns -ENOBUFS if RECVERR is also set on the socket. Fix this by initializing the owner field to -1, just like it would be for dynamically allocated qdiscs by qdisc_alloc(). Fixes: 0f022d32c3ec ("net/sched: Fix mirred deadlock on device recursion") Signed-off-by: Johannes Berg Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20240607175340.786bfb938803.I493bf8422e36be4454c08880a8d3703cea8e421a@changeid Signed-off-by: Jakub Kicinski --- net/sched/sch_generic.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 2a637a17061b9..e22ff003d52ee 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -676,6 +676,7 @@ struct Qdisc noop_qdisc = { .qlen = 0, .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock), }, + .owner = -1, }; EXPORT_SYMBOL(noop_qdisc); From 8031b58c3a9b1db3ef68b3bd749fbee2e1e1aaa3 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Fri, 7 Jun 2024 17:01:48 +0200 Subject: [PATCH 449/537] mptcp: ensure snd_una is properly initialized on connect This is strictly related to commit fb7a0d334894 ("mptcp: ensure snd_nxt is properly initialized on connect"). It turns out that syzkaller can trigger the retransmit after fallback and before processing any other incoming packet - so that snd_una is still left uninitialized. Address the issue explicitly initializing snd_una together with snd_nxt and write_seq. Suggested-by: Mat Martineau Fixes: 8fd738049ac3 ("mptcp: fallback in case of simultaneous connect") Cc: stable@vger.kernel.org Reported-by: Christoph Paasch Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/485 Signed-off-by: Paolo Abeni Reviewed-by: Mat Martineau Signed-off-by: Matthieu Baerts (NGI0) Link: https://lore.kernel.org/r/20240607-upstream-net-20240607-misc-fixes-v1-1-1ab9ddfa3d00@kernel.org Signed-off-by: Jakub Kicinski --- net/mptcp/protocol.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 96b113854bd3c..bb7dca8aa2d9c 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -3740,6 +3740,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) WRITE_ONCE(msk->write_seq, subflow->idsn); WRITE_ONCE(msk->snd_nxt, subflow->idsn); + WRITE_ONCE(msk->snd_una, subflow->idsn); if (likely(!__mptcp_check_fallback(msk))) MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE); From 6a09788c1a66e3d8b04b3b3e7618cc817bb60ae9 Mon Sep 17 00:00:00 2001 From: YonglongLi Date: Fri, 7 Jun 2024 17:01:49 +0200 Subject: [PATCH 450/537] mptcp: pm: inc RmAddr MIB counter once per RM_ADDR ID The RmAddr MIB counter is supposed to be incremented once when a valid RM_ADDR has been received. Before this patch, it could have been incremented as many times as the number of subflows connected to the linked address ID, so it could have been 0, 1 or more than 1. The "RmSubflow" is incremented after a local operation. In this case, it is normal to tied it with the number of subflows that have been actually removed. The "remove invalid addresses" MP Join subtest has been modified to validate this case. A broadcast IP address is now used instead: the client will not be able to create a subflow to this address. The consequence is that when receiving the RM_ADDR with the ID attached to this broadcast IP address, no subflow linked to this ID will be found. 
Fixes: 7a7e52e38a40 ("mptcp: add RM_ADDR related mibs") Cc: stable@vger.kernel.org Co-developed-by: Matthieu Baerts (NGI0) Signed-off-by: Matthieu Baerts (NGI0) Signed-off-by: YonglongLi Signed-off-by: Matthieu Baerts (NGI0) Link: https://lore.kernel.org/r/20240607-upstream-net-20240607-misc-fixes-v1-2-1ab9ddfa3d00@kernel.org Signed-off-by: Jakub Kicinski --- net/mptcp/pm_netlink.c | 5 ++++- tools/testing/selftests/net/mptcp/mptcp_join.sh | 3 ++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 7f53e022e27ec..766a8409fa677 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -814,10 +814,13 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk, spin_lock_bh(&msk->pm.lock); removed = true; - __MPTCP_INC_STATS(sock_net(sk), rm_type); + if (rm_type == MPTCP_MIB_RMSUBFLOW) + __MPTCP_INC_STATS(sock_net(sk), rm_type); } if (rm_type == MPTCP_MIB_RMSUBFLOW) __set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap); + else if (rm_type == MPTCP_MIB_RMADDR) + __MPTCP_INC_STATS(sock_net(sk), rm_type); if (!removed) continue; diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index 2b66c5fa71ebf..aea314d140c95 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -2250,7 +2250,8 @@ remove_tests() pm_nl_set_limits $ns1 3 3 pm_nl_add_endpoint $ns1 10.0.12.1 flags signal pm_nl_add_endpoint $ns1 10.0.3.1 flags signal - pm_nl_add_endpoint $ns1 10.0.14.1 flags signal + # broadcast IP: no packet for this address will be received on ns1 + pm_nl_add_endpoint $ns1 224.0.0.1 flags signal pm_nl_set_limits $ns2 3 3 addr_nr_ns1=-3 speed=10 \ run_tests $ns1 $ns2 10.0.1.1 From 40eec1795cc27b076d49236649a29507c7ed8c2d Mon Sep 17 00:00:00 2001 From: YonglongLi Date: Fri, 7 Jun 2024 17:01:50 +0200 Subject: [PATCH 451/537] mptcp: pm: update add_addr counters after connect The creation of new subflows can fail for different reasons. If no subflow have been created using the received ADD_ADDR, the related counters should not be updated, otherwise they will never be decremented for events related to this ID later on. For the moment, the number of accepted ADD_ADDR is only decremented upon the reception of a related RM_ADDR, and only if the remote address ID is currently being used by at least one subflow. In other words, if no subflow can be created with the received address, the counter will not be decremented. In this case, it is then important not to increment pm.add_addr_accepted counter, and not to modify pm.accept_addr bit. Note that this patch does not modify the behaviour in case of failures later on, e.g. if the MP Join is dropped or rejected. The "remove invalid addresses" MP Join subtest has been modified to validate this case. The broadcast IP address is added before the "valid" address that will be used to successfully create a subflow, and the limit is decreased by one: without this patch, it was not possible to create the last subflow, because: - the broadcast address would have been accepted even if it was not usable: the creation of a subflow to this address results in an error, - the limit of 2 accepted ADD_ADDR would have then been reached. 
Fixes: 01cacb00b35c ("mptcp: add netlink-based PM") Cc: stable@vger.kernel.org Co-developed-by: Matthieu Baerts (NGI0) Signed-off-by: Matthieu Baerts (NGI0) Signed-off-by: YonglongLi Reviewed-by: Mat Martineau Signed-off-by: Matthieu Baerts (NGI0) Link: https://lore.kernel.org/r/20240607-upstream-net-20240607-misc-fixes-v1-3-1ab9ddfa3d00@kernel.org Signed-off-by: Jakub Kicinski --- net/mptcp/pm_netlink.c | 16 ++++++++++------ tools/testing/selftests/net/mptcp/mptcp_join.sh | 4 ++-- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 766a8409fa677..ea9e5817b9e9d 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -677,6 +677,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) unsigned int add_addr_accept_max; struct mptcp_addr_info remote; unsigned int subflows_max; + bool sf_created = false; int i, nr; add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk); @@ -704,15 +705,18 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) if (nr == 0) return; - msk->pm.add_addr_accepted++; - if (msk->pm.add_addr_accepted >= add_addr_accept_max || - msk->pm.subflows >= subflows_max) - WRITE_ONCE(msk->pm.accept_addr, false); - spin_unlock_bh(&msk->pm.lock); for (i = 0; i < nr; i++) - __mptcp_subflow_connect(sk, &addrs[i], &remote); + if (__mptcp_subflow_connect(sk, &addrs[i], &remote) == 0) + sf_created = true; spin_lock_bh(&msk->pm.lock); + + if (sf_created) { + msk->pm.add_addr_accepted++; + if (msk->pm.add_addr_accepted >= add_addr_accept_max || + msk->pm.subflows >= subflows_max) + WRITE_ONCE(msk->pm.accept_addr, false); + } } void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk) diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index aea314d140c95..108aeeb84ef10 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -2249,10 +2249,10 @@ remove_tests() if reset "remove invalid addresses"; then pm_nl_set_limits $ns1 3 3 pm_nl_add_endpoint $ns1 10.0.12.1 flags signal - pm_nl_add_endpoint $ns1 10.0.3.1 flags signal # broadcast IP: no packet for this address will be received on ns1 pm_nl_add_endpoint $ns1 224.0.0.1 flags signal - pm_nl_set_limits $ns2 3 3 + pm_nl_add_endpoint $ns1 10.0.3.1 flags signal + pm_nl_set_limits $ns2 2 2 addr_nr_ns1=-3 speed=10 \ run_tests $ns1 $ns2 10.0.1.1 chk_join_nr 1 1 1 From 74acb250e103f42be372177628f9272b6e888c49 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Fri, 7 Jun 2024 17:01:51 +0200 Subject: [PATCH 452/537] mailmap: map Geliang's new email address Just like my other email addresses, map my new one to kernel.org account too. My new email address uses "last name, first name" format, which is different from my other email addresses. This mailmap is also used to indicate that it is actually the same person. 
Suggested-by: Mat Martineau Suggested-by: Matthieu Baerts Signed-off-by: Geliang Tang Reviewed-by: Matthieu Baerts (NGI0) Signed-off-by: Matthieu Baerts (NGI0) Link: https://lore.kernel.org/r/20240607-upstream-net-20240607-misc-fixes-v1-4-1ab9ddfa3d00@kernel.org Signed-off-by: Jakub Kicinski --- .mailmap | 1 + 1 file changed, 1 insertion(+) diff --git a/.mailmap b/.mailmap index efd9fa867a8ec..9af91bd3584bd 100644 --- a/.mailmap +++ b/.mailmap @@ -217,6 +217,7 @@ Geliang Tang Geliang Tang Geliang Tang Geliang Tang +Geliang Tang Georgi Djakov Gerald Schaefer Gerald Schaefer From 36534d3c54537bf098224a32dc31397793d4594d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 7 Jun 2024 12:56:52 +0000 Subject: [PATCH 453/537] tcp: use signed arithmetic in tcp_rtx_probe0_timed_out() Due to timer wheel implementation, a timer will usually fire after its schedule. For instance, for HZ=1000, a timeout between 512ms and 4s has a granularity of 64ms. For this range of values, the extra delay could be up to 63ms. For TCP, this means that tp->rcv_tstamp may be after inet_csk(sk)->icsk_timeout whenever the timer interrupt finally triggers, if one packet came during the extra delay. We need to make sure tcp_rtx_probe0_timed_out() handles this case. Fixes: e89688e3e978 ("net: tcp: fix unexcepted socket die when snd_wnd is 0") Signed-off-by: Eric Dumazet Cc: Menglong Dong Acked-by: Neal Cardwell Reviewed-by: Jason Xing Link: https://lore.kernel.org/r/20240607125652.1472540-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp_timer.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 83fe7f62f7f10..5bfd76a31af6d 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -485,8 +485,12 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk, { const struct tcp_sock *tp = tcp_sk(sk); const int timeout = TCP_RTO_MAX * 2; - u32 rcv_delta; + s32 rcv_delta; + /* Note: timer interrupt might have been delayed by at least one jiffy, + * and tp->rcv_tstamp might very well have been written recently. + * rcv_delta can thus be negative. + */ rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp; if (rcv_delta <= timeout) return false; From b96a225377b6602299a03d2ce3c289b68cd41bb7 Mon Sep 17 00:00:00 2001 From: Vasily Khoruzhick Date: Fri, 7 Jun 2024 15:09:32 -0700 Subject: [PATCH 454/537] drm/nouveau: don't attempt to schedule hpd_work on headless cards If the card doesn't have display hardware, hpd_work and hpd_lock are left uninitialized which causes BUG when attempting to schedule hpd_work on runtime PM resume. Fix it by adding headless flag to DRM and skip any hpd if it's set. 
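As a rough illustration of the guard described above, the following standalone C sketch models the idea. It is not the nouveau code: drm_model, display_create(), hpd_resume() and display_fini() are invented stand-ins. A headless flag is set when display probing reports -ENODEV, and every hot-plug path checks that flag before touching the never-initialized work item or lock.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct drm_model {
    bool headless;        /* set when no display hw was found */
    bool hpd_initialized; /* stands in for hpd_work/hpd_lock setup */
};

static void display_create(struct drm_model *drm, int disp_err)
{
    if (disp_err == -ENODEV) {
        /* No display engine: remember it and skip all hpd setup. */
        drm->headless = true;
        return;
    }
    drm->hpd_initialized = true;
}

static void hpd_resume(struct drm_model *drm)
{
    /* Every hpd path checks the flag before scheduling anything. */
    if (drm->headless)
        return;
    printf("scheduling hpd work\n");
}

static void display_fini(struct drm_model *drm, bool runtime)
{
    if (!runtime && !drm->headless)
        printf("cancelling hpd work\n");
}

int main(void)
{
    struct drm_model drm = { 0 };

    display_create(&drm, -ENODEV); /* headless card */
    hpd_resume(&drm);              /* safely does nothing */
    display_fini(&drm, false);     /* no cancel of uninitialized work */
    return 0;
}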
Fixes: ae1aadb1eb8d ("nouveau: don't fail driver load if no display hw present.") Link: https://gitlab.freedesktop.org/drm/nouveau/-/issues/337 Signed-off-by: Vasily Khoruzhick Reviewed-by: Ben Skeggs Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20240607221032.25918-1-anarsoul@gmail.com --- drivers/gpu/drm/nouveau/dispnv04/disp.c | 2 +- drivers/gpu/drm/nouveau/dispnv50/disp.c | 2 +- drivers/gpu/drm/nouveau/nouveau_display.c | 6 +++++- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 13705c5f14973..4b7497a8755cd 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -68,7 +68,7 @@ nv04_display_fini(struct drm_device *dev, bool runtime, bool suspend) if (nv_two_heads(dev)) NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0); - if (!runtime) + if (!runtime && !drm->headless) cancel_work_sync(&drm->hpd_work); if (!suspend) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 88728a0b2c259..674dc567e1798 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2680,7 +2680,7 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend) nv50_mstm_fini(nouveau_encoder(encoder)); } - if (!runtime) + if (!runtime && !drm->headless) cancel_work_sync(&drm->hpd_work); } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index aed5d5b51b43d..d4725a968827e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -450,6 +450,9 @@ nouveau_display_hpd_resume(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); + if (drm->headless) + return; + spin_lock_irq(&drm->hpd_lock); drm->hpd_pending = ~0; spin_unlock_irq(&drm->hpd_lock); @@ -635,7 +638,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime) } drm_connector_list_iter_end(&conn_iter); - if (!runtime) + if (!runtime && !drm->headless) cancel_work_sync(&drm->hpd_work); drm_kms_helper_poll_disable(dev); @@ -729,6 +732,7 @@ nouveau_display_create(struct drm_device *dev) /* no display hw */ if (ret == -ENODEV) { ret = 0; + drm->headless = true; goto disp_create_err; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e239c6bf4afa4..25fca98a20bcd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -276,6 +276,7 @@ struct nouveau_drm { /* modesetting */ struct nvbios vbios; struct nouveau_display *display; + bool headless; struct work_struct hpd_work; spinlock_t hpd_lock; u32 hpd_pending; From b01b8151efe47a432f3f73623a6c1438727e7880 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sun, 9 Jun 2024 10:21:39 +0200 Subject: [PATCH 455/537] s390: Update defconfigs Signed-off-by: Heiko Carstens Acked-by: Vasily Gorbik Signed-off-by: Vasily Gorbik --- arch/s390/configs/debug_defconfig | 43 ++++++++++++++++++++++------ arch/s390/configs/defconfig | 40 ++++++++++++++++++++++---- arch/s390/configs/zfcpdump_defconfig | 5 +--- 3 files changed, 69 insertions(+), 19 deletions(-) diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 145342e46ea83..8c4adece89112 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -43,7 +43,6 @@ CONFIG_PROFILING=y 
CONFIG_KEXEC=y CONFIG_KEXEC_FILE=y CONFIG_KEXEC_SIG=y -CONFIG_CRASH_DUMP=y CONFIG_LIVEPATCH=y CONFIG_MARCH_Z13=y CONFIG_NR_CPUS=512 @@ -51,6 +50,7 @@ CONFIG_NUMA=y CONFIG_HZ_100=y CONFIG_CERT_STORE=y CONFIG_EXPOLINE=y +# CONFIG_EXPOLINE_EXTERN is not set CONFIG_EXPOLINE_AUTO=y CONFIG_CHSC_SCH=y CONFIG_VFIO_CCW=m @@ -76,6 +76,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG_SHA256=y CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_WBT=y CONFIG_BLK_CGROUP_IOLATENCY=y @@ -100,7 +101,6 @@ CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_CMA_DEBUG=y CONFIG_CMA_DEBUGFS=y CONFIG_CMA_SYSFS=y CONFIG_CMA_AREAS=7 @@ -119,6 +119,7 @@ CONFIG_UNIX_DIAG=m CONFIG_XFRM_USER=m CONFIG_NET_KEY=m CONFIG_SMC_DIAG=m +CONFIG_SMC_LO=y CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y @@ -133,7 +134,6 @@ CONFIG_IP_MROUTE=y CONFIG_IP_MROUTE_MULTIPLE_TABLES=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -167,6 +167,7 @@ CONFIG_BRIDGE_NETFILTER=m CONFIG_NETFILTER_NETLINK_HOOK=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y CONFIG_NF_CONNTRACK_PROCFS=y CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CONNTRACK_TIMEOUT=y @@ -183,17 +184,39 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m -CONFIG_NETFILTER_XTABLES_COMPAT=y +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NF_FLOW_TABLE_PROCFS=y CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m @@ -206,8 +229,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m @@ -216,6 +241,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m @@ -230,6 +256,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m CONFIG_NETFILTER_XT_MATCH_IPRANGE=m CONFIG_NETFILTER_XT_MATCH_IPVS=m CONFIG_NETFILTER_XT_MATCH_LENGTH=m @@ -247,6 +274,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m 
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -302,7 +330,6 @@ CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m CONFIG_IP_NF_RAW=m CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NFT_FIB_IPV6=m @@ -373,7 +400,6 @@ CONFIG_NET_ACT_POLICE=m CONFIG_NET_ACT_GACT=m CONFIG_GACT_PROB=y CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_IPT=m CONFIG_NET_ACT_NAT=m CONFIG_NET_ACT_PEDIT=m CONFIG_NET_ACT_SIMP=m @@ -462,6 +488,7 @@ CONFIG_DM_VERITY=m CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y CONFIG_DM_SWITCH=m CONFIG_DM_INTEGRITY=m +CONFIG_DM_VDO=m CONFIG_NETDEVICES=y CONFIG_BONDING=m CONFIG_DUMMY=m @@ -574,7 +601,6 @@ CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m CONFIG_DIAG288_WATCHDOG=m -# CONFIG_DRM_DEBUG_MODESET_LOCK is not set CONFIG_FB=y # CONFIG_FB_DEVICE is not set CONFIG_FRAMEBUFFER_CONSOLE=y @@ -645,7 +671,6 @@ CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_EXFAT_FS=m CONFIG_NTFS_FS=m -CONFIG_NTFS_RW=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y @@ -663,6 +688,7 @@ CONFIG_SQUASHFS_XZ=y CONFIG_SQUASHFS_ZSTD=y CONFIG_ROMFS_FS=m CONFIG_NFS_FS=m +CONFIG_NFS_V2=m CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y @@ -879,6 +905,5 @@ CONFIG_RBTREE_TEST=y CONFIG_INTERVAL_TREE_TEST=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y -CONFIG_STRING_SELFTEST=y CONFIG_TEST_BITOPS=m CONFIG_TEST_BPF=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index dc237896f99d3..6dd11d3b6aaa6 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -41,7 +41,6 @@ CONFIG_PROFILING=y CONFIG_KEXEC=y CONFIG_KEXEC_FILE=y CONFIG_KEXEC_SIG=y -CONFIG_CRASH_DUMP=y CONFIG_LIVEPATCH=y CONFIG_MARCH_Z13=y CONFIG_NR_CPUS=512 @@ -49,6 +48,7 @@ CONFIG_NUMA=y CONFIG_HZ_100=y CONFIG_CERT_STORE=y CONFIG_EXPOLINE=y +# CONFIG_EXPOLINE_EXTERN is not set CONFIG_EXPOLINE_AUTO=y CONFIG_CHSC_SCH=y CONFIG_VFIO_CCW=m @@ -71,6 +71,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG_SHA256=y CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_WBT=y CONFIG_BLK_CGROUP_IOLATENCY=y @@ -110,6 +111,7 @@ CONFIG_UNIX_DIAG=m CONFIG_XFRM_USER=m CONFIG_NET_KEY=m CONFIG_SMC_DIAG=m +CONFIG_SMC_LO=y CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y @@ -124,7 +126,6 @@ CONFIG_IP_MROUTE=y CONFIG_IP_MROUTE_MULTIPLE_TABLES=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m CONFIG_INET_AH=m CONFIG_INET_ESP=m @@ -158,6 +159,7 @@ CONFIG_BRIDGE_NETFILTER=m CONFIG_NETFILTER_NETLINK_HOOK=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y CONFIG_NF_CONNTRACK_PROCFS=y CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CONNTRACK_TIMEOUT=y @@ -174,17 +176,39 @@ CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m CONFIG_NFT_FIB_INET=m -CONFIG_NETFILTER_XTABLES_COMPAT=y +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m 
+CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NFT_REJECT_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NF_FLOW_TABLE_PROCFS=y CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m @@ -197,8 +221,10 @@ CONFIG_NETFILTER_XT_TARGET_HMARK=m CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m @@ -207,6 +233,7 @@ CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m CONFIG_NETFILTER_XT_MATCH_CLUSTER=m CONFIG_NETFILTER_XT_MATCH_COMMENT=m CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m @@ -221,6 +248,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m CONFIG_NETFILTER_XT_MATCH_IPRANGE=m CONFIG_NETFILTER_XT_MATCH_IPVS=m CONFIG_NETFILTER_XT_MATCH_LENGTH=m @@ -238,6 +266,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m CONFIG_NETFILTER_XT_MATCH_STRING=m @@ -293,7 +322,6 @@ CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m CONFIG_IP_NF_RAW=m CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NFT_FIB_IPV6=m @@ -363,7 +391,6 @@ CONFIG_NET_ACT_POLICE=m CONFIG_NET_ACT_GACT=m CONFIG_GACT_PROB=y CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_IPT=m CONFIG_NET_ACT_NAT=m CONFIG_NET_ACT_PEDIT=m CONFIG_NET_ACT_SIMP=m @@ -452,6 +479,7 @@ CONFIG_DM_VERITY=m CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y CONFIG_DM_SWITCH=m CONFIG_DM_INTEGRITY=m +CONFIG_DM_VDO=m CONFIG_NETDEVICES=y CONFIG_BONDING=m CONFIG_DUMMY=m @@ -630,7 +658,6 @@ CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_EXFAT_FS=m CONFIG_NTFS_FS=m -CONFIG_NTFS_RW=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y @@ -649,6 +676,7 @@ CONFIG_SQUASHFS_XZ=y CONFIG_SQUASHFS_ZSTD=y CONFIG_ROMFS_FS=m CONFIG_NFS_FS=m +CONFIG_NFS_V2=m CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=m CONFIG_NFS_SWAP=y diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index c51f3ec4eb28a..8c2b61363bab9 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -9,25 +9,22 @@ CONFIG_BPF_SYSCALL=y CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_KEXEC=y -CONFIG_CRASH_DUMP=y CONFIG_MARCH_Z13=y CONFIG_NR_CPUS=2 CONFIG_HZ_100=y # CONFIG_CHSC_SCH is not set # CONFIG_SCM_BUS is not set +# CONFIG_AP is not set # CONFIG_PFAULT is not set # CONFIG_S390_HYPFS is not set # CONFIG_VIRTUALIZATION is not set # CONFIG_S390_GUEST is not set # CONFIG_SECCOMP is not set -# CONFIG_GCC_PLUGINS is not set # CONFIG_BLOCK_LEGACY_AUTOLOAD is not set CONFIG_PARTITION_ADVANCED=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_SWAP is not set # CONFIG_COMPAT_BRK is not set -# CONFIG_COMPACTION is not set -# CONFIG_MIGRATION is not set CONFIG_NET=y # CONFIG_IUCV is not set # 
CONFIG_PCPU_DEV_REFCNT is not set From d8073dc6bc04a061660b31e49a990478a73f1883 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Fri, 7 Jun 2024 14:19:48 +0200 Subject: [PATCH 456/537] s390/mm: Allow large pages only for aligned physical addresses Do not allow creation of large pages against physical addresses, which itself are not aligned on the correct boundary. Failure to do so might lead to referencing wrong memory as result of the way DAT works. Fixes: c98d2ecae08f ("s390/mm: Uncouple physical vs virtual address spaces") Reviewed-by: Heiko Carstens Signed-off-by: Alexander Gordeev Signed-off-by: Vasily Gorbik --- arch/s390/boot/vmem.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c index 96d48b7112d40..1f7dbb4b66b70 100644 --- a/arch/s390/boot/vmem.c +++ b/arch/s390/boot/vmem.c @@ -267,15 +267,21 @@ static bool large_allowed(enum populate_mode mode) static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end, enum populate_mode mode) { + unsigned long size = end - addr; + return machine.has_edat2 && large_allowed(mode) && - IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE; + IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) && + IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE); } static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end, enum populate_mode mode) { + unsigned long size = end - addr; + return machine.has_edat1 && large_allowed(mode) && - IS_ALIGNED(addr, PMD_SIZE) && (end - addr) >= PMD_SIZE; + IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) && + IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE); } static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end, From 693d41f7c938f92d881e6a51525e6c132a186afd Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Fri, 7 Jun 2024 14:19:50 +0200 Subject: [PATCH 457/537] s390/mm: Restore mapping of kernel image using large pages Since physical and virtual kernel address spaces are uncoupled the kernel image is not mapped using large segment pages anymore, which is a regression. Put the kernel image at the same large segment page offset in physical memory as in virtual memory. Such approach preserves the existing number of bits of entropy used for randomization of the kernel location in virtual memory when KASLR is on. As result, the kernel is mapped using large segment pages. Fixes: c98d2ecae08f ("s390/mm: Uncouple physical vs virtual address spaces") Reported-by: Heiko Carstens Reviewed-by: Heiko Carstens Signed-off-by: Alexander Gordeev Signed-off-by: Vasily Gorbik --- arch/s390/boot/startup.c | 27 ++++++++++++++++++++++++--- arch/s390/boot/vmem.c | 2 +- arch/s390/boot/vmlinux.lds.S | 1 + 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index 182aac6a0f779..48ef5fe5c08aa 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -384,7 +384,7 @@ static void fixup_vmlinux_info(void) void startup_kernel(void) { unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size; - unsigned long nokaslr_offset_phys = mem_safe_offset(); + unsigned long nokaslr_offset_phys, kaslr_large_page_offset; unsigned long amode31_lma = 0; unsigned long max_physmem_end; unsigned long asce_limit; @@ -393,6 +393,12 @@ void startup_kernel(void) fixup_vmlinux_info(); setup_lpp(); + + /* + * Non-randomized kernel physical start address must be _SEGMENT_SIZE + * aligned (see blow). 
+ */ + nokaslr_offset_phys = ALIGN(mem_safe_offset(), _SEGMENT_SIZE); safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size); /* @@ -425,10 +431,25 @@ void startup_kernel(void) save_ipl_cert_comp_list(); rescue_initrd(safe_addr, ident_map_size); - if (kaslr_enabled()) - __kaslr_offset_phys = randomize_within_range(kernel_size, THREAD_SIZE, 0, ident_map_size); + /* + * __kaslr_offset_phys must be _SEGMENT_SIZE aligned, so the lower + * 20 bits (the offset within a large page) are zero. Copy the last + * 20 bits of __kaslr_offset, which is THREAD_SIZE aligned, to + * __kaslr_offset_phys. + * + * With this the last 20 bits of __kaslr_offset_phys and __kaslr_offset + * are identical, which is required to allow for large mappings of the + * kernel image. + */ + kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK; + if (kaslr_enabled()) { + unsigned long end = ident_map_size - kaslr_large_page_offset; + + __kaslr_offset_phys = randomize_within_range(kernel_size, _SEGMENT_SIZE, 0, end); + } if (!__kaslr_offset_phys) __kaslr_offset_phys = nokaslr_offset_phys; + __kaslr_offset_phys |= kaslr_large_page_offset; kaslr_adjust_vmlinux_info(__kaslr_offset_phys); physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size); deploy_kernel((void *)__kaslr_offset_phys); diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c index 1f7dbb4b66b70..40cfce2687c43 100644 --- a/arch/s390/boot/vmem.c +++ b/arch/s390/boot/vmem.c @@ -261,7 +261,7 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m static bool large_allowed(enum populate_mode mode) { - return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY); + return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL); } static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end, diff --git a/arch/s390/boot/vmlinux.lds.S b/arch/s390/boot/vmlinux.lds.S index 1fe5a1d3ff60d..a750711d44c86 100644 --- a/arch/s390/boot/vmlinux.lds.S +++ b/arch/s390/boot/vmlinux.lds.S @@ -109,6 +109,7 @@ SECTIONS #ifdef CONFIG_KERNEL_UNCOMPRESSED . = ALIGN(PAGE_SIZE); . += AMODE31_SIZE; /* .amode31 section */ + . = ALIGN(1 << 20); /* _SEGMENT_SIZE */ #else . = ALIGN(8); #endif From c4ab9da85b9df3692f861512fe6c9812f38b7471 Mon Sep 17 00:00:00 2001 From: Davide Ornaghi Date: Wed, 5 Jun 2024 13:03:45 +0200 Subject: [PATCH 458/537] netfilter: nft_inner: validate mandatory meta and payload Check for mandatory netlink attributes in payload and meta expression when used embedded from the inner expression, otherwise NULL pointer dereference is possible from userspace. 
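A minimal standalone sketch of the validation pattern described above follows. It is not the nftables code: the attribute indices and the inner_init() helper are made up for the example. A netlink-style attribute table is an array of pointers indexed by attribute type, and unset attributes are NULL, so mandatory entries must be checked before they are dereferenced.

#include <errno.h>
#include <stdio.h>

enum { ATTR_KEY, ATTR_DREG, ATTR_MAX }; /* made-up attribute indices */

struct attr {
    unsigned int value;
};

static int inner_init(const struct attr **tb)
{
    /* Reject the request instead of dereferencing an absent attribute. */
    if (!tb[ATTR_KEY] || !tb[ATTR_DREG])
        return -EINVAL;

    printf("key=%u dreg=%u\n", tb[ATTR_KEY]->value, tb[ATTR_DREG]->value);
    return 0;
}

int main(void)
{
    const struct attr key = { .value = 1 };
    const struct attr *incomplete[ATTR_MAX] = { &key, NULL };

    /* Without the check, this call would crash on tb[ATTR_DREG]->value. */
    return inner_init(incomplete) == -EINVAL ? 0 : 1;
}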
Fixes: a150d122b6bd ("netfilter: nft_meta: add inner match support") Fixes: 3a07327d10a0 ("netfilter: nft_inner: support for inner tunnel header matching") Signed-off-by: Davide Ornaghi Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_meta.c | 3 +++ net/netfilter/nft_payload.c | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index ba0d3683a45d3..9139ce38ea7b9 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -839,6 +839,9 @@ static int nft_meta_inner_init(const struct nft_ctx *ctx, struct nft_meta *priv = nft_expr_priv(expr); unsigned int len; + if (!tb[NFTA_META_KEY] || !tb[NFTA_META_DREG]) + return -EINVAL; + priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY])); switch (priv->key) { case NFT_META_PROTOCOL: diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 0c43d748e23ae..50429cbd42da4 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -650,6 +650,10 @@ static int nft_payload_inner_init(const struct nft_ctx *ctx, struct nft_payload *priv = nft_expr_priv(expr); u32 base; + if (!tb[NFTA_PAYLOAD_BASE] || !tb[NFTA_PAYLOAD_OFFSET] || + !tb[NFTA_PAYLOAD_LEN] || !tb[NFTA_PAYLOAD_DREG]) + return -EINVAL; + base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE])); switch (base) { case NFT_PAYLOAD_TUN_HEADER: From 4e7aaa6b82d63e8ddcbfb56b4fd3d014ca586f10 Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Tue, 4 Jun 2024 15:58:03 +0200 Subject: [PATCH 459/537] netfilter: ipset: Fix race between namespace cleanup and gc in the list:set type Lion Ackermann reported that there is a race condition between namespace cleanup in ipset and the garbage collection of the list:set type. The namespace cleanup can destroy the list:set type of sets while the gc of the set type is waiting to run in rcu cleanup. The latter uses data from the destroyed set which thus leads use after free. The patch contains the following parts: - When destroying all sets, first remove the garbage collectors, then wait if needed and then destroy the sets. - Fix the badly ordered "wait then remove gc" for the destroy a single set case. - Fix the missing rcu locking in the list:set type in the userspace test case. - Use proper RCU list handlings in the list:set type. The patch depends on c1193d9bbbd3 (netfilter: ipset: Add list flush to cancel_gc). 
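To illustrate the ordering the patch establishes, here is a standalone C sketch. It is not the ipset code: set_model, wait_for_pending_callbacks() and destroy_all() are invented, with wait_for_pending_callbacks() loosely standing in for rcu_barrier(). The sequence is the important part: stop every garbage collector first, then wait for deferred work to drain, and only then release the set memory, so no late gc run can touch freed data.

#include <stdbool.h>
#include <stdio.h>

struct set_model {
    bool gc_active;
    bool needs_wait; /* e.g. a list:set that references other sets */
};

static void wait_for_pending_callbacks(void)
{
    /* stand-in for rcu_barrier(): let queued deferred work finish */
    printf("waiting for deferred callbacks\n");
}

static void destroy_all(struct set_model *sets, int nr)
{
    bool need_wait = false;
    int i;

    /* Stage 1: stop every garbage collector before anything is freed. */
    for (i = 0; i < nr; i++) {
        sets[i].gc_active = false;
        if (sets[i].needs_wait)
            need_wait = true;
    }

    /* Stage 2: make sure already queued deferred work has finished. */
    if (need_wait)
        wait_for_pending_callbacks();

    /* Stage 3: only now is it safe to release the sets themselves. */
    for (i = 0; i < nr; i++)
        printf("freeing set %d\n", i);
}

int main(void)
{
    struct set_model sets[2] = { { true, false }, { true, true } };

    destroy_all(sets, 2);
    return 0;
}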
Fixes: 97f7cf1cd80e (netfilter: ipset: fix performance regression in swap operation) Reported-by: Lion Ackermann Tested-by: Lion Ackermann Signed-off-by: Jozsef Kadlecsik Signed-off-by: Pablo Neira Ayuso --- net/netfilter/ipset/ip_set_core.c | 81 +++++++++++++++------------ net/netfilter/ipset/ip_set_list_set.c | 30 +++++----- 2 files changed, 60 insertions(+), 51 deletions(-) diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 3184cc6be4c9d..c7ae4d9bf3d24 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -1172,23 +1172,50 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = { .len = IPSET_MAXNAMELEN - 1 }, }; +/* In order to return quickly when destroying a single set, it is split + * into two stages: + * - Cancel garbage collector + * - Destroy the set itself via call_rcu() + */ + static void -ip_set_destroy_set(struct ip_set *set) +ip_set_destroy_set_rcu(struct rcu_head *head) { - pr_debug("set: %s\n", set->name); + struct ip_set *set = container_of(head, struct ip_set, rcu); - /* Must call it without holding any lock */ set->variant->destroy(set); module_put(set->type->me); kfree(set); } static void -ip_set_destroy_set_rcu(struct rcu_head *head) +_destroy_all_sets(struct ip_set_net *inst) { - struct ip_set *set = container_of(head, struct ip_set, rcu); + struct ip_set *set; + ip_set_id_t i; + bool need_wait = false; - ip_set_destroy_set(set); + /* First cancel gc's: set:list sets are flushed as well */ + for (i = 0; i < inst->ip_set_max; i++) { + set = ip_set(inst, i); + if (set) { + set->variant->cancel_gc(set); + if (set->type->features & IPSET_TYPE_NAME) + need_wait = true; + } + } + /* Must wait for flush to be really finished */ + if (need_wait) + rcu_barrier(); + for (i = 0; i < inst->ip_set_max; i++) { + set = ip_set(inst, i); + if (set) { + ip_set(inst, i) = NULL; + set->variant->destroy(set); + module_put(set->type->me); + kfree(set); + } + } } static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, @@ -1202,11 +1229,10 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, if (unlikely(protocol_min_failed(attr))) return -IPSET_ERR_PROTOCOL; - /* Commands are serialized and references are * protected by the ip_set_ref_lock. * External systems (i.e. xt_set) must call - * ip_set_put|get_nfnl_* functions, that way we + * ip_set_nfnl_get_* functions, that way we * can safely check references here. * * list:set timer can only decrement the reference @@ -1214,8 +1240,6 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, * without holding the lock. 
*/ if (!attr[IPSET_ATTR_SETNAME]) { - /* Must wait for flush to be really finished in list:set */ - rcu_barrier(); read_lock_bh(&ip_set_ref_lock); for (i = 0; i < inst->ip_set_max; i++) { s = ip_set(inst, i); @@ -1226,15 +1250,7 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, } inst->is_destroyed = true; read_unlock_bh(&ip_set_ref_lock); - for (i = 0; i < inst->ip_set_max; i++) { - s = ip_set(inst, i); - if (s) { - ip_set(inst, i) = NULL; - /* Must cancel garbage collectors */ - s->variant->cancel_gc(s); - ip_set_destroy_set(s); - } - } + _destroy_all_sets(inst); /* Modified by ip_set_destroy() only, which is serialized */ inst->is_destroyed = false; } else { @@ -1255,12 +1271,12 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, features = s->type->features; ip_set(inst, i) = NULL; read_unlock_bh(&ip_set_ref_lock); + /* Must cancel garbage collectors */ + s->variant->cancel_gc(s); if (features & IPSET_TYPE_NAME) { /* Must wait for flush to be really finished */ rcu_barrier(); } - /* Must cancel garbage collectors */ - s->variant->cancel_gc(s); call_rcu(&s->rcu, ip_set_destroy_set_rcu); } return 0; @@ -2365,30 +2381,25 @@ ip_set_net_init(struct net *net) } static void __net_exit -ip_set_net_exit(struct net *net) +ip_set_net_pre_exit(struct net *net) { struct ip_set_net *inst = ip_set_pernet(net); - struct ip_set *set = NULL; - ip_set_id_t i; - inst->is_deleted = true; /* flag for ip_set_nfnl_put */ +} - nfnl_lock(NFNL_SUBSYS_IPSET); - for (i = 0; i < inst->ip_set_max; i++) { - set = ip_set(inst, i); - if (set) { - ip_set(inst, i) = NULL; - set->variant->cancel_gc(set); - ip_set_destroy_set(set); - } - } - nfnl_unlock(NFNL_SUBSYS_IPSET); +static void __net_exit +ip_set_net_exit(struct net *net) +{ + struct ip_set_net *inst = ip_set_pernet(net); + + _destroy_all_sets(inst); kvfree(rcu_dereference_protected(inst->ip_set_list, 1)); } static struct pernet_operations ip_set_net_ops = { .init = ip_set_net_init, + .pre_exit = ip_set_net_pre_exit, .exit = ip_set_net_exit, .id = &ip_set_net_id, .size = sizeof(struct ip_set_net), diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 54e2a1dd7f5f5..bfae7066936bb 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -79,7 +79,7 @@ list_set_kadd(struct ip_set *set, const struct sk_buff *skb, struct set_elem *e; int ret; - list_for_each_entry(e, &map->members, list) { + list_for_each_entry_rcu(e, &map->members, list) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(e, set))) continue; @@ -99,7 +99,7 @@ list_set_kdel(struct ip_set *set, const struct sk_buff *skb, struct set_elem *e; int ret; - list_for_each_entry(e, &map->members, list) { + list_for_each_entry_rcu(e, &map->members, list) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(e, set))) continue; @@ -188,9 +188,10 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext, struct list_set *map = set->data; struct set_adt_elem *d = value; struct set_elem *e, *next, *prev = NULL; - int ret; + int ret = 0; - list_for_each_entry(e, &map->members, list) { + rcu_read_lock(); + list_for_each_entry_rcu(e, &map->members, list) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(e, set))) continue; @@ -201,6 +202,7 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext, if (d->before == 0) { ret = 1; + goto out; } else if (d->before > 0) { next = list_next_entry(e, 
list); ret = !list_is_last(&e->list, &map->members) && @@ -208,9 +210,11 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext, } else { ret = prev && prev->id == d->refid; } - return ret; + goto out; } - return 0; +out: + rcu_read_unlock(); + return ret; } static void @@ -239,7 +243,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext, /* Find where to add the new entry */ n = prev = next = NULL; - list_for_each_entry(e, &map->members, list) { + list_for_each_entry_rcu(e, &map->members, list) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(e, set))) continue; @@ -316,9 +320,9 @@ list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext, { struct list_set *map = set->data; struct set_adt_elem *d = value; - struct set_elem *e, *next, *prev = NULL; + struct set_elem *e, *n, *next, *prev = NULL; - list_for_each_entry(e, &map->members, list) { + list_for_each_entry_safe(e, n, &map->members, list) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(e, set))) continue; @@ -424,14 +428,8 @@ static void list_set_destroy(struct ip_set *set) { struct list_set *map = set->data; - struct set_elem *e, *n; - list_for_each_entry_safe(e, n, &map->members, list) { - list_del(&e->list); - ip_set_put_byindex(map->net, e->id); - ip_set_ext_destroy(set, e); - kfree(e); - } + WARN_ON_ONCE(!list_empty(&map->members)); kfree(map); set->data = NULL; From 6f8f132cc7bac2ac76911e47d5baa378aafda4cb Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 6 Jun 2024 12:23:31 +0200 Subject: [PATCH 460/537] netfilter: Use flowlabel flow key when re-routing mangled packets 'ip6 dscp set $v' in an nftables outpute route chain has no effect. While nftables does detect the dscp change and calls the reroute hook. But ip6_route_me_harder never sets the dscp/flowlabel: flowlabel/dsfield routing rules are ignored and no reroute takes place. Thanks to Yi Chen for an excellent reproducer script that I used to validate this change. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: Yi Chen Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/ipv6/netfilter.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 53d255838e6ab..5d989d803009f 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -36,6 +36,7 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff .flowi6_uid = sock_net_uid(net, sk), .daddr = iph->daddr, .saddr = iph->saddr, + .flowlabel = ip6_flowinfo(iph), }; int err; From b6846826982b9f2f2ad0e79540521b517469ee92 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 11 Jun 2024 16:51:23 +0200 Subject: [PATCH 461/537] thermal: gov_step_wise: Restore passive polling management Consider a thermal zone with one passive trip point, a cooling device with 3 states (0, 1, 2) bound to it, passive polling enabled (nonzero passive_delay_jiffies) and no regular polling (polling_delay_jiffies equal to 0) that is managed by the Step-Wise governor. Suppose that the initial state of the cooling device is 0 and the zone temperature is below the trip point to start with. When the trip point is crossed, tz->passive is incremented by the thermal core and the governor's .manage() callback is invoked. 
It sets 'throttle' to 'true' for the trip in question and get_target_state() returns 1 for the instance corresponding to the cooling device (say that 'upper' and 'lower' are set to 2 and 0 for it, respectively), so its state changes to 1. Passive polling is still active for the zone, so next time the temperature is updated, the governor's .manage() callback will be invoked again. If the temperature is still rising, it will change the state of the cooling device to 2. Now suppose that next time the zone temperature is updated, it falls below the trip point, so tz->passive is decremented for the zone (say it becomes 0 then) and the governor's .manage() callbacks runs. It finds that the temperature trend for the zone is 'falling' and 'throttle' will be set to 'false' for the trip in question, so the cooling device's state will be changed to 1. However, because tz->polling is 0 for the zone, the governor's .manage() callback may not be invoked again for a long time and the cooling device's state will not be reset back to 0. This can happen because commit 042a3d80f118 ("thermal: core: Move passive polling management to the core") removed passive polling management from the Step-Wise governor. Before that change, thermal_zone_trip_update() would bump up tz->passive when changing the target state for a thermal instance from "no target" to a specific value and it would drop tz->passive when changing it back to "no target" which would cause passive polling to be active for the zone until the governor has reset the states of all cooling devices. In particular, in the example above tz->passive would be incremented when changing the state of the cooling device from 0 to 1 and then it would be still nonzero when the state of the cooling device was changed from 2 to 1. To prevent this problem from occurring, restore the passive polling management in the Step-Wise governor by partially reverting the commit in question and update the comment in the restored code to explain its role more clearly. Fixes: 042a3d80f118 ("thermal: core: Move passive polling management to the core") Closes: https://lore.kernel.org/linux-pm/ZmVfcEOxmjUHZTSX@hovoldconsulting.com Reported-by: Johan Hovold Tested-by: Johan Hovold Signed-off-by: Rafael J. Wysocki --- drivers/thermal/gov_step_wise.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c index e0fdc497bfcce..65974fe8be0db 100644 --- a/drivers/thermal/gov_step_wise.c +++ b/drivers/thermal/gov_step_wise.c @@ -93,6 +93,23 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, if (instance->initialized && old_target == instance->target) continue; + if (trip->type == THERMAL_TRIP_PASSIVE) { + /* + * If the target state for this thermal instance + * changes from THERMAL_NO_TARGET to something else, + * ensure that the zone temperature will be updated + * (assuming enabled passive cooling) until it becomes + * THERMAL_NO_TARGET again, or the cooling device may + * not be reset to its initial state. + */ + if (old_target == THERMAL_NO_TARGET && + instance->target != THERMAL_NO_TARGET) + tz->passive++; + else if (old_target != THERMAL_NO_TARGET && + instance->target == THERMAL_NO_TARGET) + tz->passive--; + } + instance->initialized = true; mutex_lock(&instance->cdev->lock); From 017ed5e70c88bf2f1e0952bab6f53a53a028e561 Mon Sep 17 00:00:00 2001 From: "Dr. 
David Alan Gilbert" Date: Sat, 18 May 2024 00:26:17 +0100 Subject: [PATCH 462/537] drm/nouveau: remove unused struct 'init_exec' 'init_exec' is unused since commit cb75d97e9c77 ("drm/nouveau: implement devinit subdev, and new init table parser") Remove it. Signed-off-by: Dr. David Alan Gilbert Acked-by: Danilo Krummrich Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20240517232617.230767-1-linux@treblig.org --- drivers/gpu/drm/nouveau/nouveau_bios.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 79cfab53f80e2..8c3c1f1e01c58 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -43,11 +43,6 @@ #define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg) #define LOG_OLD_VALUE(x) -struct init_exec { - bool execute; - bool repeat; -}; - static bool nv_cksum(const uint8_t *data, unsigned int length) { /* From f2736b9c791a126ecb9cfc1aef1c7b4152b66e2d Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Tue, 11 Jun 2024 18:59:08 -0400 Subject: [PATCH 463/537] bcachefs: Fix rcu_read_lock() leak in drop_extra_replicas Signed-off-by: Kent Overstreet --- fs/bcachefs/data_update.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c index 0d807c2ce9c67..1a0072eef109b 100644 --- a/fs/bcachefs/data_update.c +++ b/fs/bcachefs/data_update.c @@ -202,9 +202,8 @@ static int __bch2_data_update_index_update(struct btree_trans *trans, bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i)); /* Now, drop excess replicas: */ -restart_drop_extra_replicas: - rcu_read_lock(); +restart_drop_extra_replicas: bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) { unsigned ptr_durability = bch2_extent_ptr_durability(c, &p); From 8c860ed825cb85f6672cd7b10a8f33e3498a7c81 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 10 Jun 2024 14:02:27 -0700 Subject: [PATCH 464/537] x86/uaccess: Fix missed zeroing of ia32 u64 get_user() range checking When reworking the range checking for get_user(), the get_user_8() case on 32-bit wasn't zeroing the high register. (The jump to bad_get_user_8 was accidentally dropped.) Restore the correct error handling destination (and rename the jump to using the expected ".L" prefix). While here, switch to using a named argument ("size") for the call template ("%c4" to "%c[size]") as already used in the other call templates in this file. Found after moving the usercopy selftests to KUnit: # usercopy_test_invalid: EXPECTATION FAILED at lib/usercopy_kunit.c:278 Expected val_u64 == 0, but val_u64 == -60129542144 (0xfffffff200000000) Closes: https://lore.kernel.org/all/CABVgOSn=tb=Lj9SxHuT4_9MTjjKVxsq-ikdXC4kGHO4CfKVmGQ@mail.gmail.com Fixes: b19b74bc99b1 ("x86/mm: Rework address range check in get_user() and put_user()") Reported-by: David Gow Signed-off-by: Kees Cook Signed-off-by: Dave Hansen Reviewed-by: Kirill A. 
Shutemov Reviewed-by: Qiuxu Zhuo Tested-by: David Gow Link: https://lore.kernel.org/all/20240610210213.work.143-kees%40kernel.org --- arch/x86/include/asm/uaccess.h | 4 ++-- arch/x86/lib/getuser.S | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 0f9bab92a43d7..3a7755c1a4410 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -78,10 +78,10 @@ extern int __get_user_bad(void); int __ret_gu; \ register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \ __chk_user_ptr(ptr); \ - asm volatile("call __" #fn "_%c4" \ + asm volatile("call __" #fn "_%c[size]" \ : "=a" (__ret_gu), "=r" (__val_gu), \ ASM_CALL_CONSTRAINT \ - : "0" (ptr), "i" (sizeof(*(ptr)))); \ + : "0" (ptr), [size] "i" (sizeof(*(ptr)))); \ instrument_get_user(__val_gu); \ (x) = (__force __typeof__(*(ptr))) __val_gu; \ __builtin_expect(__ret_gu, 0); \ diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index 10d5ed8b5990f..a1cb3a4e6742d 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -44,7 +44,11 @@ or %rdx, %rax .else cmp $TASK_SIZE_MAX-\size+1, %eax +.if \size != 8 jae .Lbad_get_user +.else + jae .Lbad_get_user_8 +.endif sbb %edx, %edx /* array_index_mask_nospec() */ and %edx, %eax .endif @@ -154,7 +158,7 @@ SYM_CODE_END(__get_user_handle_exception) #ifdef CONFIG_X86_32 SYM_CODE_START_LOCAL(__get_user_8_handle_exception) ASM_CLAC -bad_get_user_8: +.Lbad_get_user_8: xor %edx,%edx xor %ecx,%ecx mov $(-EFAULT),%_ASM_AX From 72d95924ee35c8cd16ef52f912483ee938a34d49 Mon Sep 17 00:00:00 2001 From: John David Anglin Date: Mon, 10 Jun 2024 18:47:07 +0000 Subject: [PATCH 465/537] parisc: Try to fix random segmentation faults in package builds PA-RISC systems with PA8800 and PA8900 processors have had problems with random segmentation faults for many years. Systems with earlier processors are much more stable. Systems with PA8800 and PA8900 processors have a large L2 cache which needs per page flushing for decent performance when a large range is flushed. The combined cache in these systems is also more sensitive to non-equivalent aliases than the caches in earlier systems. The majority of random segmentation faults that I have looked at appear to be memory corruption in memory allocated using mmap and malloc. My first attempt at fixing the random faults didn't work. On reviewing the cache code, I realized that there were two issues which the existing code didn't handle correctly. Both relate to cache move-in. Another issue is that the present bit in PTEs is racy. 1) PA-RISC caches have a mind of their own and they can speculatively load data and instructions for a page as long as there is a entry in the TLB for the page which allows move-in. TLBs are local to each CPU. Thus, the TLB entry for a page must be purged before flushing the page. This is particularly important on SMP systems. In some of the flush routines, the flush routine would be called and then the TLB entry would be purged. This was because the flush routine needed the TLB entry to do the flush. 2) My initial approach to trying the fix the random faults was to try and use flush_cache_page_if_present for all flush operations. This actually made things worse and led to a couple of hardware lockups. It finally dawned on me that some lines weren't being flushed because the pte check code was racy. This resulted in random inequivalent mappings to physical pages. 
The __flush_cache_page tmpalias flush sets up its own TLB entry and it doesn't need the existing TLB entry. As long as we can find the pte pointer for the vm page, we can get the pfn and physical address of the page. We can also purge the TLB entry for the page before doing the flush. Further, __flush_cache_page uses a special TLB entry that inhibits cache move-in. When switching page mappings, we need to ensure that lines are removed from the cache. It is not sufficient to just flush the lines to memory as they may come back. This made it clear that we needed to implement all the required flush operations using tmpalias routines. This includes flushes for user and kernel pages. After modifying the code to use tmpalias flushes, it became clear that the random segmentation faults were not fully resolved. The frequency of faults was worse on systems with a 64 MB L2 (PA8900) and systems with more CPUs (rp4440). The warning that I added to flush_cache_page_if_present to detect pages that couldn't be flushed triggered frequently on some systems. Helge and I looked at the pages that couldn't be flushed and found that the PTE was either cleared or for a swap page. Ignoring pages that were swapped out seemed okay but pages with cleared PTEs seemed problematic. I looked at routines related to pte_clear and noticed ptep_clear_flush. The default implementation just flushes the TLB entry. However, it was obvious that on parisc we need to flush the cache page as well. If we don't flush the cache page, stale lines will be left in the cache and cause random corruption. Once a PTE is cleared, there is no way to find the physical address associated with the PTE and flush the associated page at a later time. I implemented an updated change with a parisc specific version of ptep_clear_flush. It fixed the random data corruption on Helge's rp4440 and rp3440, as well as on my c8000. At this point, I realized that I could restore the code where we only flush in flush_cache_page_if_present if the page has been accessed. However, for this, we also need to flush the cache when the accessed bit is cleared in ptep_clear_flush_young to keep things synchronized. The default implementation only flushes the TLB entry. Other changes in this version are: 1) Implement parisc specific version of ptep_get. It's identical to default but needed in arch/parisc/include/asm/pgtable.h. 2) Revise parisc implementation of ptep_test_and_clear_young to use ptep_get (READ_ONCE). 3) Drop parisc implementation of ptep_get_and_clear. We can use default. 4) Revise flush_kernel_vmap_range and invalidate_kernel_vmap_range to use full data cache flush. 5) Move flush_cache_vmap and flush_cache_vunmap to cache.c. Handle VM_IOREMAP case in flush_cache_vmap. At this time, I don't know whether it is better to always flush when the PTE present bit is set or when both the accessed and present bits are set. The later saves flushing pages that haven't been accessed, but we need to flush in ptep_clear_flush_young. It also needs a page table lookup to find the PTE pointer. The lpa instruction only needs a page table lookup when the PTE entry isn't in the TLB. We don't atomically handle setting and clearing the _PAGE_ACCESSED bit. If we miss an update, we may miss a flush and the cache may get corrupted. Whether the current code is effectively atomic depends on process control. When CONFIG_FLUSH_PAGE_ACCESSED is set to zero, the page will eventually be flushed when the PTE is cleared or in flush_cache_page_if_present. 
The _PAGE_ACCESSED bit is not used, so the problem is avoided. The flush method can be selected using the CONFIG_FLUSH_PAGE_ACCESSED define in cache.c. The default is 0. I didn't see a large difference in performance. Signed-off-by: John David Anglin Cc: # v6.6+ Signed-off-by: Helge Deller --- arch/parisc/include/asm/cacheflush.h | 15 +- arch/parisc/include/asm/pgtable.h | 27 +- arch/parisc/kernel/cache.c | 413 +++++++++++++++++---------- 3 files changed, 275 insertions(+), 180 deletions(-) diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index ba4c05bc24d69..8394718870e1a 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -31,18 +31,17 @@ void flush_cache_all_local(void); void flush_cache_all(void); void flush_cache_mm(struct mm_struct *mm); -void flush_kernel_dcache_page_addr(const void *addr); - #define flush_kernel_dcache_range(start,size) \ flush_kernel_dcache_range_asm((start), (start)+(size)); +/* The only way to flush a vmap range is to flush whole cache */ #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1 void flush_kernel_vmap_range(void *vaddr, int size); void invalidate_kernel_vmap_range(void *vaddr, int size); -#define flush_cache_vmap(start, end) flush_cache_all() +void flush_cache_vmap(unsigned long start, unsigned long end); #define flush_cache_vmap_early(start, end) do { } while (0) -#define flush_cache_vunmap(start, end) flush_cache_all() +void flush_cache_vunmap(unsigned long start, unsigned long end); void flush_dcache_folio(struct folio *folio); #define flush_dcache_folio flush_dcache_folio @@ -77,17 +76,11 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); -/* defined in pacache.S exported in cache.c used by flush_anon_page */ -void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); - #define ARCH_HAS_FLUSH_ANON_PAGE void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr); #define ARCH_HAS_FLUSH_ON_KUNMAP -static inline void kunmap_flush_on_unmap(const void *addr) -{ - flush_kernel_dcache_page_addr(addr); -} +void kunmap_flush_on_unmap(const void *addr); #endif /* _PARISC_CACHEFLUSH_H */ diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 974accac05cd3..babf65751e818 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -448,14 +448,17 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) return pte; } +static inline pte_t ptep_get(pte_t *ptep) +{ + return READ_ONCE(*ptep); +} +#define ptep_get ptep_get + static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; - if (!pte_young(*ptep)) - return 0; - - pte = *ptep; + pte = ptep_get(ptep); if (!pte_young(pte)) { return 0; } @@ -463,17 +466,10 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned return 1; } -struct mm_struct; -static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - pte_t old_pte; - - old_pte = *ptep; - set_pte(ptep, __pte(0)); - - return old_pte; -} +int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); +pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); +struct mm_struct; static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { set_pte(ptep, 
pte_wrprotect(*ptep)); @@ -511,7 +507,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +#define __HAVE_ARCH_PTEP_CLEAR_FLUSH #define __HAVE_ARCH_PTEP_SET_WRPROTECT #define __HAVE_ARCH_PTE_SAME diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 422f3e1e6d9ca..483bfafd930cd 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -31,20 +32,31 @@ #include #include +#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE) + +/* + * When nonzero, use _PAGE_ACCESSED bit to try to reduce the number + * of page flushes done flush_cache_page_if_present. There are some + * pros and cons in using this option. It may increase the risk of + * random segmentation faults. + */ +#define CONFIG_FLUSH_PAGE_ACCESSED 0 + int split_tlb __ro_after_init; int dcache_stride __ro_after_init; int icache_stride __ro_after_init; EXPORT_SYMBOL(dcache_stride); +/* Internal implementation in arch/parisc/kernel/pacache.S */ void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); EXPORT_SYMBOL(flush_dcache_page_asm); void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr); - -/* Internal implementation in arch/parisc/kernel/pacache.S */ void flush_data_cache_local(void *); /* flushes local data-cache only */ void flush_instruction_cache_local(void); /* flushes local code-cache only */ +static void flush_kernel_dcache_page_addr(const void *addr); + /* On some machines (i.e., ones with the Merced bus), there can be * only a single PxTLB broadcast at a time; this must be guaranteed * by software. We need a spinlock around all TLB flushes to ensure @@ -321,6 +333,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, { if (!static_branch_likely(&parisc_has_cache)) return; + + /* + * The TLB is the engine of coherence on parisc. The CPU is + * entitled to speculate any page with a TLB mapping, so here + * we kill the mapping then flush the page along a special flush + * only alias mapping. This guarantees that the page is no-longer + * in the cache for any process and nor may it be speculatively + * read in (until the user or kernel specifically accesses it, + * of course). 
+ */ + flush_tlb_page(vma, vmaddr); + preempt_disable(); flush_dcache_page_asm(physaddr, vmaddr); if (vma->vm_flags & VM_EXEC) @@ -328,46 +352,44 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, preempt_enable(); } -static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr) +static void flush_kernel_dcache_page_addr(const void *addr) { - unsigned long flags, space, pgd, prot; -#ifdef CONFIG_TLB_PTLOCK - unsigned long pgd_lock; -#endif + unsigned long vaddr = (unsigned long)addr; + unsigned long flags; - vmaddr &= PAGE_MASK; + /* Purge TLB entry to remove translation on all CPUs */ + purge_tlb_start(flags); + pdtlb(SR_KERNEL, addr); + purge_tlb_end(flags); + /* Use tmpalias flush to prevent data cache move-in */ preempt_disable(); + flush_dcache_page_asm(__pa(vaddr), vaddr); + preempt_enable(); +} - /* Set context for flush */ - local_irq_save(flags); - prot = mfctl(8); - space = mfsp(SR_USER); - pgd = mfctl(25); -#ifdef CONFIG_TLB_PTLOCK - pgd_lock = mfctl(28); -#endif - switch_mm_irqs_off(NULL, vma->vm_mm, NULL); - local_irq_restore(flags); - - flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE); - if (vma->vm_flags & VM_EXEC) - flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE); - flush_tlb_page(vma, vmaddr); +static void flush_kernel_icache_page_addr(const void *addr) +{ + unsigned long vaddr = (unsigned long)addr; + unsigned long flags; - /* Restore previous context */ - local_irq_save(flags); -#ifdef CONFIG_TLB_PTLOCK - mtctl(pgd_lock, 28); -#endif - mtctl(pgd, 25); - mtsp(space, SR_USER); - mtctl(prot, 8); - local_irq_restore(flags); + /* Purge TLB entry to remove translation on all CPUs */ + purge_tlb_start(flags); + pdtlb(SR_KERNEL, addr); + purge_tlb_end(flags); + /* Use tmpalias flush to prevent instruction cache move-in */ + preempt_disable(); + flush_icache_page_asm(__pa(vaddr), vaddr); preempt_enable(); } +void kunmap_flush_on_unmap(const void *addr) +{ + flush_kernel_dcache_page_addr(addr); +} +EXPORT_SYMBOL(kunmap_flush_on_unmap); + void flush_icache_pages(struct vm_area_struct *vma, struct page *page, unsigned int nr) { @@ -375,13 +397,16 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page, for (;;) { flush_kernel_dcache_page_addr(kaddr); - flush_kernel_icache_page(kaddr); + flush_kernel_icache_page_addr(kaddr); if (--nr == 0) break; kaddr += PAGE_SIZE; } } +/* + * Walk page directory for MM to find PTEP pointer for address ADDR. + */ static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr) { pte_t *ptep = NULL; @@ -410,6 +435,41 @@ static inline bool pte_needs_flush(pte_t pte) == (_PAGE_PRESENT | _PAGE_ACCESSED); } +/* + * Return user physical address. Returns 0 if page is not present. 
+ */ +static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr) +{ + unsigned long flags, space, pgd, prot, pa; +#ifdef CONFIG_TLB_PTLOCK + unsigned long pgd_lock; +#endif + + /* Save context */ + local_irq_save(flags); + prot = mfctl(8); + space = mfsp(SR_USER); + pgd = mfctl(25); +#ifdef CONFIG_TLB_PTLOCK + pgd_lock = mfctl(28); +#endif + + /* Set context for lpa_user */ + switch_mm_irqs_off(NULL, mm, NULL); + pa = lpa_user(addr); + + /* Restore previous context */ +#ifdef CONFIG_TLB_PTLOCK + mtctl(pgd_lock, 28); +#endif + mtctl(pgd, 25); + mtsp(space, SR_USER); + mtctl(prot, 8); + local_irq_restore(flags); + + return pa; +} + void flush_dcache_folio(struct folio *folio) { struct address_space *mapping = folio_flush_mapping(folio); @@ -458,50 +518,23 @@ void flush_dcache_folio(struct folio *folio) if (addr + nr * PAGE_SIZE > vma->vm_end) nr = (vma->vm_end - addr) / PAGE_SIZE; - if (parisc_requires_coherency()) { - for (i = 0; i < nr; i++) { - pte_t *ptep = get_ptep(vma->vm_mm, - addr + i * PAGE_SIZE); - if (!ptep) - continue; - if (pte_needs_flush(*ptep)) - flush_user_cache_page(vma, - addr + i * PAGE_SIZE); - /* Optimise accesses to the same table? */ - pte_unmap(ptep); - } - } else { + if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1)) + != (addr & (SHM_COLOUR - 1))) { + for (i = 0; i < nr; i++) + __flush_cache_page(vma, + addr + i * PAGE_SIZE, + (pfn + i) * PAGE_SIZE); /* - * The TLB is the engine of coherence on parisc: - * The CPU is entitled to speculate any page - * with a TLB mapping, so here we kill the - * mapping then flush the page along a special - * flush only alias mapping. This guarantees that - * the page is no-longer in the cache for any - * process and nor may it be speculatively read - * in (until the user or kernel specifically - * accesses it, of course) + * Software is allowed to have any number + * of private mappings to a page. */ - for (i = 0; i < nr; i++) - flush_tlb_page(vma, addr + i * PAGE_SIZE); - if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1)) - != (addr & (SHM_COLOUR - 1))) { - for (i = 0; i < nr; i++) - __flush_cache_page(vma, - addr + i * PAGE_SIZE, - (pfn + i) * PAGE_SIZE); - /* - * Software is allowed to have any number - * of private mappings to a page. - */ - if (!(vma->vm_flags & VM_SHARED)) - continue; - if (old_addr) - pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", - old_addr, addr, vma->vm_file); - if (nr == folio_nr_pages(folio)) - old_addr = addr; - } + if (!(vma->vm_flags & VM_SHARED)) + continue; + if (old_addr) + pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", + old_addr, addr, vma->vm_file); + if (nr == folio_nr_pages(folio)) + old_addr = addr; } WARN_ON(++count == 4096); } @@ -591,35 +624,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long); extern void clear_user_page_asm(void *, unsigned long); extern void copy_user_page_asm(void *, void *, unsigned long); -void flush_kernel_dcache_page_addr(const void *addr) -{ - unsigned long flags; - - flush_kernel_dcache_page_asm(addr); - purge_tlb_start(flags); - pdtlb(SR_KERNEL, addr); - purge_tlb_end(flags); -} -EXPORT_SYMBOL(flush_kernel_dcache_page_addr); - static void flush_cache_page_if_present(struct vm_area_struct *vma, - unsigned long vmaddr, unsigned long pfn) + unsigned long vmaddr) { +#if CONFIG_FLUSH_PAGE_ACCESSED bool needs_flush = false; - pte_t *ptep; + pte_t *ptep, pte; - /* - * The pte check is racy and sometimes the flush will trigger - * a non-access TLB miss. Hopefully, the page has already been - * flushed. 
- */ ptep = get_ptep(vma->vm_mm, vmaddr); if (ptep) { - needs_flush = pte_needs_flush(*ptep); + pte = ptep_get(ptep); + needs_flush = pte_needs_flush(pte); pte_unmap(ptep); } if (needs_flush) - flush_cache_page(vma, vmaddr, pfn); + __flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte))); +#else + struct mm_struct *mm = vma->vm_mm; + unsigned long physaddr = get_upa(mm, vmaddr); + + if (physaddr) + __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr)); +#endif } void copy_user_highpage(struct page *to, struct page *from, @@ -629,7 +655,7 @@ void copy_user_highpage(struct page *to, struct page *from, kfrom = kmap_local_page(from); kto = kmap_local_page(to); - flush_cache_page_if_present(vma, vaddr, page_to_pfn(from)); + __flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from))); copy_page_asm(kto, kfrom); kunmap_local(kto); kunmap_local(kfrom); @@ -638,16 +664,17 @@ void copy_user_highpage(struct page *to, struct page *from, void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long user_vaddr, void *dst, void *src, int len) { - flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page)); + __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page))); memcpy(dst, src, len); - flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); + flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst)); } void copy_from_user_page(struct vm_area_struct *vma, struct page *page, unsigned long user_vaddr, void *dst, void *src, int len) { - flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page)); + __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page))); memcpy(dst, src, len); + flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src)); } /* __flush_tlb_range() @@ -681,32 +708,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start, static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - unsigned long addr, pfn; - pte_t *ptep; - - for (addr = start; addr < end; addr += PAGE_SIZE) { - bool needs_flush = false; - /* - * The vma can contain pages that aren't present. Although - * the pte search is expensive, we need the pte to find the - * page pfn and to check whether the page should be flushed. 
- */ - ptep = get_ptep(vma->vm_mm, addr); - if (ptep) { - needs_flush = pte_needs_flush(*ptep); - pfn = pte_pfn(*ptep); - pte_unmap(ptep); - } - if (needs_flush) { - if (parisc_requires_coherency()) { - flush_user_cache_page(vma, addr); - } else { - if (WARN_ON(!pfn_valid(pfn))) - return; - __flush_cache_page(vma, addr, PFN_PHYS(pfn)); - } - } - } + unsigned long addr; + + for (addr = start; addr < end; addr += PAGE_SIZE) + flush_cache_page_if_present(vma, addr); } static inline unsigned long mm_total_size(struct mm_struct *mm) @@ -757,21 +762,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) return; flush_tlb_range(vma, start, end); - flush_cache_all(); + if (vma->vm_flags & VM_EXEC) + flush_cache_all(); + else + flush_data_cache(); return; } - flush_cache_pages(vma, start, end); + flush_cache_pages(vma, start & PAGE_MASK, end); } void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) { - if (WARN_ON(!pfn_valid(pfn))) - return; - if (parisc_requires_coherency()) - flush_user_cache_page(vma, vmaddr); - else - __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); + __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); } void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) @@ -779,34 +782,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon if (!PageAnon(page)) return; - if (parisc_requires_coherency()) { - if (vma->vm_flags & VM_SHARED) - flush_data_cache(); - else - flush_user_cache_page(vma, vmaddr); + __flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page))); +} + +int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + pte_t pte = ptep_get(ptep); + + if (!pte_young(pte)) + return 0; + set_pte(ptep, pte_mkold(pte)); +#if CONFIG_FLUSH_PAGE_ACCESSED + __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte))); +#endif + return 1; +} + +/* + * After a PTE is cleared, we have no way to flush the cache for + * the physical page. On PA8800 and PA8900 processors, these lines + * can cause random cache corruption. Thus, we must flush the cache + * as well as the TLB when clearing a PTE that's valid. + */ +pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + struct mm_struct *mm = (vma)->vm_mm; + pte_t pte = ptep_get_and_clear(mm, addr, ptep); + unsigned long pfn = pte_pfn(pte); + + if (pfn_valid(pfn)) + __flush_cache_page(vma, addr, PFN_PHYS(pfn)); + else if (pte_accessible(mm, pte)) + flush_tlb_page(vma, addr); + + return pte; +} + +/* + * The physical address for pages in the ioremap case can be obtained + * from the vm_struct struct. I wasn't able to successfully handle the + * vmalloc and vmap cases. We have an array of struct page pointers in + * the uninitialized vmalloc case but the flush failed using page_to_pfn. 
+ */ +void flush_cache_vmap(unsigned long start, unsigned long end) +{ + unsigned long addr, physaddr; + struct vm_struct *vm; + + /* Prevent cache move-in */ + flush_tlb_kernel_range(start, end); + + if (end - start >= parisc_cache_flush_threshold) { + flush_cache_all(); return; } - flush_tlb_page(vma, vmaddr); - preempt_disable(); - flush_dcache_page_asm(page_to_phys(page), vmaddr); - preempt_enable(); + if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) { + flush_cache_all(); + return; + } + + vm = find_vm_area((void *)start); + if (WARN_ON_ONCE(!vm)) { + flush_cache_all(); + return; + } + + /* The physical addresses of IOREMAP regions are contiguous */ + if (vm->flags & VM_IOREMAP) { + physaddr = vm->phys_addr; + for (addr = start; addr < end; addr += PAGE_SIZE) { + preempt_disable(); + flush_dcache_page_asm(physaddr, start); + flush_icache_page_asm(physaddr, start); + preempt_enable(); + physaddr += PAGE_SIZE; + } + return; + } + + flush_cache_all(); } +EXPORT_SYMBOL(flush_cache_vmap); +/* + * The vm_struct has been retired and the page table is set up. The + * last page in the range is a guard page. Its physical address can't + * be determined using lpa, so there is no way to flush the range + * using flush_dcache_page_asm. + */ +void flush_cache_vunmap(unsigned long start, unsigned long end) +{ + /* Prevent cache move-in */ + flush_tlb_kernel_range(start, end); + flush_data_cache(); +} +EXPORT_SYMBOL(flush_cache_vunmap); + +/* + * On systems with PA8800/PA8900 processors, there is no way to flush + * a vmap range other than using the architected loop to flush the + * entire cache. The page directory is not set up, so we can't use + * fdc, etc. FDCE/FICE don't work to flush a portion of the cache. + * L2 is physically indexed but FDCE/FICE instructions in virtual + * mode output their virtual address on the core bus, not their + * real address. As a result, the L2 cache index formed from the + * virtual address will most likely not be the same as the L2 index + * formed from the real address. 
+ */ void flush_kernel_vmap_range(void *vaddr, int size) { unsigned long start = (unsigned long)vaddr; unsigned long end = start + size; - if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && - (unsigned long)size >= parisc_cache_flush_threshold) { - flush_tlb_kernel_range(start, end); - flush_data_cache(); + flush_tlb_kernel_range(start, end); + + if (!static_branch_likely(&parisc_has_dcache)) + return; + + /* If interrupts are disabled, we can only do local flush */ + if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) { + flush_data_cache_local(NULL); return; } - flush_kernel_dcache_range_asm(start, end); - flush_tlb_kernel_range(start, end); + flush_data_cache(); } EXPORT_SYMBOL(flush_kernel_vmap_range); @@ -818,15 +920,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size) /* Ensure DMA is complete */ asm_syncdma(); - if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && - (unsigned long)size >= parisc_cache_flush_threshold) { - flush_tlb_kernel_range(start, end); - flush_data_cache(); + flush_tlb_kernel_range(start, end); + + if (!static_branch_likely(&parisc_has_dcache)) + return; + + /* If interrupts are disabled, we can only do local flush */ + if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) { + flush_data_cache_local(NULL); return; } - purge_kernel_dcache_range_asm(start, end); - flush_tlb_kernel_range(start, end); + flush_data_cache(); } EXPORT_SYMBOL(invalidate_kernel_vmap_range); From 52912ca87e2b810e5acdcdc452593d30c9187d8f Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Fri, 7 Jun 2024 10:25:07 +0900 Subject: [PATCH 466/537] scsi: core: Disable CDL by default For SCSI devices supporting the Command Duration Limits feature set, the user can enable/disable this feature use through the sysfs device attribute "cdl_enable". This attribute modification triggers a call to scsi_cdl_enable() to enable and disable the feature for ATA devices and set the scsi device cdl_enable field to the user provided bool value. For SCSI devices supporting CDL, the feature set is always enabled and scsi_cdl_enable() is reduced to setting the cdl_enable field. However, for ATA devices, a drive may spin-up with the CDL feature enabled by default. But the SCSI device cdl_enable field is always initialized to false (CDL disabled), regardless of the actual device CDL feature state. For ATA devices managed by libata (or libsas), libata-core always disables the CDL feature set when the device is attached, thus syncing the state of the CDL feature on the device and of the SCSI device cdl_enable field. However, for ATA devices connected to a SAS HBA, the CDL feature is not disabled on scan for ATA devices that have this feature enabled by default, leading to an inconsistent state of the feature on the device with the SCSI device cdl_enable field. Avoid this inconsistency by adding a call to scsi_cdl_enable() in scsi_cdl_check() to make sure that the device-side state of the CDL feature set always matches the scsi device cdl_enable field state. This implies that CDL will always be disabled for ATA devices connected to SAS HBAs, which is consistent with libata/libsas initialization of the device. 
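As an illustration only, a minimal sketch of the idea with an invented helper name (scsi_cdl_enable() and the cdl_supported/cdl_enable fields are the real ones this patch relies on): whenever a scan detects CDL support, re-apply the cached sysfs value so the device-side state cannot drift from what cdl_enable reports.

#include <scsi/scsi_device.h>

/* Sketch only; scsi_cdl_enable() is a SCSI core internal helper. */
static void example_sync_cdl_state(struct scsi_device *sdev, bool supported)
{
        sdev->cdl_supported = supported ? 1 : 0;
        if (!supported)
                return;

        /*
         * Re-apply the cached user setting (disabled by default) so an ATA
         * drive that spins up with CDL enabled ends up consistent with the
         * cdl_enable sysfs attribute.
         */
        scsi_cdl_enable(sdev, sdev->cdl_enable);
}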
Reported-by: Scott McCoy Fixes: 1b22cfb14142 ("scsi: core: Allow enabling and disabling command duration limits") Cc: stable@vger.kernel.org Signed-off-by: Damien Le Moal Link: https://lore.kernel.org/r/20240607012507.111488-1-dlemoal@kernel.org Reviewed-by: Niklas Cassel Reviewed-by: Igor Pylypiv Reviewed-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index f0464db3f9de9..ee69bd35889d1 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -673,6 +673,13 @@ void scsi_cdl_check(struct scsi_device *sdev) sdev->use_10_for_rw = 0; sdev->cdl_supported = 1; + + /* + * If the device supports CDL, make sure that the current drive + * feature status is consistent with the user controlled + * cdl_enable state. + */ + scsi_cdl_enable(sdev, sdev->cdl_enable); } else { sdev->cdl_supported = 0; } From 77691af484e28af7a692e511b9ed5ca63012ec6e Mon Sep 17 00:00:00 2001 From: Ziqi Chen Date: Fri, 7 Jun 2024 18:06:23 +0800 Subject: [PATCH 467/537] scsi: ufs: core: Quiesce request queues before checking pending cmds In ufshcd_clock_scaling_prepare(), after SCSI layer is blocked, ufshcd_pending_cmds() is called to check whether there are pending transactions or not. And only if there are no pending transactions can we proceed to kickstart the clock scaling sequence. ufshcd_pending_cmds() traverses over all SCSI devices and calls sbitmap_weight() on their budget_map. sbitmap_weight() can be broken down to three steps: 1. Calculate the nr outstanding bits set in the 'word' bitmap. 2. Calculate the nr outstanding bits set in the 'cleared' bitmap. 3. Subtract the result from step 1 by the result from step 2. This can lead to a race condition as outlined below: Assume there is one pending transaction in the request queue of one SCSI device, say sda, and the budget token of this request is 0, the 'word' is 0x1 and the 'cleared' is 0x0. 1. When step 1 executes, it gets the result as 1. 2. Before step 2 executes, block layer tries to dispatch a new request to sda. Since the SCSI layer is blocked, the request cannot pass through SCSI but the block layer would do budget_get() and budget_put() to sda's budget map regardless, so the 'word' has become 0x3 and 'cleared' has become 0x2 (assume the new request got budget token 1). 3. When step 2 executes, it gets the result as 1. 4. When step 3 executes, it gets the result as 0, meaning there is no pending transactions, which is wrong. Thread A Thread B ufshcd_pending_cmds() __blk_mq_sched_dispatch_requests() | | sbitmap_weight(word) | | scsi_mq_get_budget() | | | scsi_mq_put_budget() | | sbitmap_weight(cleared) ... When this race condition happens, the clock scaling sequence is started with transactions still in flight, leading to subsequent hibernate enter failure, broken link, task abort and back to back error recovery. Fix this race condition by quiescing the request queues before calling ufshcd_pending_cmds() so that block layer won't touch the budget map when ufshcd_pending_cmds() is working on it. In addition, remove the SCSI layer blocking/unblocking to reduce redundancies and latencies. Fixes: 8d077ede48c1 ("scsi: ufs: Optimize the command queueing code") Co-developed-by: Can Guo Signed-off-by: Can Guo Signed-off-by: Ziqi Chen Link: https://lore.kernel.org/r/1717754818-39863-1-git-send-email-quic_ziqichen@quicinc.com Reviewed-by: Bart Van Assche Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/ufshcd.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 0cf07194bbe89..e5e9da61f15d0 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1366,7 +1366,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) * make sure that there are no outstanding requests when * clock scaling is in progress */ - ufshcd_scsi_block_requests(hba); + blk_mq_quiesce_tagset(&hba->host->tag_set); mutex_lock(&hba->wb_mutex); down_write(&hba->clk_scaling_lock); @@ -1375,7 +1375,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) ret = -EBUSY; up_write(&hba->clk_scaling_lock); mutex_unlock(&hba->wb_mutex); - ufshcd_scsi_unblock_requests(hba); + blk_mq_unquiesce_tagset(&hba->host->tag_set); goto out; } @@ -1396,7 +1396,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc mutex_unlock(&hba->wb_mutex); - ufshcd_scsi_unblock_requests(hba); + blk_mq_unquiesce_tagset(&hba->host->tag_set); ufshcd_release(hba); } From 90e6f08915ec6efe46570420412a65050ec826b2 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Tue, 11 Jun 2024 17:34:35 +0900 Subject: [PATCH 468/537] scsi: mpi3mr: Fix ATA NCQ priority support The function mpi3mr_qcmd() of the mpi3mr driver is able to indicate to the HBA if a read or write command directed at an ATA device should be translated to an NCQ read/write command with the high priority bit set when the request uses the RT priority class and the user has enabled NCQ priority through sysfs. However, unlike the mpt3sas driver, the mpi3mr driver does not define the sas_ncq_prio_supported and sas_ncq_prio_enable sysfs attributes, so the ncq_prio_enable field of struct mpi3mr_sdev_priv_data is never actually set and NCQ Priority cannot ever be used. Fix this by defining these missing attributes to allow a user to check if an ATA device supports NCQ priority and to enable/disable the use of NCQ priority. To do this, lift the function scsih_ncq_prio_supp() out of the mpt3sas driver and make it the generic SCSI SAS transport function sas_ata_ncq_prio_supported(). Nothing in that function is hardware specific, so this function can be used in both the mpt3sas driver and the mpi3mr driver. Reported-by: Scott McCoy Fixes: 023ab2a9b4ed ("scsi: mpi3mr: Add support for queue command processing") Cc: stable@vger.kernel.org Signed-off-by: Damien Le Moal Link: https://lore.kernel.org/r/20240611083435.92961-1-dlemoal@kernel.org Reviewed-by: Niklas Cassel Signed-off-by: Martin K.
Petersen --- drivers/scsi/mpi3mr/mpi3mr_app.c | 62 ++++++++++++++++++++++++++++ drivers/scsi/mpt3sas/mpt3sas_base.h | 3 -- drivers/scsi/mpt3sas/mpt3sas_ctl.c | 4 +- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 23 ----------- drivers/scsi/scsi_transport_sas.c | 23 +++++++++++ include/scsi/scsi_transport_sas.h | 2 + 6 files changed, 89 insertions(+), 28 deletions(-) diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c index 1638109a68a0b..cd261b48eb46d 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_app.c +++ b/drivers/scsi/mpi3mr/mpi3mr_app.c @@ -2163,10 +2163,72 @@ persistent_id_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(persistent_id); +/** + * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority + * @dev: pointer to embedded device + * @attr: sas_ncq_prio_supported attribute descriptor + * @buf: the buffer returned + * + * A sysfs 'read-only' sdev attribute, only works with SATA devices + */ +static ssize_t +sas_ncq_prio_supported_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev)); +} +static DEVICE_ATTR_RO(sas_ncq_prio_supported); + +/** + * sas_ncq_prio_enable_show - send prioritized io commands to device + * @dev: pointer to embedded device + * @attr: sas_ncq_prio_enable attribute descriptor + * @buf: the buffer returned + * + * A sysfs 'read/write' sdev attribute, only works with SATA devices + */ +static ssize_t +sas_ncq_prio_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; + + if (!sdev_priv_data) + return 0; + + return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable); +} + +static ssize_t +sas_ncq_prio_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; + bool ncq_prio_enable = 0; + + if (kstrtobool(buf, &ncq_prio_enable)) + return -EINVAL; + + if (!sas_ata_ncq_prio_supported(sdev)) + return -EINVAL; + + sdev_priv_data->ncq_prio_enable = ncq_prio_enable; + + return strlen(buf); +} +static DEVICE_ATTR_RW(sas_ncq_prio_enable); + static struct attribute *mpi3mr_dev_attrs[] = { &dev_attr_sas_address.attr, &dev_attr_device_handle.attr, &dev_attr_persistent_id.attr, + &dev_attr_sas_ncq_prio_supported.attr, + &dev_attr_sas_ncq_prio_enable.attr, NULL, }; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index bf100a4ebfc36..fe1e96fda284b 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -2048,9 +2048,6 @@ void mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request); -/* NCQ Prio Handling Check */ -bool scsih_ncq_prio_supp(struct scsi_device *sdev); - void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_init_debugfs(void); diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 1c9fd26195b81..87784c96249a7 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -4088,7 +4088,7 @@ sas_ncq_prio_supported_show(struct device *dev, { struct scsi_device *sdev = 
to_scsi_device(dev); - return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev)); + return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev)); } static DEVICE_ATTR_RO(sas_ncq_prio_supported); @@ -4123,7 +4123,7 @@ sas_ncq_prio_enable_store(struct device *dev, if (kstrtobool(buf, &ncq_prio_enable)) return -EINVAL; - if (!scsih_ncq_prio_supp(sdev)) + if (!sas_ata_ncq_prio_supported(sdev)) return -EINVAL; sas_device_priv_data->ncq_prio_enable = ncq_prio_enable; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 12d08d8ba5382..870ec2cb4af4b 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -12571,29 +12571,6 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev) return PCI_ERS_RESULT_RECOVERED; } -/** - * scsih_ncq_prio_supp - Check for NCQ command priority support - * @sdev: scsi device struct - * - * This is called when a user indicates they would like to enable - * ncq command priorities. This works only on SATA devices. - */ -bool scsih_ncq_prio_supp(struct scsi_device *sdev) -{ - struct scsi_vpd *vpd; - bool ncq_prio_supp = false; - - rcu_read_lock(); - vpd = rcu_dereference(sdev->vpd_pg89); - if (!vpd || vpd->len < 214) - goto out; - - ncq_prio_supp = (vpd->data[213] >> 4) & 1; -out: - rcu_read_unlock(); - - return ncq_prio_supp; -} /* * The pci device ids are defined in mpi/mpi2_cnfg.h. */ diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 424a89513814b..4e33f1661e4c1 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -416,6 +416,29 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *sdev) } EXPORT_SYMBOL_GPL(sas_is_tlr_enabled); +/** + * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support + * @sdev: SCSI device + * + * Check if an ATA device supports NCQ priority using VPD page 89h (ATA + * Information). Since this VPD page is implemented only for ATA devices, + * this function always returns false for SCSI devices. + */ +bool sas_ata_ncq_prio_supported(struct scsi_device *sdev) +{ + struct scsi_vpd *vpd; + bool ncq_prio_supported = false; + + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + if (vpd && vpd->len >= 214) + ncq_prio_supported = (vpd->data[213] >> 4) & 1; + rcu_read_unlock(); + + return ncq_prio_supported; +} +EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported); + /* * SAS Phy attributes */ diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h index 0e75b9277c8c6..e3b6ce3cbf883 100644 --- a/include/scsi/scsi_transport_sas.h +++ b/include/scsi/scsi_transport_sas.h @@ -200,6 +200,8 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *); void sas_disable_tlr(struct scsi_device *); void sas_enable_tlr(struct scsi_device *); +bool sas_ata_ncq_prio_supported(struct scsi_device *sdev); + extern struct sas_rphy *sas_end_device_alloc(struct sas_port *); extern struct sas_rphy *sas_expander_alloc(struct sas_port *, enum sas_device_type); void sas_rphy_free(struct sas_rphy *); From 144ba8580bcb82b2686c3d1a043299d844b9a682 Mon Sep 17 00:00:00 2001 From: Kory Maincent Date: Mon, 10 Jun 2024 10:34:26 +0200 Subject: [PATCH 469/537] net: pse-pd: Use EOPNOTSUPP error code instead of ENOTSUPP ENOTSUPP is not a SUSV4 error code, prefer EOPNOTSUPP as reported by checkpatch script. 
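A hedged aside on why the error value matters, as a stand-alone user-space illustration rather than part of the patch: ENOTSUPP is a kernel-internal value (524) with no C-library message, so when it leaks out of an ethtool operation the user sees "Unknown error 524", whereas EOPNOTSUPP renders as the standard "Operation not supported".

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* EOPNOTSUPP is a standard errno with a proper message. */
        printf("EOPNOTSUPP (%d): %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
        /* 524 is the kernel's ENOTSUPP; glibc has no text for it. */
        printf("ENOTSUPP (524): %s\n", strerror(524));
        return 0;
}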
Fixes: 18ff0bcda6d1 ("ethtool: add interface to interact with Ethernet Power Equipment") Reviewed-by: Andrew Lunn Acked-by: Oleksij Rempel Signed-off-by: Kory Maincent Link: https://lore.kernel.org/r/20240610083426.740660-1-kory.maincent@bootlin.com Signed-off-by: Jakub Kicinski --- include/linux/pse-pd/pse.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h index 6d07c95dabb9a..6eec24ffa8669 100644 --- a/include/linux/pse-pd/pse.h +++ b/include/linux/pse-pd/pse.h @@ -167,14 +167,14 @@ static inline int pse_ethtool_get_status(struct pse_control *psec, struct netlink_ext_ack *extack, struct pse_control_status *status) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int pse_ethtool_set_config(struct pse_control *psec, struct netlink_ext_ack *extack, const struct pse_control_config *config) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline bool pse_has_podl(struct pse_control *psec) From 1b9f756344416e02b41439bf2324b26aa25e141c Mon Sep 17 00:00:00 2001 From: Joshua Washington Date: Mon, 10 Jun 2024 15:57:18 -0700 Subject: [PATCH 470/537] gve: ignore nonrelevant GSO type bits when processing TSO headers TSO currently fails when the skb's gso_type field has more than one bit set. TSO packets can be passed from userspace using PF_PACKET, TUNTAP and a few others, using virtio_net_hdr (e.g., PACKET_VNET_HDR). This includes virtualization, such as QEMU, a real use-case. The gso_type and gso_size fields as passed from userspace in virtio_net_hdr are not trusted blindly by the kernel. It adds gso_type |= SKB_GSO_DODGY to force the packet to enter the software GSO stack for verification. This issue might similarly come up when the CWR bit is set in the TCP header for congestion control, causing the SKB_GSO_TCP_ECN gso_type bit to be set. Fixes: a57e5de476be ("gve: DQO: Add TX path") Signed-off-by: Joshua Washington Reviewed-by: Praveen Kaligineedi Reviewed-by: Harshitha Ramamurthy Reviewed-by: Willem de Bruijn Suggested-by: Eric Dumazet Acked-by: Andrei Vagin v2 - Remove unnecessary comments, remove line break between fixes tag and signoffs. v3 - Add back unrelated empty line removal. Link: https://lore.kernel.org/r/20240610225729.2985343-1-joshwash@google.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/google/gve/gve_tx_dqo.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index fe1b26a4d7366..0b3cca3fc7921 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -555,28 +555,18 @@ static int gve_prep_tso(struct sk_buff *skb) if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO)) return -1; + if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) + return -EINVAL; + /* Needed because we will modify header. */ err = skb_cow_head(skb, 0); if (err < 0) return err; tcp = tcp_hdr(skb); - - /* Remove payload length from checksum. */ paylen = skb->len - skb_transport_offset(skb); - - switch (skb_shinfo(skb)->gso_type) { - case SKB_GSO_TCPV4: - case SKB_GSO_TCPV6: - csum_replace_by_diff(&tcp->check, - (__force __wsum)htonl(paylen)); - - /* Compute length of segmentation header. 
*/ - header_len = skb_tcp_all_headers(skb); - break; - default: - return -EINVAL; - } + csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen)); + header_len = skb_tcp_all_headers(skb); if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO)) return -EINVAL; From be27b896529787e23a35ae4befb6337ce73fcca0 Mon Sep 17 00:00:00 2001 From: Xiaolei Wang Date: Sat, 8 Jun 2024 22:35:24 +0800 Subject: [PATCH 471/537] net: stmmac: replace priv->speed with the portTransmitRate from the tc-cbs parameters The current cbs parameter depends on speed after uplinking, which is not needed and will report a configuration error if the port is not initially connected. The UAPI exposed by tc-cbs requires userspace to recalculate the send slope anyway, because the formula depends on port_transmit_rate (see man tc-cbs), which is not an invariant from tc's perspective. Therefore, we use offload->sendslope and offload->idleslope to derive the original port_transmit_rate from the CBS formula. Fixes: 1f705bc61aee ("net: stmmac: Add support for CBS QDISC") Signed-off-by: Xiaolei Wang Reviewed-by: Wojciech Drewek Reviewed-by: Vladimir Oltean Link: https://lore.kernel.org/r/20240608143524.2065736-1-xiaolei.wang@windriver.com Signed-off-by: Jakub Kicinski --- .../net/ethernet/stmicro/stmmac/stmmac_tc.c | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 222540b554807..1562fbdd0a040 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -343,10 +343,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv, struct tc_cbs_qopt_offload *qopt) { u32 tx_queues_count = priv->plat->tx_queues_to_use; + s64 port_transmit_rate_kbps; u32 queue = qopt->queue; - u32 ptr, speed_div; u32 mode_to_use; u64 value; + u32 ptr; int ret; /* Queue 0 is not AVB capable */ @@ -355,30 +356,26 @@ static int tc_setup_cbs(struct stmmac_priv *priv, if (!priv->dma_cap.av) return -EOPNOTSUPP; + port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope; + /* Port Transmit Rate and Speed Divider */ - switch (priv->speed) { + switch (div_s64(port_transmit_rate_kbps, 1000)) { case SPEED_10000: - ptr = 32; - speed_div = 10000000; - break; case SPEED_5000: ptr = 32; - speed_div = 5000000; break; case SPEED_2500: - ptr = 8; - speed_div = 2500000; - break; case SPEED_1000: ptr = 8; - speed_div = 1000000; break; case SPEED_100: ptr = 4; - speed_div = 100000; break; default: - return -EOPNOTSUPP; + netdev_err(priv->dev, + "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n", + port_transmit_rate_kbps); + return -EINVAL; } mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; @@ -398,10 +395,10 @@ static int tc_setup_cbs(struct stmmac_priv *priv, } /* Final adjustments for HW */ - value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div); + value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps); priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0); - value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div); + value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps); priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0); value = qopt->hicredit * 1024ll * 8; From 1cdeca6a7264021e20157de0baf7880ff0ced822 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Mon, 10 Jun 2024 23:06:19 +0900 Subject: [PATCH 472/537] ksmbd: move leading slash check to smb2_get_name() MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the directory name in the root of the share starts with character like 镜(0x955c) or Ṝ(0x1e5c), it (and anything inside) cannot be accessed. The leading slash check must be checked after converting unicode to nls string. Cc: stable@vger.kernel.org Signed-off-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/smb2pdu.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index b6c5a8ea38879..f79d06d2d655c 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -630,6 +630,12 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls) return name; } + if (*name == '\\') { + pr_err("not allow directory name included leading slash\n"); + kfree(name); + return ERR_PTR(-EINVAL); + } + ksmbd_conv_path_to_unix(name); ksmbd_strip_last_slash(name); return name; @@ -2842,20 +2848,11 @@ int smb2_open(struct ksmbd_work *work) } if (req->NameLength) { - if ((req->CreateOptions & FILE_DIRECTORY_FILE_LE) && - *(char *)req->Buffer == '\\') { - pr_err("not allow directory name included leading slash\n"); - rc = -EINVAL; - goto err_out2; - } - name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset), le16_to_cpu(req->NameLength), work->conn->local_nls); if (IS_ERR(name)) { rc = PTR_ERR(name); - if (rc != -ENOMEM) - rc = -ENOENT; name = NULL; goto err_out2; } From 2bfc4214c69c62da13a9da8e3c3db5539da2ccd3 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Tue, 11 Jun 2024 23:27:27 +0900 Subject: [PATCH 473/537] ksmbd: fix missing use of get_write in in smb2_set_ea() Fix an issue where get_write is not used in smb2_set_ea(). Fixes: 6fc0a265e1b9 ("ksmbd: fix potential circular locking issue in smb2_set_ea()") Cc: stable@vger.kernel.org Reported-by: Wang Zhaolong Signed-off-by: Namjae Jeon Signed-off-by: Steve French --- fs/smb/server/smb2pdu.c | 7 ++++--- fs/smb/server/vfs.c | 17 +++++++++++------ fs/smb/server/vfs.h | 3 ++- fs/smb/server/vfs_cache.c | 3 ++- 4 files changed, 19 insertions(+), 11 deletions(-) diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index f79d06d2d655c..e7e07891781b3 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -2367,7 +2367,8 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len, if (rc > 0) { rc = ksmbd_vfs_remove_xattr(idmap, path, - attr_name); + attr_name, + get_write); if (rc < 0) { ksmbd_debug(SMB, @@ -2382,7 +2383,7 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len, } else { rc = ksmbd_vfs_setxattr(idmap, path, attr_name, value, le16_to_cpu(eabuf->EaValueLength), - 0, true); + 0, get_write); if (rc < 0) { ksmbd_debug(SMB, "ksmbd_vfs_setxattr is failed(%d)\n", @@ -2474,7 +2475,7 @@ static int smb2_remove_smb_xattrs(const struct path *path) !strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX, STREAM_PREFIX_LEN)) { err = ksmbd_vfs_remove_xattr(idmap, path, - name); + name, true); if (err) ksmbd_debug(SMB, "remove xattr failed : %s\n", name); diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index 51b1b0bed616e..9e859ba010cf1 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -1058,16 +1058,21 @@ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length, } int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap, - const struct path *path, char *attr_name) + const struct path *path, char *attr_name, + bool get_write) { int err; - err = mnt_want_write(path->mnt); - if (err) 
- return err; + if (get_write == true) { + err = mnt_want_write(path->mnt); + if (err) + return err; + } err = vfs_removexattr(idmap, path->dentry, attr_name); - mnt_drop_write(path->mnt); + + if (get_write == true) + mnt_drop_write(path->mnt); return err; } @@ -1380,7 +1385,7 @@ int ksmbd_vfs_remove_sd_xattrs(struct mnt_idmap *idmap, const struct path *path) ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name)); if (!strncmp(name, XATTR_NAME_SD, XATTR_NAME_SD_LEN)) { - err = ksmbd_vfs_remove_xattr(idmap, path, name); + err = ksmbd_vfs_remove_xattr(idmap, path, name, true); if (err) ksmbd_debug(SMB, "remove xattr failed : %s\n", name); } diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h index cfe1c8092f230..cb76f4b5bafe8 100644 --- a/fs/smb/server/vfs.h +++ b/fs/smb/server/vfs.h @@ -114,7 +114,8 @@ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap, int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name, size_t *xattr_stream_name_size, int s_type); int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap, - const struct path *path, char *attr_name); + const struct path *path, char *attr_name, + bool get_write); int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, unsigned int flags, struct path *parent_path, struct path *path, bool caseless); diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c index 6cb599cd287ee..8b2e37c8716ed 100644 --- a/fs/smb/server/vfs_cache.c +++ b/fs/smb/server/vfs_cache.c @@ -254,7 +254,8 @@ static void __ksmbd_inode_close(struct ksmbd_file *fp) ci->m_flags &= ~S_DEL_ON_CLS_STREAM; err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp), &filp->f_path, - fp->stream.name); + fp->stream.name, + true); if (err) pr_err("remove xattr failed : %s\n", fp->stream.name); From f0260589b439e2637ad54a2b25f00a516ef28a57 Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Tue, 11 Jun 2024 15:06:07 +0300 Subject: [PATCH 474/537] xhci: Set correct transferred length for cancelled bulk transfers The transferred length is set incorrectly for cancelled bulk transfer TDs in case the bulk transfer ring stops on the last transfer block with a 'Stop - Length Invalid' completion code. length essentially ends up being set to the requested length: urb->actual_length = urb->transfer_buffer_length Length for 'Stop - Length Invalid' cases should be the sum of all TRB transfer block lengths up to the one the ring stopped on, _excluding_ the one stopped on. Fix this by always summing up TRB lengths for 'Stop - Length Invalid' bulk cases. This issue was discovered by Alan Stern while debugging https://bugzilla.kernel.org/show_bug.cgi?id=218890, but does not solve that bug. Issue is older than 4.10 kernel but fix won't apply to those due to major reworks in that area. 
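To make the intended accounting concrete, a simplified sketch with invented types (the driver itself uses sum_trb_lengths() over a segmented ring): add up the lengths of the transfer blocks that precede the one the ring stopped on, and leave that block out.

/* Sketch only: real TRBs live in a segmented ring, not a flat array. */
struct example_trb {
        unsigned int transfer_len;
};

static unsigned int example_stopped_td_length(const struct example_trb *first,
                                              const struct example_trb *stopped)
{
        const struct example_trb *trb;
        unsigned int len = 0;

        /* Sum every block before the one that raised the Stop event... */
        for (trb = first; trb != stopped; trb++)
                len += trb->transfer_len;

        /* ...excluding the stopped-on block itself. */
        return len;
}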
Tested-by: Pierre Tomon Cc: stable@vger.kernel.org # v4.10+ Cc: Alan Stern Signed-off-by: Mathias Nyman Link: https://lore.kernel.org/r/20240611120610.3264502-2-mathias.nyman@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-ring.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 9e90d29527600..1db61bb2b9b56 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2524,9 +2524,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, goto finish_td; case COMP_STOPPED_LENGTH_INVALID: /* stopped on ep trb with invalid length, exclude it */ - ep_trb_len = 0; - remaining = 0; - break; + td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb); + goto finish_td; case COMP_USB_TRANSACTION_ERROR: if (xhci->quirks & XHCI_NO_SOFT_RETRY || (ep->err_count++ > MAX_SOFT_RETRY) || From 17bd54555c2aaecfdb38e2734149f684a73fa584 Mon Sep 17 00:00:00 2001 From: Kuangyi Chiang Date: Tue, 11 Jun 2024 15:06:08 +0300 Subject: [PATCH 475/537] xhci: Apply reset resume quirk to Etron EJ188 xHCI host As described in commit c877b3b2ad5c ("xhci: Add reset on resume quirk for asrock p67 host"), EJ188 have the same issue as EJ168, where completely dies on resume. So apply XHCI_RESET_ON_RESUME quirk to EJ188 as well. Cc: stable@vger.kernel.org Signed-off-by: Kuangyi Chiang Signed-off-by: Mathias Nyman Link: https://lore.kernel.org/r/20240611120610.3264502-3-mathias.nyman@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-pci.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index c040d816e6266..dd8256e4e02af 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -36,6 +36,7 @@ #define PCI_VENDOR_ID_ETRON 0x1b6f #define PCI_DEVICE_ID_EJ168 0x7023 +#define PCI_DEVICE_ID_EJ188 0x7052 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 @@ -395,6 +396,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_RESET_ON_RESUME; xhci->quirks |= XHCI_BROKEN_STREAMS; } + if (pdev->vendor == PCI_VENDOR_ID_ETRON && + pdev->device == PCI_DEVICE_ID_EJ188) + xhci->quirks |= XHCI_RESET_ON_RESUME; + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0014) { xhci->quirks |= XHCI_ZERO_64B_REGS; From 91f7a1524a92c70ffe264db8bdfa075f15bbbeb9 Mon Sep 17 00:00:00 2001 From: Kuangyi Chiang Date: Tue, 11 Jun 2024 15:06:09 +0300 Subject: [PATCH 476/537] xhci: Apply broken streams quirk to Etron EJ188 xHCI host As described in commit 8f873c1ff4ca ("xhci: Blacklist using streams on the Etron EJ168 controller"), EJ188 have the same issue as EJ168, where Streams do not work reliable on EJ188. So apply XHCI_BROKEN_STREAMS quirk to EJ188 as well. 
Cc: stable@vger.kernel.org Signed-off-by: Kuangyi Chiang Signed-off-by: Mathias Nyman Link: https://lore.kernel.org/r/20240611120610.3264502-4-mathias.nyman@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-pci.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index dd8256e4e02af..05881153883ec 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -397,8 +397,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_BROKEN_STREAMS; } if (pdev->vendor == PCI_VENDOR_ID_ETRON && - pdev->device == PCI_DEVICE_ID_EJ188) + pdev->device == PCI_DEVICE_ID_EJ188) { xhci->quirks |= XHCI_RESET_ON_RESUME; + xhci->quirks |= XHCI_BROKEN_STREAMS; + } if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0014) { From 5ceac4402f5d975e5a01c806438eb4e554771577 Mon Sep 17 00:00:00 2001 From: Hector Martin Date: Tue, 11 Jun 2024 15:06:10 +0300 Subject: [PATCH 477/537] xhci: Handle TD clearing for multiple streams case When multiple streams are in use, multiple TDs might be in flight when an endpoint is stopped. We need to issue a Set TR Dequeue Pointer for each, to ensure everything is reset properly and the caches cleared. Change the logic so that any N>1 TDs found active for different streams are deferred until after the first one is processed, calling xhci_invalidate_cancelled_tds() again from xhci_handle_cmd_set_deq() to queue another command until we are done with all of them. Also change the error/"should never happen" paths to ensure we at least clear any affected TDs, even if we can't issue a command to clear the hardware cache, and complain loudly with an xhci_warn() if this ever happens. This problem case dates back to commit e9df17eb1408 ("USB: xhci: Correct assumptions about number of rings per endpoint.") early on in the XHCI driver's life, when stream support was first added. It was then identified but not fixed nor made into a warning in commit 674f8438c121 ("xhci: split handling halted endpoints into two steps"), which added a FIXME comment for the problem case (without materially changing the behavior as far as I can tell, though the new logic made the problem more obvious). Then later, in commit 94f339147fc3 ("xhci: Fix failure to give back some cached cancelled URBs."), it was acknowledged again. [Mathias: commit 94f339147fc3 ("xhci: Fix failure to give back some cached cancelled URBs.") was a targeted regression fix to the previously mentioned patch. Users reported issues with usb stuck after unmounting/disconnecting UAS devices. This rolled back the TD clearing of multiple streams to its original state.] Apparently the commit author was aware of the problem (yet still chose to submit it): It was still mentioned as a FIXME, an xhci_dbg() was added to log the problem condition, and the remaining issue was mentioned in the commit description. The choice of making the log type xhci_dbg() for what is, at this point, a completely unhandled and known broken condition is puzzling and unfortunate, as it guarantees that no actual users would see the log in production, thereby making it nigh undebuggable (indeed, even if you turn on DEBUG, the message doesn't really hint at there being a problem at all). 
It took me *months* of random xHC crashes to finally find a reliable repro and be able to do a deep dive debug session, which could all have been avoided had this unhandled, broken condition been actually reported with a warning, as it should have been as a bug intentionally left in unfixed (never mind that it shouldn't have been left in at all). > Another fix to solve clearing the caches of all stream rings with > cancelled TDs is needed, but not as urgent. 3 years after that statement and 14 years after the original bug was introduced, I think it's finally time to fix it. And maybe next time let's not leave bugs unfixed (that are actually worse than the original bug), and let's actually get people to review kernel commits please. Fixes xHC crashes and IOMMU faults with UAS devices when handling errors/faults. Easiest repro is to use `hdparm` to mark an early sector (e.g. 1024) on a disk as bad, then `cat /dev/sdX > /dev/null` in a loop. At least in the case of JMicron controllers, the read errors end up having to cancel two TDs (for two queued requests to different streams) and the one that didn't get cleared properly ends up faulting the xHC entirely when it tries to access DMA pages that have since been unmapped, referred to by the stale TDs. This normally happens quickly (after two or three loops). After this fix, I left the `cat` in a loop running overnight and experienced no xHC failures, with all read errors recovered properly. Repro'd and tested on an Apple M1 Mac Mini (dwc3 host). On systems without an IOMMU, this bug would instead silently corrupt freed memory, making this a security bug (even on systems with IOMMUs this could silently corrupt memory belonging to other USB devices on the same controller, so it's still a security bug). Given that the kernel autoprobes partition tables, I'm pretty sure a malicious USB device pretending to be a UAS device and reporting an error with the right timing could deliberately trigger a UAF and write to freed memory, with no user action. 
[Mathias: Commit message and code comment edit, original at:] https://lore.kernel.org/linux-usb/20240524-xhci-streams-v1-1-6b1f13819bea@marcan.st/ Fixes: e9df17eb1408 ("USB: xhci: Correct assumptions about number of rings per endpoint.") Fixes: 94f339147fc3 ("xhci: Fix failure to give back some cached cancelled URBs.") Fixes: 674f8438c121 ("xhci: split handling halted endpoints into two steps") Cc: stable@vger.kernel.org Cc: security@kernel.org Reviewed-by: Neal Gompa Signed-off-by: Hector Martin Signed-off-by: Mathias Nyman Link: https://lore.kernel.org/r/20240611120610.3264502-5-mathias.nyman@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-ring.c | 54 ++++++++++++++++++++++++++++-------- drivers/usb/host/xhci.h | 1 + 2 files changed, 44 insertions(+), 11 deletions(-) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 1db61bb2b9b56..fd0cde3d1569c 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1031,13 +1031,27 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) break; case TD_DIRTY: /* TD is cached, clear it */ case TD_HALTED: + case TD_CLEARING_CACHE_DEFERRED: + if (cached_td) { + if (cached_td->urb->stream_id != td->urb->stream_id) { + /* Multiple streams case, defer move dq */ + xhci_dbg(xhci, + "Move dq deferred: stream %u URB %p\n", + td->urb->stream_id, td->urb); + td->cancel_status = TD_CLEARING_CACHE_DEFERRED; + break; + } + + /* Should never happen, but clear the TD if it does */ + xhci_warn(xhci, + "Found multiple active URBs %p and %p in stream %u?\n", + td->urb, cached_td->urb, + td->urb->stream_id); + td_to_noop(xhci, ring, cached_td, false); + cached_td->cancel_status = TD_CLEARED; + } + td->cancel_status = TD_CLEARING_CACHE; - if (cached_td) - /* FIXME stream case, several stopped rings */ - xhci_dbg(xhci, - "Move dq past stream %u URB %p instead of stream %u URB %p\n", - td->urb->stream_id, td->urb, - cached_td->urb->stream_id, cached_td->urb); cached_td = td; break; } @@ -1057,10 +1071,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) if (err) { /* Failed to move past cached td, just set cached TDs to no-op */ list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { - if (td->cancel_status != TD_CLEARING_CACHE) + /* + * Deferred TDs need to have the deq pointer set after the above command + * completes, so if that failed we just give up on all of them (and + * complain loudly since this could cause issues due to caching). 
+ */ + if (td->cancel_status != TD_CLEARING_CACHE && + td->cancel_status != TD_CLEARING_CACHE_DEFERRED) continue; - xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n", - td->urb); + xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n", + td->urb); td_to_noop(xhci, ring, td, false); td->cancel_status = TD_CLEARED; } @@ -1346,6 +1366,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, struct xhci_ep_ctx *ep_ctx; struct xhci_slot_ctx *slot_ctx; struct xhci_td *td, *tmp_td; + bool deferred = false; ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); @@ -1432,6 +1453,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n", __func__, td->urb); xhci_td_cleanup(ep->xhci, td, ep_ring, td->status); + } else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) { + deferred = true; } else { xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n", __func__, td->urb, td->cancel_status); @@ -1441,8 +1464,17 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, ep->ep_state &= ~SET_DEQ_PENDING; ep->queued_deq_seg = NULL; ep->queued_deq_ptr = NULL; - /* Restart any rings with pending URBs */ - ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + + if (deferred) { + /* We have more streams to clear */ + xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n", + __func__); + xhci_invalidate_cancelled_tds(ep); + } else { + /* Restart any rings with pending URBs */ + xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__); + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + } } static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 30415158ed3cb..78d014c4d884a 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1276,6 +1276,7 @@ enum xhci_cancelled_td_status { TD_DIRTY = 0, TD_HALTED, TD_CLEARING_CACHE, + TD_CLEARING_CACHE_DEFERRED, TD_CLEARED, }; From 0320ca14c6fb68ad19aa72e55a1a21c061b2946b Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Wed, 12 Jun 2024 09:23:13 +0200 Subject: [PATCH 478/537] drm: renesas: shmobile: Call drm_atomic_helper_shutdown() at shutdown time Based on grepping through the source code, this driver appears to be missing a call to drm_atomic_helper_shutdown() at system shutdown time. This is important because drm_atomic_helper_shutdown() will cause panels to get disabled cleanly which may be important for their power sequencing. Future changes will remove any custom powering off in individual panel drivers so the DRM drivers need to start getting this right. The fact that we should call drm_atomic_helper_shutdown() in the case of OS shutdown comes straight out of the kernel doc "driver instance overview" in drm_drv.c. 
[geert: shmob_drm_remove() already calls drm_atomic_helper_shutdown] Suggested-by: Maxime Ripard Signed-off-by: Douglas Anderson Link: https://lore.kernel.org/r/20230901164111.RFT.15.Iaf638a1d4c8b3c307a6192efabb4cbb06b195f15@changeid [geert: s/drm_helper_force_disable_all/drm_atomic_helper_shutdown/] Signed-off-by: Geert Uytterhoeven Reviewed-by: Laurent Pinchart Reviewed-by: Sui Jingfeng Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/17c6a5a668e5975f871b77fb1fca6711a0799d9e.1718176895.git.geert+renesas@glider.be --- drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c index e83c3e52251de..0250d5f00bf10 100644 --- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c @@ -171,6 +171,13 @@ static void shmob_drm_remove(struct platform_device *pdev) drm_kms_helper_poll_fini(ddev); } +static void shmob_drm_shutdown(struct platform_device *pdev) +{ + struct shmob_drm_device *sdev = platform_get_drvdata(pdev); + + drm_atomic_helper_shutdown(&sdev->ddev); +} + static int shmob_drm_probe(struct platform_device *pdev) { struct shmob_drm_platform_data *pdata = pdev->dev.platform_data; @@ -273,6 +280,7 @@ static const struct of_device_id shmob_drm_of_table[] __maybe_unused = { static struct platform_driver shmob_drm_platform_driver = { .probe = shmob_drm_probe, .remove_new = shmob_drm_remove, + .shutdown = shmob_drm_shutdown, .driver = { .name = "shmob-drm", .of_match_table = of_match_ptr(shmob_drm_of_table), From c38896ca6318c2df20bbe6c8e3f633e071fda910 Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Tue, 11 Jun 2024 10:27:44 -0700 Subject: [PATCH 479/537] drm/mediatek: Call drm_atomic_helper_shutdown() at shutdown time Based on grepping through the source code this driver appears to be missing a call to drm_atomic_helper_shutdown() at system shutdown time. Among other things, this means that if a panel is in use that it won't be cleanly powered off at system shutdown time. The fact that we should call drm_atomic_helper_shutdown() in the case of OS shutdown/restart comes straight out of the kernel doc "driver instance overview" in drm_drv.c. This driver uses the component model and shutdown happens in the base driver. The "drvdata" for this driver will always be valid if shutdown() is called and as of commit 2a073968289d ("drm/atomic-helper: drm_atomic_helper_shutdown(NULL) should be a noop") we don't need to confirm that "drm" is non-NULL.
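The same pattern applies to both drivers above; a driver-agnostic sketch with invented names, assuming drvdata holds a pointer to the drm_device as described:

#include <drm/drm_atomic_helper.h>
#include <linux/platform_device.h>

struct example_drm_private {
        struct drm_device *drm;
};

static void example_drm_shutdown(struct platform_device *pdev)
{
        struct example_drm_private *priv = platform_get_drvdata(pdev);

        /*
         * Cleanly disable CRTCs and panels on shutdown/reboot; since commit
         * 2a073968289d a NULL drm_device here would simply be a no-op.
         */
        drm_atomic_helper_shutdown(priv->drm);
}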
Suggested-by: Maxime Ripard Reviewed-by: Maxime Ripard Reviewed-by: Fei Shao Tested-by: Fei Shao Signed-off-by: Douglas Anderson Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20240611102744.v2.1.I2b014f90afc4729b6ecc7b5ddd1f6dedcea4625b@changeid --- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index b5f605751b0a1..de811e2265da7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -952,6 +952,13 @@ static void mtk_drm_remove(struct platform_device *pdev) of_node_put(private->comp_node[i]); } +static void mtk_drm_shutdown(struct platform_device *pdev) +{ + struct mtk_drm_private *private = platform_get_drvdata(pdev); + + drm_atomic_helper_shutdown(private->drm); +} + static int mtk_drm_sys_prepare(struct device *dev) { struct mtk_drm_private *private = dev_get_drvdata(dev); @@ -983,6 +990,7 @@ static const struct dev_pm_ops mtk_drm_pm_ops = { static struct platform_driver mtk_drm_platform_driver = { .probe = mtk_drm_probe, .remove_new = mtk_drm_remove, + .shutdown = mtk_drm_shutdown, .driver = { .name = "mediatek-drm", .pm = &mtk_drm_pm_ops, From 350cbb5d2f676bff22c49e5e81764c3b8da342a9 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 11 Jun 2024 16:53:06 +0200 Subject: [PATCH 480/537] cpufreq: intel_pstate: Check turbo_is_disabled() in store_no_turbo() After recent changes in intel_pstate, global.turbo_disabled is only set at the initialization time and never changed. However, it turns out that on some systems the "turbo disabled" bit in MSR_IA32_MISC_ENABLE, the initial state of which is reflected by global.turbo_disabled, can be flipped later and there should be a way to take that into account (other than checking that MSR every time the driver runs which is costly and useless overhead on the vast majority of systems). For this purpose, notice that before the changes in question, store_no_turbo() contained a turbo_is_disabled() check that was used for updating global.turbo_disabled if the "turbo disabled" bit in MSR_IA32_MISC_ENABLE had been flipped and that functionality can be restored. Then, users will be able to reset global.turbo_disabled by writing 0 to no_turbo which used to work before on systems with flipping "turbo disabled" bit. This guarantees the driver state to remain in sync, but READ_ONCE() annotations need to be added in two places where global.turbo_disabled is accessed locklessly, so modify the driver to make that happen. Fixes: 0940f1a8011f ("cpufreq: intel_pstate: Do not update global.turbo_disabled after initialization") Closes: https://lore.kernel.org/linux-pm/bf3ebf1571a4788e97daf861eb493c12d42639a3.camel@xry111.site Suggested-by: Srinivas Pandruvada Reported-by: Xi Ruoyao Tested-by: Xi Ruoyao Signed-off-by: Rafael J. 
Wysocki --- drivers/cpufreq/intel_pstate.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 65d3f79104bd5..15de5e3d96fd4 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1302,12 +1302,17 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, no_turbo = !!clamp_t(int, input, 0, 1); - if (no_turbo == global.no_turbo) - goto unlock_driver; - - if (global.turbo_disabled) { - pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); + WRITE_ONCE(global.turbo_disabled, turbo_is_disabled()); + if (global.turbo_disabled && !no_turbo) { + pr_notice("Turbo disabled by BIOS or unavailable on processor\n"); count = -EPERM; + if (global.no_turbo) + goto unlock_driver; + else + no_turbo = 1; + } + + if (no_turbo == global.no_turbo) { goto unlock_driver; } @@ -1762,7 +1767,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate) u32 vid; val = (u64)pstate << 8; - if (READ_ONCE(global.no_turbo) && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) val |= (u64)1 << 32; vid_fp = cpudata->vid.min + mul_fp( @@ -1927,7 +1932,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate) u64 val; val = (u64)pstate << 8; - if (READ_ONCE(global.no_turbo) && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) val |= (u64)1 << 32; return val; From d6d5645e5fc1233a7ba950de4a72981c394a2557 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Fri, 31 May 2024 11:19:14 +0200 Subject: [PATCH 481/537] i2c: at91: Fix the functionality flags of the slave-only interface When an I2C adapter acts only as a slave, it should not claim to support I2C master capabilities. Fixes: 9d3ca54b550c ("i2c: at91: added slave mode support") Signed-off-by: Jean Delvare Cc: Juergen Fitschen Cc: Ludovic Desroches Cc: Codrin Ciubotariu Cc: Andi Shyti Cc: Nicolas Ferre Cc: Alexandre Belloni Cc: Claudiu Beznea Signed-off-by: Andi Shyti --- drivers/i2c/busses/i2c-at91-slave.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-at91-slave.c b/drivers/i2c/busses/i2c-at91-slave.c index d6eeea5166c04..131a67d9d4a68 100644 --- a/drivers/i2c/busses/i2c-at91-slave.c +++ b/drivers/i2c/busses/i2c-at91-slave.c @@ -106,8 +106,7 @@ static int at91_unreg_slave(struct i2c_client *slave) static u32 at91_twi_func(struct i2c_adapter *adapter) { - return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL - | I2C_FUNC_SMBUS_READ_BLOCK_DATA; + return I2C_FUNC_SLAVE; } static const struct i2c_algorithm at91_twi_algorithm_slave = { From cbf3fb5b29e99e3689d63a88c3cddbffa1b8de99 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Fri, 31 May 2024 11:17:48 +0200 Subject: [PATCH 482/537] i2c: designware: Fix the functionality flags of the slave-only interface When an I2C adapter acts only as a slave, it should not claim to support I2C master capabilities. 
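The functionality mask is not just driver bookkeeping: userspace reads it through the I2C_FUNCS ioctl (i2cdetect and similar tools typically do this) before deciding which transfers to attempt, so a slave-only adapter that advertises I2C_FUNC_I2C invites master transfers that can never succeed. A small probe of the mask could look like the sketch below; it is illustrative only, and the fallback #define for I2C_FUNC_SLAVE plus the /dev/i2c-0 default path are assumptions rather than anything taken from these patches.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

#ifndef I2C_FUNC_SLAVE		/* assumed fallback for older uapi headers */
#define I2C_FUNC_SLAVE	0x00000020
#endif

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "/dev/i2c-0";
	unsigned long funcs = 0;
	int fd = open(dev, O_RDONLY);

	if (fd < 0) {
		perror(dev);
		return 1;
	}
	if (ioctl(fd, I2C_FUNCS, &funcs) < 0) {
		perror("I2C_FUNCS");
		close(fd);
		return 1;
	}
	printf("%s: functionality 0x%08lx\n", dev, funcs);
	printf("  master transfers (I2C_FUNC_I2C): %s\n",
	       (funcs & I2C_FUNC_I2C) ? "yes" : "no");
	printf("  slave mode (I2C_FUNC_SLAVE):     %s\n",
	       (funcs & I2C_FUNC_SLAVE) ? "yes" : "no");
	close(fd);
	return 0;
}

Run against one of the adapters fixed here, it should now report slave mode only.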
Fixes: 5b6d721b266a ("i2c: designware: enable SLAVE in platform module") Signed-off-by: Jean Delvare Cc: Luis Oliveira Cc: Jarkko Nikula Cc: Andy Shevchenko Cc: Mika Westerberg Cc: Jan Dabros Cc: Andi Shyti Reviewed-by: Andy Shevchenko Acked-by: Jarkko Nikula Tested-by: Jarkko Nikula Signed-off-by: Andi Shyti --- drivers/i2c/busses/i2c-designware-slave.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index 2e079cf20bb5b..78e2c47e3d7da 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -220,7 +220,7 @@ static const struct i2c_algorithm i2c_dw_algo = { void i2c_dw_configure_slave(struct dw_i2c_dev *dev) { - dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY; + dev->functionality = I2C_FUNC_SLAVE; dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL | DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED; From cea2a26553ace13ee36b56dc09ad548b5e6907df Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 11 Jun 2024 17:58:57 +0300 Subject: [PATCH 483/537] mailmap: Add my outdated addresses to the map file There is a couple of outdated addresses that are still visible in the Git history, add them to .mailmap. While at it, replace one in the comment. Signed-off-by: Andy Shevchenko Signed-off-by: Linus Torvalds --- .mailmap | 2 ++ drivers/media/pci/saa7134/saa7134-cards.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.mailmap b/.mailmap index efd9fa867a8ec..b6e9fe0916cec 100644 --- a/.mailmap +++ b/.mailmap @@ -72,6 +72,8 @@ Andrey Ryabinin Andrzej Hajda André Almeida Andy Adamson +Andy Shevchenko +Andy Shevchenko Anilkumar Kolli Anirudh Ghayal Antoine Tenart diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c index 1280696f65f26..e80fb4ebfda61 100644 --- a/drivers/media/pci/saa7134/saa7134-cards.c +++ b/drivers/media/pci/saa7134/saa7134-cards.c @@ -5152,7 +5152,7 @@ struct saa7134_board saa7134_boards[] = { }, }, [SAA7134_BOARD_AVERMEDIA_STUDIO_507UA] = { - /* Andy Shevchenko */ + /* Andy Shevchenko */ .name = "Avermedia AVerTV Studio 507UA", .audio_clock = 0x00187de7, .tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* Should be MK5 */ From 9b1ebce6a1fded90d4a1c6c57dc6262dac4c4c14 Mon Sep 17 00:00:00 2001 From: Su Hui Date: Tue, 11 Jun 2024 15:37:00 +0800 Subject: [PATCH 484/537] block: sed-opal: avoid possible wrong address reference in read_sed_opal_key() Clang static checker (scan-build) warning: block/sed-opal.c:line 317, column 3 Value stored to 'ret' is never read. Fix this problem by returning the error code when keyring_search() failed. Otherwise, 'key' will have a wrong value when 'kerf' stores the error code. 
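(The 'kerf' above refers to the kref returned by keyring_search().) The pattern being removed, recording PTR_ERR() in a local variable but still marching on with the error pointer, is easy to reproduce outside the kernel. The sketch below is an illustration only: it uses userspace copies of the ERR_PTR()/IS_ERR()/PTR_ERR() helpers and an invented lookup_key() stand-in rather than the sed-opal code.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Userspace copies of the kernel's error-pointer helpers. */
static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;
}

struct key { char description[32]; };

/* Pretend lookup that fails the way keyring_search() can. */
static struct key *lookup_key(const char *name)
{
	(void)name;
	return ERR_PTR(-ENOKEY);
}

static int read_key_buggy(const char *name)
{
	struct key *key = lookup_key(name);
	int ret = 0;

	if (IS_ERR(key))
		ret = PTR_ERR(key);	/* recorded, but execution continues... */

	/* ...and 'key' is a small negative value cast to a pointer. */
	printf("buggy: description '%s'\n", key->description);	/* crashes */
	return ret;
}

static int read_key_fixed(const char *name)
{
	struct key *key = lookup_key(name);

	if (IS_ERR(key))
		return PTR_ERR(key);	/* bail out before 'key' is ever used */

	return (int)strlen(key->description);
}

int main(void)
{
	printf("fixed lookup returns %d (-ENOKEY is %d)\n",
	       read_key_fixed("opal"), -ENOKEY);
	read_key_buggy("opal");
	return 0;
}

The fixed variant bails out before the bogus pointer can be used, which is what the one-line change below does for read_sed_opal_key().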
Fixes: 3bfeb6125664 ("block: sed-opal: keyring support for SED keys") Signed-off-by: Su Hui Link: https://lore.kernel.org/r/20240611073659.429582-1-suhui@nfschina.com Signed-off-by: Jens Axboe --- block/sed-opal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/sed-opal.c b/block/sed-opal.c index 14fe0fef811cf..598fd3e7fcc8e 100644 --- a/block/sed-opal.c +++ b/block/sed-opal.c @@ -314,7 +314,7 @@ static int read_sed_opal_key(const char *key_name, u_char *buffer, int buflen) &key_type_user, key_name, true); if (IS_ERR(kref)) - ret = PTR_ERR(kref); + return PTR_ERR(kref); key = key_ref_to_ptr(kref); down_read(&key->sem); From 1933192a91be0a570663a3c6310c46e4ce3b2baa Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Fri, 7 Jun 2024 09:21:26 +0900 Subject: [PATCH 485/537] block: Optimize disk zone resource cleanup For zoned block devices using zone write plugging, an rcu_barrier() call is needed in disk_free_zone_resources() to synchronize freeing of zone write plugs and the destrution of the mempool used to allocate the plugs. The barrier call does slow down a little teardown of zoned block devices but should not affect teardown of regular block devices or zoned block devices that do not use zone write plugging (e.g. zoned DM devices that do not require zone append emulation). Modify disk_free_zone_resources() to return early if we do not have a mempool to start with, that is, if the device does not use zone write plugging. This avoids the costly rcu_barrier() and speeds up disk teardown. Reported-by: Mikulas Patocka Fixes: dd291d77cc90 ("block: Introduce zone write plugging") Signed-off-by: Damien Le Moal Reviewed-by: Christoph Hellwig Tested-by: Mikulas Patocka Reviewed-by: Niklas Cassel Link: https://lore.kernel.org/r/20240607002126.104227-1-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/blk-zoned.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 52abebf56027a..08d7dfe8bd938 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -1552,6 +1552,9 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk) void disk_free_zone_resources(struct gendisk *disk) { + if (!disk->zone_wplugs_pool) + return; + cancel_work_sync(&disk->zone_wplugs_work); if (disk->zone_wplugs_wq) { From d0321c812d89c5910d8da8e4b10c891c6b96ff70 Mon Sep 17 00:00:00 2001 From: Chengming Zhou Date: Sat, 8 Jun 2024 22:31:15 +0800 Subject: [PATCH 486/537] block: fix request.queuelist usage in flush Friedrich Weber reported a kernel crash problem and bisected to commit 81ada09cc25e ("blk-flush: reuse rq queuelist in flush state machine"). The root cause is that we use "list_move_tail(&rq->queuelist, pending)" in the PREFLUSH/POSTFLUSH sequences. But rq->queuelist.next == xxx since it's popped out from plug->cached_rq in __blk_mq_alloc_requests_batch(). We don't initialize its queuelist just for this first request, although the queuelist of all later popped requests will be initialized. Fix it by changing to use "list_add_tail(&rq->queuelist, pending)" so rq->queuelist doesn't need to be initialized. It should be ok since rq can't be on any list when PREFLUSH or POSTFLUSH, has no move actually. Please note the commit 81ada09cc25e ("blk-flush: reuse rq queuelist in flush state machine") also has another requirement that no drivers would touch rq->queuelist after blk_mq_end_request() since we will reuse it to add rq to the post-flush pending list in POSTFLUSH. If this is not true, we will have to revert that commit IMHO. 
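For readers wondering why the distinction matters: list_add_tail() only dereferences the list head, while list_move_tail() first unlinks the node through its existing next/prev pointers, so those pointers must already be valid. A stripped-down userspace copy of the list helpers makes the difference concrete; this is a sketch only, and it zero-fills the node so the crash is deterministic, whereas the real bug left stale plug pointers behind.

#include <stdio.h>
#include <stdlib.h>

/* Minimal re-implementation of the kernel's doubly linked list helpers. */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void __list_add(struct list_head *new,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/* Never looks at new->next/new->prev: safe on an uninitialized node. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

static void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;	/* dereferences the node's old neighbours */
	prev->next = next;
}

/* Unlinks @entry via its current pointers first: they must be valid. */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	__list_del(entry->prev, entry->next);
	list_add_tail(entry, head);
}

struct request {
	struct list_head queuelist;
	int tag;
};

int main(void)
{
	struct list_head pending = LIST_HEAD_INIT(pending);
	struct request *rq = calloc(1, sizeof(*rq));	/* queuelist is NULL/NULL */
	struct request *rq2 = calloc(1, sizeof(*rq2));

	rq->tag = 1;
	rq2->tag = 2;

	/* Fine: only the list head is dereferenced. */
	list_add_tail(&rq->queuelist, &pending);
	printf("list_add_tail on a fresh node: ok\n");

	/* Crashes: list_move_tail trusts the uninitialized queuelist pointers. */
	list_move_tail(&rq2->queuelist, &pending);
	printf("never reached\n");
	return 0;
}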
This updated version adds "list_del_init(&rq->queuelist)" in flush rq callback since the dm layer may submit request of a weird invalid format (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH), which causes double list_add if without this "list_del_init(&rq->queuelist)". The weird invalid format problem should be fixed in dm layer. Reported-by: Friedrich Weber Closes: https://lore.kernel.org/lkml/14b89dfb-505c-49f7-aebb-01c54451db40@proxmox.com/ Closes: https://lore.kernel.org/lkml/c9d03ff7-27c5-4ebd-b3f6-5a90d96f35ba@proxmox.com/ Fixes: 81ada09cc25e ("blk-flush: reuse rq queuelist in flush state machine") Cc: Christoph Hellwig Cc: ming.lei@redhat.com Cc: bvanassche@acm.org Tested-by: Friedrich Weber Signed-off-by: Chengming Zhou Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20240608143115.972486-1-chengming.zhou@linux.dev Signed-off-by: Jens Axboe --- block/blk-flush.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/block/blk-flush.c b/block/blk-flush.c index c17cf8ed8113d..cca4f9131f795 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -185,7 +185,7 @@ static void blk_flush_complete_seq(struct request *rq, /* queue for flush */ if (list_empty(pending)) fq->flush_pending_since = jiffies; - list_move_tail(&rq->queuelist, pending); + list_add_tail(&rq->queuelist, pending); break; case REQ_FSEQ_DATA: @@ -263,6 +263,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq, unsigned int seq = blk_flush_cur_seq(rq); BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH); + list_del_init(&rq->queuelist); blk_flush_complete_seq(rq, fq, seq, error); } From e038ee6189842e9662d2fc59d09dbcf48350cf99 Mon Sep 17 00:00:00 2001 From: Anuj Gupta Date: Mon, 10 Jun 2024 16:41:44 +0530 Subject: [PATCH 487/537] block: unmap and free user mapped integrity via submitter The user mapped intergity is copied back and unpinned by bio_integrity_free which is a low-level routine. Do it via the submitter rather than doing it in the low-level block layer code, to split the submitter side from the consumer side of the bio. Signed-off-by: Anuj Gupta Signed-off-by: Kanchan Joshi Reviewed-by: Christoph Hellwig Reviewed-by: Martin K. Petersen Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/20240610111144.14647-1-anuj20.g@samsung.com Signed-off-by: Jens Axboe --- block/bio-integrity.c | 26 ++++++++++++++++++++++++-- drivers/nvme/host/ioctl.c | 15 +++++++++++---- include/linux/bio.h | 4 ++++ 3 files changed, 39 insertions(+), 6 deletions(-) diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 2e3e8e04961ea..8b528e12136f5 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -144,16 +144,38 @@ void bio_integrity_free(struct bio *bio) struct bio_integrity_payload *bip = bio_integrity(bio); struct bio_set *bs = bio->bi_pool; + if (bip->bip_flags & BIP_INTEGRITY_USER) + return; if (bip->bip_flags & BIP_BLOCK_INTEGRITY) kfree(bvec_virt(bip->bip_vec)); - else if (bip->bip_flags & BIP_INTEGRITY_USER) - bio_integrity_unmap_user(bip); __bio_integrity_free(bs, bip); bio->bi_integrity = NULL; bio->bi_opf &= ~REQ_INTEGRITY; } +/** + * bio_integrity_unmap_free_user - Unmap and free bio user integrity payload + * @bio: bio containing bip to be unmapped and freed + * + * Description: Used to unmap and free the user mapped integrity portion of a + * bio. Submitter attaching the user integrity buffer is responsible for + * unmapping and freeing it during completion. 
+ */ +void bio_integrity_unmap_free_user(struct bio *bio) +{ + struct bio_integrity_payload *bip = bio_integrity(bio); + struct bio_set *bs = bio->bi_pool; + + if (WARN_ON_ONCE(!(bip->bip_flags & BIP_INTEGRITY_USER))) + return; + bio_integrity_unmap_user(bip); + __bio_integrity_free(bs, bip); + bio->bi_integrity = NULL; + bio->bi_opf &= ~REQ_INTEGRITY; +} +EXPORT_SYMBOL(bio_integrity_unmap_free_user); + /** * bio_integrity_add_page - Attach integrity metadata * @bio: bio to update diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 9d9d2a127c4ec..8b69427a44762 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -111,6 +111,13 @@ static struct request *nvme_alloc_user_request(struct request_queue *q, return req; } +static void nvme_unmap_bio(struct bio *bio) +{ + if (bio_integrity(bio)) + bio_integrity_unmap_free_user(bio); + blk_rq_unmap_user(bio); +} + static int nvme_map_user_request(struct request *req, u64 ubuffer, unsigned bufflen, void __user *meta_buffer, unsigned meta_len, u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags) @@ -157,7 +164,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer, out_unmap: if (bio) - blk_rq_unmap_user(bio); + nvme_unmap_bio(bio); out: blk_mq_free_request(req); return ret; @@ -195,7 +202,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, if (result) *result = le64_to_cpu(nvme_req(req)->result.u64); if (bio) - blk_rq_unmap_user(bio); + nvme_unmap_bio(bio); blk_mq_free_request(req); if (effects) @@ -406,7 +413,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd, struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); if (pdu->bio) - blk_rq_unmap_user(pdu->bio); + nvme_unmap_bio(pdu->bio); io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags); } @@ -432,7 +439,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, */ if (blk_rq_is_poll(req)) { if (pdu->bio) - blk_rq_unmap_user(pdu->bio); + nvme_unmap_bio(pdu->bio); io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status); } else { io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb); diff --git a/include/linux/bio.h b/include/linux/bio.h index d5379548d684e..818e936129478 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -731,6 +731,7 @@ static inline bool bioset_initialized(struct bio_set *bs) bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed); +void bio_integrity_unmap_free_user(struct bio *bio); extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); extern bool bio_integrity_prep(struct bio *); @@ -807,6 +808,9 @@ static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf, { return -EINVAL; } +static inline void bio_integrity_unmap_free_user(struct bio *bio) +{ +} #endif /* CONFIG_BLK_DEV_INTEGRITY */ From e5d574ab37f5f2e7937405613d9b1a724811e5ad Mon Sep 17 00:00:00 2001 From: Chunguang Xu Date: Tue, 11 Jun 2024 18:02:08 +0800 Subject: [PATCH 488/537] nvme: avoid double free special payload If a discard request needs to be retried, and that retry may fail before a new special payload is added, a double free will result. Clear the RQF_SPECIAL_LOAD when the request is cleaned. 
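(In the code the flag is RQF_SPECIAL_PAYLOAD, as the hunk below shows.) The failure mode is the classic one of a cleanup routine keyed off a flag it never clears: run it twice and the same pointer is freed twice. A minimal userspace sketch, with invented fake_req/cleanup_* names rather than the nvme structures, shows why clearing the flag makes the cleanup safe to repeat.

#include <stdlib.h>
#include <stdio.h>

#define RQF_SPECIAL_PAYLOAD	(1u << 0)	/* stand-in for the real flag */

struct fake_req {
	unsigned int rq_flags;
	void *payload;
};

/* Buggy variant: frees the payload but leaves the flag set. */
static void cleanup_buggy(struct fake_req *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
		free(req->payload);
}

/* Fixed variant: clearing the flag makes the cleanup idempotent. */
static void cleanup_fixed(struct fake_req *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		free(req->payload);
		req->payload = NULL;
		req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
	}
}

int main(void)
{
	struct fake_req req = {
		.rq_flags = RQF_SPECIAL_PAYLOAD,
		.payload = malloc(64),
	};

	cleanup_fixed(&req);
	cleanup_fixed(&req);	/* harmless: flag already cleared */
	printf("fixed cleanup can run twice\n");

	req.rq_flags = RQF_SPECIAL_PAYLOAD;
	req.payload = malloc(64);
	cleanup_buggy(&req);
	cleanup_buggy(&req);	/* double free: same pointer freed again */
	printf("not reached (glibc usually aborts above)\n");
	return 0;
}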
Signed-off-by: Chunguang Xu Reviewed-by: Sagi Grimberg Reviewed-by: Max Gurtovoy Signed-off-by: Keith Busch --- drivers/nvme/host/core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f5d150c62955d..c40930d10bd34 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -998,6 +998,7 @@ void nvme_cleanup_cmd(struct request *req) clear_bit_unlock(0, &ctrl->discard_page_busy); else kfree(bvec_virt(&req->special_vec)); + req->rq_flags &= ~RQF_SPECIAL_PAYLOAD; } } EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); From d76584e53f4244dbc154bec447c3852600acc914 Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Wed, 12 Jun 2024 16:02:40 +0200 Subject: [PATCH 489/537] nvmet-passthru: propagate status from id override functions The id override functions return a status which is not propagated to the caller. Fixes: c1fef73f793b ("nvmet: add passthru code to process commands") Signed-off-by: Daniel Wagner Reviewed-by: Chaitanya Kulkarni Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/target/passthru.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index bb4a69d538fd1..f003782d4ecff 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -226,13 +226,13 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w) req->cmd->common.opcode == nvme_admin_identify) { switch (req->cmd->identify.cns) { case NVME_ID_CNS_CTRL: - nvmet_passthru_override_id_ctrl(req); + status = nvmet_passthru_override_id_ctrl(req); break; case NVME_ID_CNS_NS: - nvmet_passthru_override_id_ns(req); + status = nvmet_passthru_override_id_ns(req); break; case NVME_ID_CNS_NS_DESC_LIST: - nvmet_passthru_override_id_descs(req); + status = nvmet_passthru_override_id_descs(req); break; } } else if (status < 0) From cd0c1b8e045a8d2785342b385cb2684d9b48e426 Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Wed, 12 Jun 2024 16:11:59 +0200 Subject: [PATCH 490/537] nvmet: always initialize cqe.result The spec doesn't mandate that the first two double words (aka results) for the command queue entry need to be set to 0 when they are not used (not specified). Though, the target implemention returns 0 for TCP and FC but not for RDMA. Let's make RDMA behave the same and thus explicitly initializing the result field. This prevents leaking any data from the stack. 
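The kind of leak being closed here can be sketched in plain C: copy out a partially initialized automatic struct and whatever happened to be on the stack rides along in the untouched field. The sketch below uses invented fake_cqe/build_cqe_* names, and its output is deliberately unpredictable; depending on compiler and stack layout it may or may not show the 0x5a filler, which is exactly the problem with relying on unspecified memory.

#include <stdio.h>
#include <string.h>

/* Toy "completion entry": result is only meaningful for some commands. */
struct fake_cqe {
	unsigned long long result;
	unsigned short status;
};

static void scribble_on_stack(void)
{
	volatile char junk[64];

	memset((void *)junk, 0x5a, sizeof(junk));	/* leave stale bytes behind */
}

/* Buggy: only the field we "use" is set, the rest keeps stack garbage. */
static void build_cqe_buggy(struct fake_cqe *out)
{
	struct fake_cqe cqe;		/* uninitialized automatic storage */

	cqe.status = 0;
	*out = cqe;			/* result may carry old stack contents */
}

/* Fixed: everything that leaves the function starts from a known value. */
static void build_cqe_fixed(struct fake_cqe *out)
{
	struct fake_cqe cqe = { .result = 0, .status = 0 };

	*out = cqe;
}

int main(void)
{
	struct fake_cqe cqe;

	scribble_on_stack();
	build_cqe_buggy(&cqe);
	printf("buggy: result=0x%016llx (stale bytes may appear here)\n",
	       cqe.result);

	scribble_on_stack();
	build_cqe_fixed(&cqe);
	printf("fixed: result=0x%016llx\n", cqe.result);
	return 0;
}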
Signed-off-by: Daniel Wagner Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/target/core.c | 1 + drivers/nvme/target/fabrics-cmd-auth.c | 3 --- drivers/nvme/target/fabrics-cmd.c | 6 ------ 3 files changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 06f0c587f3437..4ff460ba28263 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -957,6 +957,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, req->metadata_sg_cnt = 0; req->transfer_len = 0; req->metadata_len = 0; + req->cqe->result.u64 = 0; req->cqe->status = 0; req->cqe->sq_head = 0; req->ns = NULL; diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c index d61b8c6ff3b20..cb34d644ed086 100644 --- a/drivers/nvme/target/fabrics-cmd-auth.c +++ b/drivers/nvme/target/fabrics-cmd-auth.c @@ -333,7 +333,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req) pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n", __func__, ctrl->cntlid, req->sq->qid, status, req->error_loc); - req->cqe->result.u64 = 0; if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 && req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) { unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120; @@ -516,8 +515,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req) status = nvmet_copy_to_sgl(req, 0, d, al); kfree(d); done: - req->cqe->result.u64 = 0; - if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2) nvmet_auth_sq_free(req->sq); else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index 042b379cbb364..69d77d34bec11 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -226,9 +226,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) if (status) goto out; - /* zero out initial completion result, assign values as needed */ - req->cqe->result.u32 = 0; - if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); @@ -305,9 +302,6 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) if (status) goto out; - /* zero out initial completion result, assign values as needed */ - req->cqe->result.u32 = 0; - if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); From 54559642b96116b45e4b5ca7fd9f7835b8561272 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 12 Jun 2024 13:56:38 +0100 Subject: [PATCH 491/537] io_uring/rsrc: don't lock while !TASK_RUNNING There is a report of io_rsrc_ref_quiesce() locking a mutex while not TASK_RUNNING, which is due to forgetting restoring the state back after io_run_task_work_sig() and attempts to break out of the waiting loop. 
do not call blocking ops when !TASK_RUNNING; state=1 set at [] prepare_to_wait+0xa4/0x380 kernel/sched/wait.c:237 WARNING: CPU: 2 PID: 397056 at kernel/sched/core.c:10099 __might_sleep+0x114/0x160 kernel/sched/core.c:10099 RIP: 0010:__might_sleep+0x114/0x160 kernel/sched/core.c:10099 Call Trace: __mutex_lock_common kernel/locking/mutex.c:585 [inline] __mutex_lock+0xb4/0x940 kernel/locking/mutex.c:752 io_rsrc_ref_quiesce+0x590/0x940 io_uring/rsrc.c:253 io_sqe_buffers_unregister+0xa2/0x340 io_uring/rsrc.c:799 __io_uring_register io_uring/register.c:424 [inline] __do_sys_io_uring_register+0x5b9/0x2400 io_uring/register.c:613 do_syscall_x64 arch/x86/entry/common.c:52 [inline] do_syscall_64+0xd8/0x270 arch/x86/entry/common.c:83 entry_SYSCALL_64_after_hwframe+0x6f/0x77 Reported-by: Li Shi Fixes: 4ea15b56f0810 ("io_uring/rsrc: use wq for quiescing") Cc: stable@vger.kernel.org Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/77966bc104e25b0534995d5dbb152332bc8f31c0.1718196953.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- io_uring/rsrc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c index 65417c9553b1d..edb9c5baf2e29 100644 --- a/io_uring/rsrc.c +++ b/io_uring/rsrc.c @@ -249,6 +249,7 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, ret = io_run_task_work_sig(ctx); if (ret < 0) { + __set_current_state(TASK_RUNNING); mutex_lock(&ctx->uring_lock); if (list_empty(&ctx->rsrc_ref_list)) ret = 0; From 957df9af723ccc570da835978cf7fe7863632842 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 4 Jun 2024 16:15:31 -0600 Subject: [PATCH 492/537] nbd: Remove __force casts Make it again possible for sparse to verify that blk_status_t and Unix error codes are used in the proper context by making nbd_send_cmd() return a blk_status_t instead of an integer. No functionality has been changed. Signed-off-by: Christoph Hellwig [ bvanassche: added description and made two small formatting changes ] Signed-off-by: Bart Van Assche Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20240604221531.327131-1-bvanassche@acm.org Signed-off-by: Jens Axboe --- drivers/block/nbd.c | 51 +++++++++++++++++++-------------------------- 1 file changed, 22 insertions(+), 29 deletions(-) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 22a79a62cc4ea..b87aa80a46dda 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -589,10 +589,11 @@ static inline int was_interrupted(int result) } /* - * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns - * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed. + * Returns BLK_STS_RESOURCE if the caller should retry after a delay. + * Returns BLK_STS_IOERR if sending failed. 
*/ -static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) +static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, + int index) { struct request *req = blk_mq_rq_from_pdu(cmd); struct nbd_config *config = nbd->config; @@ -614,13 +615,13 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) type = req_to_nbd_cmd_type(req); if (type == U32_MAX) - return -EIO; + return BLK_STS_IOERR; if (rq_data_dir(req) == WRITE && (config->flags & NBD_FLAG_READ_ONLY)) { dev_err_ratelimited(disk_to_dev(nbd->disk), "Write on read-only\n"); - return -EIO; + return BLK_STS_IOERR; } if (req->cmd_flags & REQ_FUA) @@ -674,11 +675,11 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) nsock->sent = sent; } set_bit(NBD_CMD_REQUEUED, &cmd->flags); - return (__force int)BLK_STS_RESOURCE; + return BLK_STS_RESOURCE; } dev_err_ratelimited(disk_to_dev(nbd->disk), "Send control failed (result %d)\n", result); - return -EAGAIN; + goto requeue; } send_pages: if (type != NBD_CMD_WRITE) @@ -715,12 +716,12 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) nsock->pending = req; nsock->sent = sent; set_bit(NBD_CMD_REQUEUED, &cmd->flags); - return (__force int)BLK_STS_RESOURCE; + return BLK_STS_RESOURCE; } dev_err(disk_to_dev(nbd->disk), "Send data failed (result %d)\n", result); - return -EAGAIN; + goto requeue; } /* * The completion might already have come in, @@ -737,7 +738,16 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) trace_nbd_payload_sent(req, handle); nsock->pending = NULL; nsock->sent = 0; - return 0; + __set_bit(NBD_CMD_INFLIGHT, &cmd->flags); + return BLK_STS_OK; + +requeue: + /* retry on a different socket */ + dev_err_ratelimited(disk_to_dev(nbd->disk), + "Request send failed, requeueing\n"); + nbd_mark_nsock_dead(nbd, nsock, 1); + nbd_requeue_cmd(cmd); + return BLK_STS_OK; } static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock, @@ -1018,7 +1028,7 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index) struct nbd_device *nbd = cmd->nbd; struct nbd_config *config; struct nbd_sock *nsock; - int ret; + blk_status_t ret; lockdep_assert_held(&cmd->lock); @@ -1072,28 +1082,11 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index) ret = BLK_STS_OK; goto out; } - /* - * Some failures are related to the link going down, so anything that - * returns EAGAIN can be retried on a different socket. - */ ret = nbd_send_cmd(nbd, cmd, index); - /* - * Access to this flag is protected by cmd->lock, thus it's safe to set - * the flag after nbd_send_cmd() succeed to send request to server. - */ - if (!ret) - __set_bit(NBD_CMD_INFLIGHT, &cmd->flags); - else if (ret == -EAGAIN) { - dev_err_ratelimited(disk_to_dev(nbd->disk), - "Request send failed, requeueing\n"); - nbd_mark_nsock_dead(nbd, nsock, 1); - nbd_requeue_cmd(cmd); - ret = BLK_STS_OK; - } out: mutex_unlock(&nsock->tx_lock); nbd_config_put(nbd); - return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret; + return ret; } static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, From d71a989cf5d961989c273093cdff2550acdde314 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Thu, 6 Jun 2024 21:52:07 -0600 Subject: [PATCH 493/537] vfio/pci: Insert full vma on mmap'd MMIO fault In order to improve performance of typical scenarios we can try to insert the entire vma on fault. This accelerates typical cases, such as when the MMIO region is DMA mapped by QEMU. 
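As a loose userspace analogy only (MAP_POPULATE is not the mechanism vfio uses), the cost of taking one fault per page versus populating a range up front can be measured with the process's own minor-fault counter:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

static long minor_faults(void)
{
	struct rusage ru;

	getrusage(RUSAGE_SELF, &ru);
	return ru.ru_minflt;
}

static void touch(const char *label, int extra_flags)
{
	size_t len = 64UL << 20;	/* 64 MiB */
	long before, after;
	char *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | extra_flags, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return;
	}

	before = minor_faults();
	memset(p, 0xab, len);		/* walk every page */
	after = minor_faults();

	printf("%-13s minor faults while touching: %ld\n", label, after - before);
	munmap(p, len);
}

int main(void)
{
	touch("on demand:", 0);
	touch("prepopulated:", MAP_POPULATE);
	return 0;
}

On a typical system the prepopulated case reports close to zero faults during the touch loop, which is the same amortization the pre-fault loop added below aims for.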
The vfio_iommu_type1 driver will fault in the entire DMA mapped range through fixup_user_fault(). In synthetic testing, this improves the time required to walk a PCI BAR mapping from userspace by roughly 1/3rd. This is likely an interim solution until vmf_insert_pfn_{pmd,pud}() gain support for pfnmaps. Suggested-by: Yan Zhao Link: https://lore.kernel.org/all/Zl6XdUkt%2FzMMGOLF@yzhao56-desk.sh.intel.com/ Reviewed-by: Yan Zhao Link: https://lore.kernel.org/r/20240607035213.2054226-1-alex.williamson@redhat.com Signed-off-by: Alex Williamson --- drivers/vfio/pci/vfio_pci_core.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index db31c27bf78bc..987c7921affa6 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -1662,6 +1662,7 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) struct vm_area_struct *vma = vmf->vma; struct vfio_pci_core_device *vdev = vma->vm_private_data; unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff; + unsigned long addr = vma->vm_start; vm_fault_t ret = VM_FAULT_SIGBUS; pfn = vma_to_pfn(vma); @@ -1669,11 +1670,25 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) down_read(&vdev->memory_lock); if (vdev->pm_runtime_engaged || !__vfio_pci_memory_enabled(vdev)) - goto out_disabled; + goto out_unlock; ret = vmf_insert_pfn(vma, vmf->address, pfn + pgoff); + if (ret & VM_FAULT_ERROR) + goto out_unlock; -out_disabled: + /* + * Pre-fault the remainder of the vma, abort further insertions and + * supress error if fault is encountered during pre-fault. + */ + for (; addr < vma->vm_end; addr += PAGE_SIZE, pfn++) { + if (addr == vmf->address) + continue; + + if (vmf_insert_pfn(vma, addr, pfn) & VM_FAULT_ERROR) + break; + } + +out_unlock: up_read(&vdev->memory_lock); return ret; From 14a20e5b4ad998793c5f43b0330d9e1388446cf3 Mon Sep 17 00:00:00 2001 From: Petr Pavlu Date: Fri, 7 Jun 2024 13:28:28 +0200 Subject: [PATCH 494/537] net/ipv6: Fix the RT cache flush via sysctl using a previous delay The net.ipv6.route.flush system parameter takes a value which specifies a delay used during the flush operation for aging exception routes. The written value is however not used in the currently requested flush and instead utilized only in the next one. A problem is that ipv6_sysctl_rtcache_flush() first reads the old value of net->ipv6.sysctl.flush_delay into a local delay variable and then calls proc_dointvec() which actually updates the sysctl based on the provided input. Fix the problem by switching the order of the two operations. Fixes: 4990509f19e8 ("[NETNS][IPV6]: Make sysctls route per namespace.") Signed-off-by: Petr Pavlu Reviewed-by: David Ahern Link: https://lore.kernel.org/r/20240607112828.30285-1-petr.pavlu@suse.com Signed-off-by: Jakub Kicinski --- net/ipv6/route.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index f083d9faba6b1..952c2bf117094 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -6343,12 +6343,12 @@ static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write, if (!write) return -EINVAL; - net = (struct net *)ctl->extra1; - delay = net->ipv6.sysctl.flush_delay; ret = proc_dointvec(ctl, write, buffer, lenp, ppos); if (ret) return ret; + net = (struct net *)ctl->extra1; + delay = net->ipv6.sysctl.flush_delay; fib6_run_gc(delay <= 0 ? 
0 : (unsigned long)delay, net, delay > 0); return 0; } From 36c92936e868601fa1f43da6758cf55805043509 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sun, 9 Jun 2024 13:36:53 +0300 Subject: [PATCH 495/537] net: bridge: mst: pass vlan group directly to br_mst_vlan_set_state Pass the already obtained vlan group pointer to br_mst_vlan_set_state() instead of dereferencing it again. Each caller has already correctly dereferenced it for their context. This change is required for the following suspicious RCU dereference fix. No functional changes intended. Fixes: 3a7c1661ae13 ("net: bridge: mst: fix vlan use-after-free") Reported-by: syzbot+9bbe2de1bc9d470eb5fe@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=9bbe2de1bc9d470eb5fe Signed-off-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20240609103654.914987-2-razor@blackwall.org Signed-off-by: Jakub Kicinski --- net/bridge/br_mst.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c index 3c66141d34d62..1de72816b0fb2 100644 --- a/net/bridge/br_mst.c +++ b/net/bridge/br_mst.c @@ -73,11 +73,10 @@ int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state) } EXPORT_SYMBOL_GPL(br_mst_get_state); -static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v, +static void br_mst_vlan_set_state(struct net_bridge_vlan_group *vg, + struct net_bridge_vlan *v, u8 state) { - struct net_bridge_vlan_group *vg = nbp_vlan_group(p); - if (br_vlan_get_state(v) == state) return; @@ -121,7 +120,7 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state, if (v->brvlan->msti != msti) continue; - br_mst_vlan_set_state(p, v, state); + br_mst_vlan_set_state(vg, v, state); } out: @@ -140,13 +139,13 @@ static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti) * it. */ if (v != pv && v->brvlan->msti == msti) { - br_mst_vlan_set_state(pv->port, pv, v->state); + br_mst_vlan_set_state(vg, pv, v->state); return; } } /* Otherwise, start out in a new MSTI with all ports disabled. */ - return br_mst_vlan_set_state(pv->port, pv, BR_STATE_DISABLED); + return br_mst_vlan_set_state(vg, pv, BR_STATE_DISABLED); } int br_mst_vlan_set_msti(struct net_bridge_vlan *mv, u16 msti) From 546ceb1dfdac866648ec959cbc71d9525bd73462 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sun, 9 Jun 2024 13:36:54 +0300 Subject: [PATCH 496/537] net: bridge: mst: fix suspicious rcu usage in br_mst_set_state I converted br_mst_set_state to RCU to avoid a vlan use-after-free but forgot to change the vlan group dereference helper. Switch to vlan group RCU deref helper to fix the suspicious rcu usage warning. 
Fixes: 3a7c1661ae13 ("net: bridge: mst: fix vlan use-after-free") Reported-by: syzbot+9bbe2de1bc9d470eb5fe@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=9bbe2de1bc9d470eb5fe Signed-off-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20240609103654.914987-3-razor@blackwall.org Signed-off-by: Jakub Kicinski --- net/bridge/br_mst.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c index 1de72816b0fb2..1820f09ff59ce 100644 --- a/net/bridge/br_mst.c +++ b/net/bridge/br_mst.c @@ -102,7 +102,7 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state, int err = 0; rcu_read_lock(); - vg = nbp_vlan_group(p); + vg = nbp_vlan_group_rcu(p); if (!vg) goto out; From b2747f108b8034271fd5289bd8f3a7003e0775a3 Mon Sep 17 00:00:00 2001 From: Benjamin Segall Date: Wed, 12 Jun 2024 12:44:44 -0700 Subject: [PATCH 497/537] x86/boot: Don't add the EFI stub to targets, again This is a re-commit of da05b143a308 ("x86/boot: Don't add the EFI stub to targets") after the tagged patch incorrectly reverted it. vmlinux-objs-y is added to targets, with an assumption that they are all relative to $(obj); adding a $(objtree)/drivers/... path causes the build to incorrectly create a useless arch/x86/boot/compressed/drivers/... directory tree. Fix this just by using a different make variable for the EFI stub. Fixes: cb8bda8ad443 ("x86/boot/compressed: Rename efi_thunk_64.S to efi-mixed.S") Signed-off-by: Ben Segall Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Ard Biesheuvel Cc: stable@vger.kernel.org # v6.1+ Link: https://lore.kernel.org/r/xm267ceukksz.fsf@bsegall.svl.corp.google.com --- arch/x86/boot/compressed/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 243ee86cb1b1e..f2051644de943 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -105,9 +105,9 @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o -vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a +vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a -$(obj)/vmlinux: $(vmlinux-objs-y) FORCE +$(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE $(call if_changed,ld) OBJCOPYFLAGS_vmlinux.bin := -R .comment -S From 12243a8115f8583a6bcada7717c01fb164e23c89 Mon Sep 17 00:00:00 2001 From: Dimitri Sivanich Date: Thu, 30 May 2024 13:36:03 -0500 Subject: [PATCH 498/537] iommu/amd: Fix panic accessing amd_iommu_enable_faulting This fixes a bug introduced by commit d74169ceb0d2 ("iommu/vt-d: Allocate DMAR fault interrupts locally"). The panic happens when amd_iommu_enable_faulting is called from CPUHP_AP_ONLINE_DYN context. 
Fixes: d74169ceb0d2 ("iommu/vt-d: Allocate DMAR fault interrupts locally") Signed-off-by: Dimitri Sivanich Tested-by: Yi Zhang Reviewed-by: Jerry Snitselaar Reviewed-by: Vasant Hegde Link: https://lore.kernel.org/r/ZljHE/R4KLzGU6vx@hpe.com Signed-off-by: Joerg Roedel --- drivers/iommu/amd/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 27e2937270950..1612480677764 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3362,7 +3362,7 @@ int amd_iommu_reenable(int mode) return 0; } -int __init amd_iommu_enable_faulting(unsigned int cpu) +int amd_iommu_enable_faulting(unsigned int cpu) { /* We enable MSI later when PCI is initialized */ return 0; From 0e6b6dedf16800df0ff73ffe2bb5066514db29c2 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 12 Jun 2024 16:15:55 +0200 Subject: [PATCH 499/537] ACPI: EC: Evaluate orphan _REG under EC device After starting to install the EC address space handler at the ACPI namespace root, if there is an "orphan" _REG method in the EC device's scope, it will not be evaluated any more. This breaks EC operation regions on some systems, like Asus gu605. To address this, use a wrapper around an existing ACPICA function to look for an "orphan" _REG method in the EC device scope and evaluate it if present. Fixes: 60fa6ae6e6d0 ("ACPI: EC: Install address space handler at the namespace root") Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218945 Reported-by: VitaliiT Tested-by: VitaliiT Signed-off-by: Rafael J. Wysocki --- drivers/acpi/acpica/acevents.h | 4 +++ drivers/acpi/acpica/evregion.c | 6 +--- drivers/acpi/acpica/evxfregn.c | 54 ++++++++++++++++++++++++++++++++++ drivers/acpi/ec.c | 3 ++ include/acpi/acpixf.h | 4 +++ 5 files changed, 66 insertions(+), 5 deletions(-) diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index ddd072cbc738d..2133085deda77 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -191,6 +191,10 @@ void acpi_ev_execute_reg_methods(struct acpi_namespace_node *node, acpi_adr_space_type space_id, u32 function); +void +acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node, + acpi_adr_space_type space_id); + acpi_status acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function); diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 18fdf2bc2d499..dc6004daf624b 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -20,10 +20,6 @@ extern u8 acpi_gbl_default_address_spaces[]; /* Local prototypes */ -static void -acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node, - acpi_adr_space_type space_id); - static acpi_status acpi_ev_reg_run(acpi_handle obj_handle, u32 level, void *context, void **return_value); @@ -818,7 +814,7 @@ acpi_ev_reg_run(acpi_handle obj_handle, * ******************************************************************************/ -static void +void acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node, acpi_adr_space_type space_id) { diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 3197e6303c5b0..624361a5f34d8 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -306,3 +306,57 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id) } ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods) + 
+/******************************************************************************* + * + * FUNCTION: acpi_execute_orphan_reg_method + * + * PARAMETERS: device - Handle for the device + * space_id - The address space ID + * + * RETURN: Status + * + * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI + * device. This is a _REG method that has no corresponding region + * within the device's scope. + * + ******************************************************************************/ +acpi_status +acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id) +{ + struct acpi_namespace_node *node; + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method); + + /* Parameter validation */ + + if (!device) { + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + + /* Convert and validate the device handle */ + + node = acpi_ns_validate_handle(device); + if (node) { + + /* + * If an "orphan" _REG method is present in the device's scope + * for the given address space ID, run it. + */ + + acpi_ev_execute_orphan_reg_method(node, space_id); + } else { + status = AE_BAD_PARAMETER; + } + + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + return_ACPI_STATUS(status); +} + +ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method) diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 68dd17f96f636..299ec653388ce 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1507,6 +1507,9 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device, if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) { acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC); + if (scope_handle != ec->handle) + acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC); + set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags); } diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 94d0fc3bd412d..80dc36f9d5274 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -662,6 +662,10 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_execute_orphan_reg_method(acpi_handle device, + acpi_adr_space_type + space_id)) ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_remove_address_space_handler(acpi_handle device, From 7c877115da4196fa108dcfefd49f5a9b67b8d8ca Mon Sep 17 00:00:00 2001 From: Riana Tauro Date: Thu, 6 Jun 2024 15:38:41 +0530 Subject: [PATCH 500/537] drm/xe/xe_gt_idle: use GT forcewake domain assertion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The rc6 registers used in disable_c6 function belong to the GT forcewake domain. Hence change the forcewake assertion to check GT forcewake domain. 
v2: add fixes tag (Himal) Fixes: 975e4a3795d4 ("drm/xe: Manually setup C6 when skip_guc_pc is set") Signed-off-by: Riana Tauro Reviewed-by: Rodrigo Vivi Reviewed-by: Himal Prasad Ghimiray Link: https://patchwork.freedesktop.org/patch/msgid/20240606100842.956072-2-riana.tauro@intel.com Signed-off-by: Rodrigo Vivi (cherry picked from commit 21b708554648177a0078962c31629bce31ef5d83) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_gt_idle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c index 8fc0f3f6ecc53..c699f47f4700d 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.c +++ b/drivers/gpu/drm/xe/xe_gt_idle.c @@ -199,7 +199,7 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt) void xe_gt_idle_disable_c6(struct xe_gt *gt) { xe_device_assert_mem_access(gt_to_xe(gt)); - xe_force_wake_assert_held(gt_to_fw(gt), XE_FORCEWAKE_ALL); + xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); xe_mmio_write32(gt, PG_ENABLE, 0); xe_mmio_write32(gt, RC_CONTROL, 0); From cd554e1e118a6aa1c919309cd28398b003f69c1f Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 7 Jun 2024 17:31:55 +0200 Subject: [PATCH 501/537] drm/xe/pf: Assert LMEM provisioning is done only on DGFX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Local Memory (aka VRAM) is only available on DGFX platforms. We shouldn't attempt to provision VFs with LMEM or attempt to update the LMTT on non-DGFX platforms. Add missing asserts that would enforce that and fix release code that could crash on iGFX due to uninitialized LMTT. Fixes: 0698ff57bf32 ("drm/xe/pf: Update the LMTT when freeing VF GT config") Signed-off-by: Michal Wajdeczko Cc: Piotr Piórkowski Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20240607153155.1592-1-michal.wajdeczko@intel.com (cherry picked from commit b321cb83a375bcc18cd0a4b62bdeaf6905cca769) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 476d613333a98..6c2cfc54442ce 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -1274,6 +1274,9 @@ static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid) struct xe_tile *tile; unsigned int tid; + xe_assert(xe, IS_DGFX(xe)); + xe_assert(xe, IS_SRIOV_PF(xe)); + for_each_tile(tile, xe, tid) { lmtt = &tile->sriov.pf.lmtt; xe_lmtt_drop_pages(lmtt, vfid); @@ -1292,6 +1295,9 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid) unsigned int tid; int err; + xe_assert(xe, IS_DGFX(xe)); + xe_assert(xe, IS_SRIOV_PF(xe)); + total = 0; for_each_tile(tile, xe, tid) total += pf_get_vf_config_lmem(tile->primary_gt, vfid); @@ -1337,6 +1343,7 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid) static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config) { + xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt))); xe_gt_assert(gt, !xe_gt_is_media_type(gt)); lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); @@ -1355,6 +1362,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) int err; xe_gt_assert(gt, vfid); + xe_gt_assert(gt, IS_DGFX(xe)); xe_gt_assert(gt, !xe_gt_is_media_type(gt)); size = round_up(size, pf_get_lmem_alignment(gt)); @@ -1745,11 +1753,14 @@ static void 
pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *c static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid) { struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); + struct xe_device *xe = gt_to_xe(gt); if (!xe_gt_is_media_type(gt)) { pf_release_vf_config_ggtt(gt, config); - pf_release_vf_config_lmem(gt, config); - pf_update_vf_lmtt(gt_to_xe(gt), vfid); + if (IS_DGFX(xe)) { + pf_release_vf_config_lmem(gt, config); + pf_update_vf_lmtt(xe, vfid); + } } pf_release_config_ctxs(gt, config); pf_release_config_dbs(gt, config); From b5e3a9b83f352a737b77a01734a6661d1130ed49 Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Wed, 5 Jun 2024 09:29:48 +0200 Subject: [PATCH 502/537] drm/xe: flush engine buffers before signalling user fence on all engines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tests show that user fence signalling requires kind of write barrier, otherwise not all writes performed by the workload will be available to userspace. It is already done for render and compute, we need it also for the rest: video, gsc, copy. Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs") Signed-off-by: Andrzej Hajda Reviewed-by: Thomas Hellström Signed-off-by: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20240605-fix_user_fence_posted-v3-2-06e7932f784a@intel.com (cherry picked from commit 3ad7d18c5dad75ed38098c7cc3bc9594b4701399) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_ring_ops.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index d42b3f33bd7a2..aca7a9af6e846 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -80,6 +80,16 @@ static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i) return i; } +static int emit_flush_dw(u32 *dw, int i) +{ + dw[i++] = MI_FLUSH_DW | MI_FLUSH_IMM_DW; + dw[i++] = 0; + dw[i++] = 0; + dw[i++] = 0; + + return i; +} + static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb, u32 *dw, int i) { @@ -234,10 +244,12 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc i = emit_bb_start(batch_addr, ppgtt_flag, dw, i); - if (job->user_fence.used) + if (job->user_fence.used) { + i = emit_flush_dw(dw, i); i = emit_store_imm_ppgtt_posted(job->user_fence.addr, job->user_fence.value, dw, i); + } i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i); @@ -293,10 +305,12 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc, i = emit_bb_start(batch_addr, ppgtt_flag, dw, i); - if (job->user_fence.used) + if (job->user_fence.used) { + i = emit_flush_dw(dw, i); i = emit_store_imm_ppgtt_posted(job->user_fence.addr, job->user_fence.value, dw, i); + } i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i); From 2470b141bfae2b9695b5b6823e3b978b22d33dde Mon Sep 17 00:00:00 2001 From: Riana Tauro Date: Thu, 6 Jun 2024 15:38:42 +0530 Subject: [PATCH 503/537] drm/xe: move disable_c6 call MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit disable c6 called in guc_pc_fini_hw is unreachable. GuC PC init returns earlier if skip_guc_pc is true and never registers the finish call thus making disable_c6 unreachable. move this call to gt idle. 
v2: rebase v3: add fixes tag (Himal) Fixes: 975e4a3795d4 ("drm/xe: Manually setup C6 when skip_guc_pc is set") Signed-off-by: Riana Tauro Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20240606100842.956072-3-riana.tauro@intel.com Signed-off-by: Rodrigo Vivi (cherry picked from commit 6800e63cf97bae62bca56d8e691544540d945f53) Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_gt_idle.c | 7 +++++++ drivers/gpu/drm/xe/xe_guc_pc.c | 6 ------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c index c699f47f4700d..944770fb2daff 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.c +++ b/drivers/gpu/drm/xe/xe_gt_idle.c @@ -147,6 +147,13 @@ static const struct attribute *gt_idle_attrs[] = { static void gt_idle_sysfs_fini(struct drm_device *drm, void *arg) { struct kobject *kobj = arg; + struct xe_gt *gt = kobj_to_gt(kobj->parent); + + if (gt_to_xe(gt)->info.skip_guc_pc) { + XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT)); + xe_gt_idle_disable_c6(gt); + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + } sysfs_remove_files(kobj, gt_idle_attrs); kobject_put(kobj); diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index 509649d0e65e1..23382ced4ea74 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -895,12 +895,6 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc) static void xe_guc_pc_fini(struct drm_device *drm, void *arg) { struct xe_guc_pc *pc = arg; - struct xe_device *xe = pc_to_xe(pc); - - if (xe->info.skip_guc_pc) { - xe_gt_idle_disable_c6(pc_to_gt(pc)); - return; - } XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL)); XE_WARN_ON(xe_guc_pc_gucrc_disable(pc)); From 8eef5c3cea65f248c99cd9dcb3f84c6509b78162 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Tue, 11 Jun 2024 09:24:55 -0700 Subject: [PATCH 504/537] Revert "igc: fix a log entry using uninitialized netdev" This reverts commit 86167183a17e03ec77198897975e9fdfbd53cb0b. igc_ptp_init() needs to be called before igc_reset(), otherwise kernel crash could be observed. Following the corresponding discussion [1] and [2] revert this commit. 
Link: https://lore.kernel.org/all/8fb634f8-7330-4cf4-a8ce-485af9c0a61a@intel.com/ [1] Link: https://lore.kernel.org/all/87o78rmkhu.fsf@intel.com/ [2] Fixes: 86167183a17e ("igc: fix a log entry using uninitialized netdev") Signed-off-by: Sasha Neftin Tested-by: Naama Meir Signed-off-by: Tony Nguyen Link: https://lore.kernel.org/r/20240611162456.961631-1-anthony.l.nguyen@intel.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/igc/igc_main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 305e05294a265..87b655b839c1c 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -7032,6 +7032,8 @@ static int igc_probe(struct pci_dev *pdev, device_set_wakeup_enable(&adapter->pdev->dev, adapter->flags & IGC_FLAG_WOL_SUPPORTED); + igc_ptp_init(adapter); + igc_tsn_clear_schedule(adapter); /* reset the hardware with the new settings */ @@ -7053,9 +7055,6 @@ static int igc_probe(struct pci_dev *pdev, /* Check if Media Autosense is enabled */ adapter->ei = *ei; - /* do hw tstamp init after resetting */ - igc_ptp_init(adapter); - /* print pcie link status and MAC address */ pcie_print_link_status(pdev); netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); From 79f18a41dd056115d685f3b0a419c7cd40055e13 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Wed, 12 Jun 2024 06:04:46 +0000 Subject: [PATCH 505/537] ionic: fix use after netif_napi_del() When queues are started, netif_napi_add() and napi_enable() are called. If there are 4 queues and only 3 queues are used for the current configuration, only 3 queues' napi should be registered and enabled. The ionic_qcq_enable() checks whether the .poll pointer is not NULL for enabling only the using queue' napi. Unused queues' napi will not be registered by netif_napi_add(), so the .poll pointer indicates NULL. But it couldn't distinguish whether the napi was unregistered or not because netif_napi_del() doesn't reset the .poll pointer to NULL. So, ionic_qcq_enable() calls napi_enable() for the queue, which was unregistered by netif_napi_del(). Reproducer: ethtool -L rx 1 tx 1 combined 0 ethtool -L rx 0 tx 0 combined 1 ethtool -L rx 0 tx 0 combined 4 Splat looks like: kernel BUG at net/core/dev.c:6666! Oops: invalid opcode: 0000 [#1] PREEMPT SMP NOPTI CPU: 3 PID: 1057 Comm: kworker/3:3 Not tainted 6.10.0-rc2+ #16 Workqueue: events ionic_lif_deferred_work [ionic] RIP: 0010:napi_enable+0x3b/0x40 Code: 48 89 c2 48 83 e2 f6 80 b9 61 09 00 00 00 74 0d 48 83 bf 60 01 00 00 00 74 03 80 ce 01 f0 4f RSP: 0018:ffffb6ed83227d48 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffff97560cda0828 RCX: 0000000000000029 RDX: 0000000000000001 RSI: 0000000000000000 RDI: ffff97560cda0a28 RBP: ffffb6ed83227d50 R08: 0000000000000400 R09: 0000000000000001 R10: 0000000000000001 R11: 0000000000000001 R12: 0000000000000000 R13: ffff97560ce3c1a0 R14: 0000000000000000 R15: ffff975613ba0a20 FS: 0000000000000000(0000) GS:ffff975d5f780000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f8f734ee200 CR3: 0000000103e50000 CR4: 00000000007506f0 PKRU: 55555554 Call Trace: ? die+0x33/0x90 ? do_trap+0xd9/0x100 ? napi_enable+0x3b/0x40 ? do_error_trap+0x83/0xb0 ? napi_enable+0x3b/0x40 ? napi_enable+0x3b/0x40 ? exc_invalid_op+0x4e/0x70 ? napi_enable+0x3b/0x40 ? asm_exc_invalid_op+0x16/0x20 ? 
napi_enable+0x3b/0x40 ionic_qcq_enable+0xb7/0x180 [ionic 59bdfc8a035436e1c4224ff7d10789e3f14643f8] ionic_start_queues+0xc4/0x290 [ionic 59bdfc8a035436e1c4224ff7d10789e3f14643f8] ionic_link_status_check+0x11c/0x170 [ionic 59bdfc8a035436e1c4224ff7d10789e3f14643f8] ionic_lif_deferred_work+0x129/0x280 [ionic 59bdfc8a035436e1c4224ff7d10789e3f14643f8] process_one_work+0x145/0x360 worker_thread+0x2bb/0x3d0 ? __pfx_worker_thread+0x10/0x10 kthread+0xcc/0x100 ? __pfx_kthread+0x10/0x10 ret_from_fork+0x2d/0x50 ? __pfx_kthread+0x10/0x10 ret_from_fork_asm+0x1a/0x30 Fixes: 0f3154e6bcb3 ("ionic: Add Tx and Rx handling") Signed-off-by: Taehee Yoo Reviewed-by: Brett Creeley Reviewed-by: Shannon Nelson Link: https://lore.kernel.org/r/20240612060446.1754392-1-ap420073@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 24870da3f4848..1934e9d6d9e4f 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -304,10 +304,8 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq) if (ret) return ret; - if (qcq->napi.poll) - napi_enable(&qcq->napi); - if (qcq->flags & IONIC_QCQ_F_INTR) { + napi_enable(&qcq->napi); irq_set_affinity_hint(qcq->intr.vector, &qcq->intr.affinity_mask); ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, From 6f4d93b78ade0a4c2cafd587f7b429ce95abb02e Mon Sep 17 00:00:00 2001 From: Ziwei Xiao Date: Wed, 12 Jun 2024 00:16:54 +0000 Subject: [PATCH 506/537] gve: Clear napi->skb before dev_kfree_skb_any() gve_rx_free_skb incorrectly leaves napi->skb referencing an skb after it is freed with dev_kfree_skb_any(). This can result in a subsequent call to napi_get_frags returning a dangling pointer. Fix this by clearing napi->skb before the skb is freed. 
Fixes: 9b8dd5e5ea48 ("gve: DQO: Add RX path") Cc: stable@vger.kernel.org Reported-by: Shailend Chand Signed-off-by: Ziwei Xiao Reviewed-by: Harshitha Ramamurthy Reviewed-by: Shailend Chand Reviewed-by: Praveen Kaligineedi Link: https://lore.kernel.org/r/20240612001654.923887-1-ziweixiao@google.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/google/gve/gve_rx_dqo.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index c1c912de59c72..1154c1d8f66f0 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -647,11 +647,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb, skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type); } -static void gve_rx_free_skb(struct gve_rx_ring *rx) +static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx) { if (!rx->ctx.skb_head) return; + if (rx->ctx.skb_head == napi->skb) + napi->skb = NULL; dev_kfree_skb_any(rx->ctx.skb_head); rx->ctx.skb_head = NULL; rx->ctx.skb_tail = NULL; @@ -950,7 +952,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget) err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num); if (err < 0) { - gve_rx_free_skb(rx); + gve_rx_free_skb(napi, rx); u64_stats_update_begin(&rx->statss); if (err == -ENOMEM) rx->rx_skb_alloc_fail++; @@ -993,7 +995,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget) /* gve_rx_complete_skb() will consume skb if successful */ if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) { - gve_rx_free_skb(rx); + gve_rx_free_skb(napi, rx); u64_stats_update_begin(&rx->statss); rx->rx_desc_err_dropped_pkt++; u64_stats_update_end(&rx->statss); From 7da9dfdd5a3dbfd3d2450d9c6a3d1d699d625c43 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 11 Jun 2024 09:27:38 +0200 Subject: [PATCH 507/537] .editorconfig: remove trim_trailing_whitespace option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some editors (like the vim variants), when seeing "trim_whitespace" decide to do just that for all of the whitespace in the file you are saving, even if it is not on a line that you have modified. This plays havoc with diffs and is NOT something that should be intended. As the "only trim whitespace on modified lines" is not part of the editorconfig standard yet, just delete these lines from the .editorconfig file so that we don't end up with diffs that are automatically rejected by maintainers for containing things they shouldn't. 
Cc: Danny Lin Cc: Íñigo Huguet Cc: Mickaël Salaün Cc: Masahiro Yamada Fixes: 5a602de99797 ("Add .editorconfig file for basic formatting") Acked-by: Vincent Mailhol Link: https://lore.kernel.org/r/2024061137-jawless-dipped-e789@gregkh Signed-off-by: Greg Kroah-Hartman --- .editorconfig | 3 --- 1 file changed, 3 deletions(-) diff --git a/.editorconfig b/.editorconfig index 854773350cc5a..29a30ccfc07bf 100644 --- a/.editorconfig +++ b/.editorconfig @@ -5,7 +5,6 @@ root = true [{*.{awk,c,dts,dtsi,dtso,h,mk,s,S},Kconfig,Makefile,Makefile.*}] charset = utf-8 end_of_line = lf -trim_trailing_whitespace = true insert_final_newline = true indent_style = tab indent_size = 8 @@ -13,7 +12,6 @@ indent_size = 8 [*.{json,py,rs}] charset = utf-8 end_of_line = lf -trim_trailing_whitespace = true insert_final_newline = true indent_style = space indent_size = 4 @@ -26,7 +24,6 @@ indent_size = 8 [*.yaml] charset = utf-8 end_of_line = lf -trim_trailing_whitespace = unset insert_final_newline = true indent_style = space indent_size = 2 From 7d9df38c9c037ab84502ce7eeae9f1e1e7e72603 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 12 Jun 2024 16:17:36 -0700 Subject: [PATCH 508/537] bnxt_en: Cap the size of HWRM_PORT_PHY_QCFG forwarded response Firmware interface 1.10.2.118 has increased the size of HWRM_PORT_PHY_QCFG response beyond the maximum size that can be forwarded. When the VF's link state is not the default auto state, the PF will need to forward the response back to the VF to indicate the forced state. This regression may cause the VF to fail to initialize. Fix it by capping the HWRM_PORT_PHY_QCFG response to the maximum 96 bytes. The SPEEDS2_SUPPORTED flag needs to be cleared because the new speeds2 fields are beyond the legacy structure. Also modify bnxt_hwrm_fwd_resp() to print a warning if the message size exceeds 96 bytes to make this failure more obvious. Fixes: 84a911db8305 ("bnxt_en: Update firmware interface to 1.10.2.118") Reviewed-by: Somnath Kotur Reviewed-by: Pavan Chebbi Signed-off-by: Michael Chan Link: https://lore.kernel.org/r/20240612231736.57823-1-michael.chan@broadcom.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 51 +++++++++++++++++++ .../net/ethernet/broadcom/bnxt/bnxt_sriov.c | 12 ++++- 2 files changed, 61 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 656ab81c0272d..bbc7edccd5a4d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1434,6 +1434,57 @@ struct bnxt_l2_filter { atomic_t refcnt; }; +/* Compat version of hwrm_port_phy_qcfg_output capped at 96 bytes. The + * first 95 bytes are identical to hwrm_port_phy_qcfg_output in bnxt_hsi.h. + * The last valid byte in the compat version is different. 
+ */ +struct hwrm_port_phy_qcfg_output_compat { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 link; + u8 active_fec_signal_mode; + __le16 link_speed; + u8 duplex_cfg; + u8 pause; + __le16 support_speeds; + __le16 force_link_speed; + u8 auto_mode; + u8 auto_pause; + __le16 auto_link_speed; + __le16 auto_link_speed_mask; + u8 wirespeed; + u8 lpbk; + u8 force_pause; + u8 module_status; + __le32 preemphasis; + u8 phy_maj; + u8 phy_min; + u8 phy_bld; + u8 phy_type; + u8 media_type; + u8 xcvr_pkg_type; + u8 eee_config_phy_addr; + u8 parallel_detect; + __le16 link_partner_adv_speeds; + u8 link_partner_adv_auto_mode; + u8 link_partner_adv_pause; + __le16 adv_eee_link_speed_mask; + __le16 link_partner_adv_eee_link_speed_mask; + __le32 xcvr_identifier_type_tx_lpi_timer; + __le16 fec_cfg; + u8 duplex_state; + u8 option_flags; + char phy_vendor_name[16]; + char phy_vendor_partnumber[16]; + __le16 support_pam4_speeds; + __le16 force_pam4_link_speed; + __le16 auto_pam4_link_speed_mask; + u8 link_partner_pam4_adv_speeds; + u8 valid; +}; + struct bnxt_link_info { u8 phy_type; u8 media_type; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 175192ebaa773..22898d3d088b0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -950,8 +950,11 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, struct hwrm_fwd_resp_input *req; int rc; - if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) + if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) { + netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n", + msg_size); return -EINVAL; + } rc = hwrm_req_init(bp, req, HWRM_FWD_RESP); if (!rc) { @@ -1085,7 +1088,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) rc = bnxt_hwrm_exec_fwd_resp( bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); } else { - struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0}; + struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {}; struct hwrm_port_phy_qcfg_input *phy_qcfg_req; phy_qcfg_req = @@ -1096,6 +1099,11 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) mutex_unlock(&bp->link_lock); phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp)); phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; + /* New SPEEDS2 fields are beyond the legacy structure, so + * clear the SPEEDS2_SUPPORTED flag. + */ + phy_qcfg_resp.option_flags &= + ~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED; phy_qcfg_resp.valid = 1; if (vf->flags & BNXT_VF_LINK_UP) { From a6736a0addd60fccc3a3508461d72314cc609772 Mon Sep 17 00:00:00 2001 From: Rao Shoaib Date: Tue, 11 Jun 2024 01:46:39 -0700 Subject: [PATCH 509/537] af_unix: Read with MSG_PEEK loops if the first unread byte is OOB Read with MSG_PEEK flag loops if the first byte to read is an OOB byte. commit 22dd70eb2c3d ("af_unix: Don't peek OOB data without MSG_OOB.") addresses the loop issue but does not address the issue that no data beyond OOB byte can be read. 
>>> from socket import * >>> c1, c2 = socketpair(AF_UNIX, SOCK_STREAM) >>> c1.send(b'a', MSG_OOB) 1 >>> c1.send(b'b') 1 >>> c2.recv(1, MSG_PEEK | MSG_DONTWAIT) b'b' >>> from socket import * >>> c1, c2 = socketpair(AF_UNIX, SOCK_STREAM) >>> c2.setsockopt(SOL_SOCKET, SO_OOBINLINE, 1) >>> c1.send(b'a', MSG_OOB) 1 >>> c1.send(b'b') 1 >>> c2.recv(1, MSG_PEEK | MSG_DONTWAIT) b'a' >>> c2.recv(1, MSG_PEEK | MSG_DONTWAIT) b'a' >>> c2.recv(1, MSG_DONTWAIT) b'a' >>> c2.recv(1, MSG_PEEK | MSG_DONTWAIT) b'b' >>> Fixes: 314001f0bf92 ("af_unix: Add OOB support") Signed-off-by: Rao Shoaib Reviewed-by: Kuniyuki Iwashima Link: https://lore.kernel.org/r/20240611084639.2248934-1-Rao.Shoaib@oracle.com Signed-off-by: Jakub Kicinski --- net/unix/af_unix.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 80846279de9f3..5e695a9a609c2 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2625,18 +2625,18 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk, if (skb == u->oob_skb) { if (copied) { skb = NULL; - } else if (sock_flag(sk, SOCK_URGINLINE)) { - if (!(flags & MSG_PEEK)) { + } else if (!(flags & MSG_PEEK)) { + if (sock_flag(sk, SOCK_URGINLINE)) { WRITE_ONCE(u->oob_skb, NULL); consume_skb(skb); + } else { + __skb_unlink(skb, &sk->sk_receive_queue); + WRITE_ONCE(u->oob_skb, NULL); + unlinked_skb = skb; + skb = skb_peek(&sk->sk_receive_queue); } - } else if (flags & MSG_PEEK) { - skb = NULL; - } else { - __skb_unlink(skb, &sk->sk_receive_queue); - WRITE_ONCE(u->oob_skb, NULL); - unlinked_skb = skb; - skb = skb_peek(&sk->sk_receive_queue); + } else if (!sock_flag(sk, SOCK_URGINLINE)) { + skb = skb_peek_next(skb, &sk->sk_receive_queue); } } From a9b9741854a9fe9df948af49ca5514e0ed0429df Mon Sep 17 00:00:00 2001 From: Aleksandr Mishin Date: Tue, 11 Jun 2024 11:25:46 +0300 Subject: [PATCH 510/537] bnxt_en: Adjust logging of firmware messages in case of released token in __hwrm_send() In case of token is released due to token->state == BNXT_HWRM_DEFERRED, released token (set to NULL) is used in log messages. This issue is expected to be prevented by HWRM_ERR_CODE_PF_UNAVAILABLE error code. But this error code is returned by recent firmware. So some firmware may not return it. This may lead to NULL pointer dereference. Adjust this issue by adding token pointer check. Found by Linux Verification Center (linuxtesting.org) with SVACE. 
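Reduced to its essence, the fix is to log from state the caller still owns rather than through the possibly released token; a simplified sketch of that pattern (the hunk below is the real change):

    /* Sketch only: ctx->req remains valid for the caller even after the
     * token has been released, so take the sequence id from there instead
     * of dereferencing token->seq_id.
     */
    if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
            hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
                     req_type, le16_to_cpu(ctx->req->seq_id), rc);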
Fixes: 8fa4219dba8e ("bnxt_en: add dynamic debug support for HWRM messages") Suggested-by: Michael Chan Signed-off-by: Aleksandr Mishin Reviewed-by: Wojciech Drewek Reviewed-by: Michael Chan Link: https://lore.kernel.org/r/20240611082547.12178-1-amishin@t-argos.ru Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c index 1df3d56cc4b51..d2fd2d04ed474 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -680,7 +680,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) req_type); else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE) hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", - req_type, token->seq_id, rc); + req_type, le16_to_cpu(ctx->req->seq_id), rc); rc = __hwrm_to_stderr(rc); exit: if (token) From ff0ffe5b7c3c12c6e0cca16652905963ae817b44 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 13 Jun 2024 09:36:50 -0700 Subject: [PATCH 511/537] nvme: fix namespace removal list This function wants to move a subset of a list, from a given element through the tail, into another list. It also needs to use srcu synchronization instead of the regular rcu version. Do this one element at a time because that's the only way to do it. Fixes: be647e2c76b27f4 ("nvme: use srcu for iterating namespace list") Reported-by: Venkat Rao Bagalkote Tested-by: Venkat Rao Bagalkote Signed-off-by: Keith Busch --- drivers/nvme/host/core.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c40930d10bd34..782090ce0bc10 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3960,12 +3960,13 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, mutex_lock(&ctrl->namespaces_lock); list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { - if (ns->head->ns_id > nsid) - list_splice_init_rcu(&ns->list, &rm_list, - synchronize_rcu); + if (ns->head->ns_id > nsid) { + list_del_rcu(&ns->list); + synchronize_srcu(&ctrl->srcu); + list_add_tail_rcu(&ns->list, &rm_list); + } } mutex_unlock(&ctrl->namespaces_lock); - synchronize_srcu(&ctrl->srcu); list_for_each_entry_safe(ns, next, &rm_list, list) nvme_ns_remove(ns); From f4a1254f2a076afb0edd473589bf40f9b4d36b41 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 14 Jun 2024 01:04:29 +0100 Subject: [PATCH 512/537] io_uring: fix cancellation overwriting req->flags Only the current owner of a request is allowed to write into req->flags. Hence, the cancellation path should never touch it. Add a new field instead of the flag, and move it into the 3rd cache line because it should always be initialised. poll_refs can move further, as polling is an involved process anyway. It's a minimal patch; in the future we can and should find a better place for it and remove the now unused REQ_F_CANCEL_SEQ.
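A stripped-down sketch of the approach (the surrounding struct and field placement are heavily simplified and illustrative, not the real io_kiocb layout): a dedicated per-request bool records whether a cancellation sequence has been seen, so the cancellation path never writes into flags it does not own.

    #include <stdbool.h>

    /* Illustrative only */
    struct example_req {
            unsigned int flags;          /* owned by the request's owner */
            bool         cancel_seq_set; /* safe for the cancel path to set */
            int          cancel_seq;
    };

    static bool example_match_sequence(struct example_req *req, int sequence)
    {
            if (req->cancel_seq_set && sequence == req->cancel_seq)
                    return true;

            /* Record the sequence without touching req->flags. */
            req->cancel_seq_set = true;
            req->cancel_seq = sequence;
            return false;
    }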
Fixes: 521223d7c229f ("io_uring/cancel: don't default to setting req->work.cancel_seq") Cc: stable@vger.kernel.org Reported-by: Li Shi Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6827b129f8f0ad76fa9d1f0a773de938b240ffab.1718323430.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 3 ++- io_uring/cancel.h | 4 ++-- io_uring/io_uring.c | 1 + 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 7a6b190c7da74..b48570eaa4497 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -648,7 +648,7 @@ struct io_kiocb { struct io_rsrc_node *rsrc_node; atomic_t refs; - atomic_t poll_refs; + bool cancel_seq_set; struct io_task_work io_task_work; /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */ struct hlist_node hash_node; @@ -657,6 +657,7 @@ struct io_kiocb { /* opcode allocated if it needs to store data for async defer */ void *async_data; /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */ + atomic_t poll_refs; struct io_kiocb *link; /* custom credentials, valid IFF REQ_F_CREDS is set */ const struct cred *creds; diff --git a/io_uring/cancel.h b/io_uring/cancel.h index 76b32e65c03cd..b33995e00ba90 100644 --- a/io_uring/cancel.h +++ b/io_uring/cancel.h @@ -27,10 +27,10 @@ bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd); static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence) { - if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq) + if (req->cancel_seq_set && sequence == req->work.cancel_seq) return true; - req->flags |= REQ_F_CANCEL_SEQ; + req->cancel_seq_set = true; req->work.cancel_seq = sequence; return false; } diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 816e93e7f9490..154b25b8a613b 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -2058,6 +2058,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, req->file = NULL; req->rsrc_node = NULL; req->task = current; + req->cancel_seq_set = false; if (unlikely(opcode >= IORING_OP_LAST)) { req->opcode = 0; From 22f00812862564b314784167a89f27b444f82a46 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Thu, 13 Jun 2024 21:30:43 -0400 Subject: [PATCH 513/537] USB: class: cdc-wdm: Fix CPU lockup caused by excessive log messages The syzbot fuzzer found that the interrupt-URB completion callback in the cdc-wdm driver was taking too long, and the driver's immediate resubmission of interrupt URBs with -EPROTO status combined with the dummy-hcd emulation to cause a CPU lockup: cdc_wdm 1-1:1.0: nonzero urb status received: -71 cdc_wdm 1-1:1.0: wdm_int_callback - 0 bytes watchdog: BUG: soft lockup - CPU#0 stuck for 26s! 
[syz-executor782:6625] CPU#0 Utilization every 4s during lockup: #1: 98% system, 0% softirq, 3% hardirq, 0% idle #2: 98% system, 0% softirq, 3% hardirq, 0% idle #3: 98% system, 0% softirq, 3% hardirq, 0% idle #4: 98% system, 0% softirq, 3% hardirq, 0% idle #5: 98% system, 1% softirq, 3% hardirq, 0% idle Modules linked in: irq event stamp: 73096 hardirqs last enabled at (73095): [] console_emit_next_record kernel/printk/printk.c:2935 [inline] hardirqs last enabled at (73095): [] console_flush_all+0x650/0xb74 kernel/printk/printk.c:2994 hardirqs last disabled at (73096): [] __el1_irq arch/arm64/kernel/entry-common.c:533 [inline] hardirqs last disabled at (73096): [] el1_interrupt+0x24/0x68 arch/arm64/kernel/entry-common.c:551 softirqs last enabled at (73048): [] softirq_handle_end kernel/softirq.c:400 [inline] softirqs last enabled at (73048): [] handle_softirqs+0xa60/0xc34 kernel/softirq.c:582 softirqs last disabled at (73043): [] __do_softirq+0x14/0x20 kernel/softirq.c:588 CPU: 0 PID: 6625 Comm: syz-executor782 Tainted: G W 6.10.0-rc2-syzkaller-g8867bbd4a056 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/02/2024 Testing showed that the problem did not occur if the two error messages -- the first two lines above -- were removed; apparently adding material to the kernel log takes a surprisingly large amount of time. In any case, the best approach for preventing these lockups and to avoid spamming the log with thousands of error messages per second is to ratelimit the two dev_err() calls. Therefore we replace them with dev_err_ratelimited(). Signed-off-by: Alan Stern Suggested-by: Greg KH Reported-and-tested-by: syzbot+5f996b83575ef4058638@syzkaller.appspotmail.com Closes: https://lore.kernel.org/linux-usb/00000000000073d54b061a6a1c65@google.com/ Reported-and-tested-by: syzbot+1b2abad17596ad03dcff@syzkaller.appspotmail.com Closes: https://lore.kernel.org/linux-usb/000000000000f45085061aa9b37e@google.com/ Fixes: 9908a32e94de ("USB: remove err() macro from usb class drivers") Link: https://lore.kernel.org/linux-usb/40dfa45b-5f21-4eef-a8c1-51a2f320e267@rowland.harvard.edu/ Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/29855215-52f5-4385-b058-91f42c2bee18@rowland.harvard.edu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-wdm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index c553decb54610..6830be4419e20 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -266,14 +266,14 @@ static void wdm_int_callback(struct urb *urb) dev_err(&desc->intf->dev, "Stall on int endpoint\n"); goto sw; /* halt is cleared in work */ default: - dev_err(&desc->intf->dev, + dev_err_ratelimited(&desc->intf->dev, "nonzero urb status received: %d\n", status); break; } } if (urb->actual_length < sizeof(struct usb_cdc_notification)) { - dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n", + dev_err_ratelimited(&desc->intf->dev, "wdm_int_callback - %d bytes\n", urb->actual_length); goto exit; } From 41f590e31c6c8b8a0a490b7c1ad2e57c20ec3d9b Mon Sep 17 00:00:00 2001 From: pengfuyuan Date: Thu, 6 Jun 2024 20:08:42 +0800 Subject: [PATCH 514/537] arm/komeda: Remove all CONFIG_DEBUG_FS conditional compilations Since the debugfs functions have no-op stubs for CONFIG_DEBUG_FS=n, the compiler will optimize the rest away since they are no longer referenced. 
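For context, this relies on the stub convention used by the debugfs API: with CONFIG_DEBUG_FS=n its entry points become static inline no-ops, so unconditional callers still compile and the calls vanish under optimization. A generic sketch of that idiom, using a hypothetical config symbol and function names:

    /* Illustrative header pattern, not the actual debugfs declarations. */
    struct example_dev;

    #ifdef CONFIG_EXAMPLE_DEBUG
    void example_debugfs_init(struct example_dev *edev);
    #else
    static inline void example_debugfs_init(struct example_dev *edev) { }
    #endif

    static inline int example_probe(struct example_dev *edev)
    {
            /* No #ifdef needed at the call site. */
            example_debugfs_init(edev);
            return 0;
    }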
The benefit of removing the conditional compilation is that the build is actually tested for both CONFIG_DEBUG_FS configuration values. Assuming most developers have it enabled, CONFIG_DEBUG_FS=n is not tested much and may fail the build due to the conditional compilation. Reported-by: k2ci Signed-off-by: pengfuyuan Acked-by: Liviu Dudau Link: https://patchwork.freedesktop.org/patch/msgid/20240606120842.1377267-1-pengfuyuan@kylinos.cn Signed-off-by: Liviu Dudau Signed-off-by: Maxime Ripard --- drivers/gpu/drm/arm/display/komeda/komeda_dev.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c index 14ee79becacb5..5ba62e637a616 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c @@ -12,10 +12,8 @@ #include #include #include -#ifdef CONFIG_DEBUG_FS #include #include -#endif #include @@ -43,7 +41,6 @@ static int komeda_register_show(struct seq_file *sf, void *x) DEFINE_SHOW_ATTRIBUTE(komeda_register); -#ifdef CONFIG_DEBUG_FS static void komeda_debugfs_init(struct komeda_dev *mdev) { if (!debugfs_initialized()) @@ -55,7 +52,6 @@ static void komeda_debugfs_init(struct komeda_dev *mdev) debugfs_create_x16("err_verbosity", 0664, mdev->debugfs_root, &mdev->err_verbosity); } -#endif static ssize_t core_id_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -265,9 +261,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev) mdev->err_verbosity = KOMEDA_DEV_PRINT_ERR_EVENTS; -#ifdef CONFIG_DEBUG_FS komeda_debugfs_init(mdev); -#endif return mdev; @@ -286,9 +280,7 @@ void komeda_dev_destroy(struct komeda_dev *mdev) sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group); -#ifdef CONFIG_DEBUG_FS debugfs_remove_recursive(mdev->debugfs_root); -#endif if (mdev->aclk) clk_prepare_enable(mdev->aclk); From a6a75edc8669a4f030546c7390808ef0cc034742 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Thu, 13 Jun 2024 19:33:53 +0200 Subject: [PATCH 515/537] ata: libata-scsi: Set the RMB bit only for removable media devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The SCSI Removable Media Bit (RMB) should only be set for removable media, where the device stays and the media changes, e.g. CD-ROM or floppy. The ATA removable media device bit is obsoleted since ATA-8 ACS (2006), but before that it was used to indicate that the device can have its media removed (while the device stays). Commit 8a3e33cf92c7 ("ata: ahci: find eSATA ports and flag them as removable") introduced a change to set the RMB bit if the port has either the eSATA bit or the hot-plug capable bit set. The reasoning was that the author wanted his eSATA ports to get treated like a USB stick. This is however wrong. See "20-082r23SPC-6: Removable Medium Bit Expectations" which has since been integrated to SPC, which states that: """ Reports have been received that some USB Memory Stick device servers set the removable medium (RMB) bit to one. The rub comes when the medium is actually removed, because... The device server is removed concurrently with the medium removal. If there is no device server, then there is no device server that is waiting to have removable medium inserted. Sufficient numbers of SCSI analysts see such a device: - not as a device that supports removable medium; but - as a removable, hot pluggable device. 
""" The definition of the RMB bit in the SPC specification has since been clarified to match this. Thus, a USB stick should not have the RMB bit set (and neither shall an eSATA nor a hot-plug capable port). Commit dc8b4afc4a04 ("ata: ahci: don't mark HotPlugCapable Ports as external/removable") then changed so that the RMB bit is only set for the eSATA bit (and not for the hot-plug capable bit), because of a lot of bug reports of SATA devices were being automounted by udisks. However, treating eSATA and hot-plug capable ports differently is not correct. From the AHCI 1.3.1 spec: Hot Plug Capable Port (HPCP): When set to '1', indicates that this port's signal and power connectors are externally accessible via a joint signal and power connector for blindmate device hot plug. So a hot-plug capable port is an external port, just like commit 45b96d65ec68 ("ata: ahci: a hotplug capable port is an external port") claims. In order to not violate the SPC specification, modify the SCSI INQUIRY data to only set the RMB bit if the ATA device can have its media removed. This fixes a reported problem where GNOME/udisks was automounting devices connected to hot-plug capable ports. Fixes: 45b96d65ec68 ("ata: ahci: a hotplug capable port is an external port") Cc: stable@vger.kernel.org Reviewed-by: Mario Limonciello Reviewed-by: Thomas Weißschuh Tested-by: Thomas Weißschuh Reported-by: Thomas Weißschuh Closes: https://lore.kernel.org/linux-ide/c0de8262-dc4b-4c22-9fac-33432e5bddd3@t-8ch.de/ Signed-off-by: Damien Le Moal [cassel: wrote commit message] Signed-off-by: Niklas Cassel --- drivers/ata/libata-scsi.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index cdf29b178ddc1..bb4d30d377ae5 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1831,11 +1831,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) 2 }; - /* set scsi removable (RMB) bit per ata bit, or if the - * AHCI port says it's external (Hotplug-capable, eSATA). + /* + * Set the SCSI Removable Media Bit (RMB) if the ATA removable media + * device bit (obsolete since ATA-8 ACS) is set. */ - if (ata_id_removable(args->id) || - (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL)) + if (ata_id_removable(args->id)) hdr[1] |= (1 << 7); if (args->dev->class == ATA_DEV_ZAC) { From 5f75e081ab5cbfbe7aca2112a802e69576ee9778 Mon Sep 17 00:00:00 2001 From: Cyril Hrubis Date: Thu, 13 Jun 2024 18:38:17 +0200 Subject: [PATCH 516/537] loop: Disable fallocate() zero and discard if not supported If fallcate is implemented but zero and discard operations are not supported by the filesystem the backing file is on we continue to fill dmesg with errors from the blk_mq_end_request() since each time we call fallocate() on the loop device the EOPNOTSUPP error from lo_fallocate() ends up propagated into the block layer. In the end syscall succeeds since the blkdev_issue_zeroout() falls back to writing zeroes which makes the errors even more misleading and confusing. How to reproduce: 1. make sure /tmp is mounted as tmpfs 2. dd if=/dev/zero of=/tmp/disk.img bs=1M count=100 3. losetup /dev/loop0 /tmp/disk.img 4. mkfs.ext2 /dev/loop0 5. 
dmesg |tail [710690.898214] operation not supported error, dev loop0, sector 204672 op 0x9:(WRITE_ZEROES) flags 0x8000800 phys_seg 0 prio class 0 [710690.898279] operation not supported error, dev loop0, sector 522 op 0x9:(WRITE_ZEROES) flags 0x8000800 phys_seg 0 prio class 0 [710690.898603] operation not supported error, dev loop0, sector 16906 op 0x9:(WRITE_ZEROES) flags 0x8000800 phys_seg 0 prio class 0 [710690.898917] operation not supported error, dev loop0, sector 32774 op 0x9:(WRITE_ZEROES) flags 0x8000800 phys_seg 0 prio class 0 [710690.899218] operation not supported error, dev loop0, sector 49674 op 0x9:(WRITE_ZEROES) flags 0x8000800 phys_seg 0 prio class 0 [710690.899484] operation not supported error, dev loop0, sector 65542 op 0x9:(WRITE_ZEROES) flags 0x8000800 phys_seg 0 prio class 0 [710690.899743] operation not supported error, dev loop0, sector 82442 op 0x9:(WRITE_ZEROES) flags 0x8000800 phys_seg 0 prio class 0 [710690.900015] operation not supported error, dev loop0, sector 98310 op 0x9:(WRITE_ZEROES) flags 0x8000800 phys_seg 0 prio class 0 [710690.900276] operation not supported error, dev loop0, sector 115210 op 0x9:(WRITE_ZEROES) flags 0x8000800 phys_seg 0 prio class 0 [710690.900546] operation not supported error, dev loop0, sector 131078 op 0x9:(WRITE_ZEROES) flags 0x8000800 phys_seg 0 prio class 0 This patch changes the lo_fallocate() to clear the flags for zero and discard operations if we get EOPNOTSUPP from the backing file fallocate callback, that way we at least stop spewing errors after the first unsuccessful try. CC: Jan Kara Signed-off-by: Cyril Hrubis Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20240613163817.22640-1-chrubis@suse.cz Signed-off-by: Jens Axboe --- drivers/block/loop.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 93780f41646b7..1153721bc7c25 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -302,6 +302,21 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq, return 0; } +static void loop_clear_limits(struct loop_device *lo, int mode) +{ + struct queue_limits lim = queue_limits_start_update(lo->lo_queue); + + if (mode & FALLOC_FL_ZERO_RANGE) + lim.max_write_zeroes_sectors = 0; + + if (mode & FALLOC_FL_PUNCH_HOLE) { + lim.max_hw_discard_sectors = 0; + lim.discard_granularity = 0; + } + + queue_limits_commit_update(lo->lo_queue, &lim); +} + static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos, int mode) { @@ -320,6 +335,14 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos, ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq)); if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP)) return -EIO; + + /* + * We initially configure the limits in a hope that fallocate is + * supported and clear them here if that turns out not to be true. + */ + if (unlikely(ret == -EOPNOTSUPP)) + loop_clear_limits(lo, mode); + return ret; } From e789523fe248a80839f8dfe736ca96e5c442a9e8 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Thu, 13 Jun 2024 18:03:43 +0900 Subject: [PATCH 517/537] firewire: fix website URL in Kconfig The wiki in kernel.org is no longer updated. This commit replaces the website URL with the latest one. 
Link: https://lore.kernel.org/r/20240613090343.416198-1-o-takashi@sakamocchi.jp Signed-off-by: Takashi Sakamoto --- drivers/firewire/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index 869598b20e3a0..5268b3f0a25a5 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig @@ -11,7 +11,7 @@ config FIREWIRE This is the new-generation IEEE 1394 (FireWire) driver stack a.k.a. Juju, a new implementation designed for robustness and simplicity. - See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration + See http://ieee1394.docs.kernel.org/en/latest/migration.html for information about migration from the older Linux 1394 stack to the new driver stack. From e7da16abf030b39c3598574d5457f324a6f0e4a7 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Thu, 13 Jun 2024 22:14:33 +0900 Subject: [PATCH 518/537] firewire: core: record card index in tracepoints events derived from async_outbound_complete_template The asynchronous transaction is initiated on one of the 1394 OHCI controllers, but the existing tracepoints events lack data about which one. This commit adds a card_index member to the event structure to store the index of the host controller in use, and prints it. Link: https://lore.kernel.org/r/20240613131440.431766-2-o-takashi@sakamocchi.jp Signed-off-by: Takashi Sakamoto --- drivers/firewire/core-transaction.c | 6 +++--- include/trace/events/firewire.h | 17 ++++++++++------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 571fdff65c2bc..de75e758fd072 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -174,8 +174,8 @@ static void transmit_complete_callback(struct fw_packet *packet, struct fw_transaction *t = container_of(packet, struct fw_transaction, packet); - trace_async_request_outbound_complete((uintptr_t)t, packet->generation, packet->speed, - status, packet->timestamp); + trace_async_request_outbound_complete((uintptr_t)t, card->index, packet->generation, + packet->speed, status, packet->timestamp); switch (status) { case ACK_COMPLETE: @@ -674,7 +674,7 @@ static void free_response_callback(struct fw_packet *packet, { struct fw_request *request = container_of(packet, struct fw_request, response); - trace_async_response_outbound_complete((uintptr_t)request, packet->generation, + trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation, packet->speed, status, packet->timestamp); // Decrease the reference count since not at in-flight. diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h index d695a560673f7..ca6ea9bd1ebaf 100644 --- a/include/trace/events/firewire.h +++ b/include/trace/events/firewire.h @@ -71,10 +71,11 @@ DECLARE_EVENT_CLASS(async_outbound_initiate_template, // The value of status is one of ack codes and rcodes specific to Linux FireWire subsystem.
DECLARE_EVENT_CLASS(async_outbound_complete_template, - TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp), - TP_ARGS(transaction, generation, scode, status, timestamp), + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp), + TP_ARGS(transaction, card_index, generation, scode, status, timestamp), TP_STRUCT__entry( __field(u64, transaction) + __field(u8, card_index) __field(u8, generation) __field(u8, scode) __field(u8, status) @@ -82,14 +83,16 @@ DECLARE_EVENT_CLASS(async_outbound_complete_template, ), TP_fast_assign( __entry->transaction = transaction; + __entry->card_index = card_index; __entry->generation = generation; __entry->scode = scode; __entry->status = status; __entry->timestamp = timestamp; ), TP_printk( - "transaction=0x%llx generation=%u scode=%u status=%u timestamp=0x%04x", + "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x", __entry->transaction, + __entry->card_index, __entry->generation, __entry->scode, __entry->status, @@ -144,8 +147,8 @@ DEFINE_EVENT(async_outbound_initiate_template, async_request_outbound_initiate, ); DEFINE_EVENT(async_outbound_complete_template, async_request_outbound_complete, - TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp), - TP_ARGS(transaction, generation, scode, status, timestamp) + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp), + TP_ARGS(transaction, card_index, generation, scode, status, timestamp) ); DEFINE_EVENT(async_inbound_template, async_response_inbound, @@ -194,8 +197,8 @@ DEFINE_EVENT_PRINT(async_outbound_initiate_template, async_response_outbound_ini ); DEFINE_EVENT(async_outbound_complete_template, async_response_outbound_complete, - TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp), - TP_ARGS(transaction, generation, scode, status, timestamp) + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp), + TP_ARGS(transaction, card_index, generation, scode, status, timestamp) ); #undef ASYNC_HEADER_GET_DESTINATION From 64e02b64fb1d832a9a5254055a829db64750421a Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Thu, 13 Jun 2024 22:14:34 +0900 Subject: [PATCH 519/537] firewire: core: record card index in tracepoints events derived from async_outbound_initiate_template The asynchronous transaction is initiated on one of the 1394 OHCI controllers, but the existing tracepoints events lack data about which one. This commit adds a card_index member to the event structure to store the index of the host controller in use, and prints it.
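The reason a single hunk covers several events is the event-class mechanism: events created with DEFINE_EVENT share the prototype, record layout, and print format of their DECLARE_EVENT_CLASS, so the new card_index field is added once per class. A stripped-down sketch of that relationship, with illustrative names only (the usual TRACE_SYSTEM/TRACE_INCLUDE boilerplate of a trace header is omitted):

    /* Illustrative only: derived events inherit the class definition. */
    DECLARE_EVENT_CLASS(example_template,
            TP_PROTO(u64 cookie, unsigned int card_index),
            TP_ARGS(cookie, card_index),
            TP_STRUCT__entry(
                    __field(u64, cookie)
                    __field(u8, card_index)
            ),
            TP_fast_assign(
                    __entry->cookie = cookie;
                    __entry->card_index = card_index;
            ),
            TP_printk("cookie=0x%llx card_index=%u",
                      __entry->cookie, __entry->card_index)
    );

    DEFINE_EVENT(example_template, example_event_a,
            TP_PROTO(u64 cookie, unsigned int card_index),
            TP_ARGS(cookie, card_index)
    );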
Link: https://lore.kernel.org/r/20240613131440.431766-3-o-takashi@sakamocchi.jp Signed-off-by: Takashi Sakamoto --- drivers/firewire/core-transaction.c | 10 ++++++---- include/trace/events/firewire.h | 20 ++++++++++++-------- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index de75e758fd072..3f9361d156076 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -398,7 +398,8 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode spin_unlock_irqrestore(&card->lock, flags); - trace_async_request_outbound_initiate((uintptr_t)t, generation, speed, t->packet.header, payload, + trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed, + t->packet.header, payload, tcode_is_read_request(tcode) ? 0 : length / 4); card->driver->send_request(card, &t->packet); @@ -879,9 +880,10 @@ void fw_send_response(struct fw_card *card, // Increase the reference count so that the object is kept during in-flight. fw_request_get(request); - trace_async_response_outbound_initiate((uintptr_t)request, request->response.generation, - request->response.speed, request->response.header, - data, data ? data_length / 4 : 0); + trace_async_response_outbound_initiate((uintptr_t)request, card->index, + request->response.generation, request->response.speed, + request->response.header, data, + data ? data_length / 4 : 0); card->driver->send_response(card, &request->response); } diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h index ca6ea9bd1ebaf..a3d9916cbad19 100644 --- a/include/trace/events/firewire.h +++ b/include/trace/events/firewire.h @@ -36,10 +36,11 @@ #define QUADLET_SIZE 4 DECLARE_EVENT_CLASS(async_outbound_initiate_template, - TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count), - TP_ARGS(transaction, generation, scode, header, data, data_count), + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count), + TP_ARGS(transaction, card_index, generation, scode, header, data, data_count), TP_STRUCT__entry( __field(u64, transaction) + __field(u8, card_index) __field(u8, generation) __field(u8, scode) __array(u32, header, ASYNC_HEADER_QUADLET_COUNT) @@ -47,6 +48,7 @@ DECLARE_EVENT_CLASS(async_outbound_initiate_template, ), TP_fast_assign( __entry->transaction = transaction; + __entry->card_index = card_index; __entry->generation = generation; __entry->scode = scode; memcpy(__entry->header, header, QUADLET_SIZE * ASYNC_HEADER_QUADLET_COUNT); @@ -54,8 +56,9 @@ DECLARE_EVENT_CLASS(async_outbound_initiate_template, ), // This format is for the request subaction. 
TP_printk( - "transaction=0x%llx generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s", + "transaction=0x%llx card_index=%u generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s", __entry->transaction, + __entry->card_index, __entry->generation, __entry->scode, ASYNC_HEADER_GET_DESTINATION(__entry->header), @@ -142,8 +145,8 @@ DECLARE_EVENT_CLASS(async_inbound_template, ); DEFINE_EVENT(async_outbound_initiate_template, async_request_outbound_initiate, - TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count), - TP_ARGS(transaction, generation, scode, header, data, data_count) + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count), + TP_ARGS(transaction, card_index, generation, scode, header, data, data_count) ); DEFINE_EVENT(async_outbound_complete_template, async_request_outbound_complete, @@ -178,11 +181,12 @@ DEFINE_EVENT_PRINT(async_inbound_template, async_request_inbound, ); DEFINE_EVENT_PRINT(async_outbound_initiate_template, async_response_outbound_initiate, - TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count), - TP_ARGS(transaction, generation, scode, header, data, data_count), + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, const u32 *header, const u32 *data, unsigned int data_count), + TP_ARGS(transaction, card_index, generation, scode, header, data, data_count), TP_printk( - "transaction=0x%llx generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s", + "transaction=0x%llx card_index=%u generation=%u scode=%u dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s", __entry->transaction, + __entry->card_index, __entry->generation, __entry->scode, ASYNC_HEADER_GET_DESTINATION(__entry->header), From 65ec7ebefe7de01281cce1f552ebd4dd00386665 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Thu, 13 Jun 2024 22:14:35 +0900 Subject: [PATCH 520/537] firewire: core: record card index in tracepoints events derived from async_inbound_template The asynchronous transaction is initiated on one of the 1394 OHCI controllers, but the existing tracepoints events lack data about which one. This commit adds a card_index member to the event structure to store the index of the host controller in use, and prints it. Link: https://lore.kernel.org/r/20240613131440.431766-4-o-takashi@sakamocchi.jp Signed-off-by: Takashi Sakamoto --- drivers/firewire/core-transaction.c | 8 ++++---- include/trace/events/firewire.h | 20 ++++++++++++-------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 3f9361d156076..3503c238f8ae2 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -1009,8 +1009,8 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) return; } - trace_async_request_inbound((uintptr_t)request, p->generation, p->speed, p->ack, - p->timestamp, p->header, request->data, + trace_async_request_inbound((uintptr_t)request, card->index, p->generation, p->speed, + p->ack, p->timestamp, p->header, request->data, tcode_is_read_request(tcode) ?
0 : request->length / 4); offset = async_header_get_offset(p->header); @@ -1080,8 +1080,8 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p) } spin_unlock_irqrestore(&card->lock, flags); - trace_async_response_inbound((uintptr_t)t, p->generation, p->speed, p->ack, p->timestamp, - p->header, data, data_length / 4); + trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack, + p->timestamp, p->header, data, data_length / 4); if (!t) { timed_out: diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h index a3d9916cbad19..b72f613cfa025 100644 --- a/include/trace/events/firewire.h +++ b/include/trace/events/firewire.h @@ -105,10 +105,11 @@ DECLARE_EVENT_CLASS(async_outbound_complete_template, // The value of status is one of ack codes and rcodes specific to Linux FireWire subsystem. DECLARE_EVENT_CLASS(async_inbound_template, - TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count), - TP_ARGS(transaction, generation, scode, status, timestamp, header, data, data_count), + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count), + TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count), TP_STRUCT__entry( __field(u64, transaction) + __field(u8, card_index) __field(u8, generation) __field(u8, scode) __field(u8, status) @@ -118,6 +119,7 @@ DECLARE_EVENT_CLASS(async_inbound_template, ), TP_fast_assign( __entry->transaction = transaction; + __entry->card_index = card_index; __entry->generation = generation; __entry->scode = scode; __entry->status = status; @@ -127,8 +129,9 @@ DECLARE_EVENT_CLASS(async_inbound_template, ), // This format is for the response subaction. 
TP_printk( - "transaction=0x%llx generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s", + "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x rcode=%u header=%s data=%s", __entry->transaction, + __entry->card_index, __entry->generation, __entry->scode, __entry->status, @@ -155,16 +158,17 @@ DEFINE_EVENT(async_outbound_complete_template, async_request_outbound_complete, ); DEFINE_EVENT(async_inbound_template, async_response_inbound, - TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count), - TP_ARGS(transaction, generation, scode, status, timestamp, header, data, data_count) + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count), + TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count) ); DEFINE_EVENT_PRINT(async_inbound_template, async_request_inbound, - TP_PROTO(u64 transaction, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count), - TP_ARGS(transaction, generation, scode, status, timestamp, header, data, data_count), + TP_PROTO(u64 transaction, unsigned int card_index, unsigned int generation, unsigned int scode, unsigned int status, unsigned int timestamp, const u32 *header, const u32 *data, unsigned int data_count), + TP_ARGS(transaction, card_index, generation, scode, status, timestamp, header, data, data_count), TP_printk( - "transaction=0x%llx generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s", + "transaction=0x%llx card_index=%u generation=%u scode=%u status=%u timestamp=0x%04x dst_id=0x%04x tlabel=%u tcode=%u src_id=0x%04x offset=0x%012llx header=%s data=%s", __entry->transaction, + __entry->card_index, __entry->generation, __entry->scode, __entry->status, From 3cb44a72a39835b368ab78d739819330089aa2bf Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Thu, 13 Jun 2024 22:14:36 +0900 Subject: [PATCH 521/537] firewire: core: record card index in async_phy_outbound_initiate tracepoints event The asynchronous transaction is initiated on one of 1394 OHCI controller, however the existing tracepoints events has the lack of data about it. This commit adds card_index member into event structure to store the index of host controller in use, and prints it. 
Link: https://lore.kernel.org/r/20240613131440.431766-5-o-takashi@sakamocchi.jp Signed-off-by: Takashi Sakamoto --- drivers/firewire/core-cdev.c | 4 ++-- drivers/firewire/core-transaction.c | 2 +- include/trace/events/firewire.h | 9 ++++++--- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 55993c9e0b904..ff8739f96af5b 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -1659,8 +1659,8 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg) memcpy(pp->data, a->data, sizeof(a->data)); } - trace_async_phy_outbound_initiate((uintptr_t)&e->p, e->p.generation, e->p.header[1], - e->p.header[2]); + trace_async_phy_outbound_initiate((uintptr_t)&e->p, card->index, e->p.generation, + e->p.header[1], e->p.header[2]); card->driver->send_request(card, &e->p); diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 3503c238f8ae2..e522dc3d98970 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -504,7 +504,7 @@ void fw_send_phy_config(struct fw_card *card, phy_config_packet.generation = generation; reinit_completion(&phy_config_done); - trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, + trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, card->index, phy_config_packet.generation, phy_config_packet.header[1], phy_config_packet.header[2]); diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h index b72f613cfa025..a59dc26b2a536 100644 --- a/include/trace/events/firewire.h +++ b/include/trace/events/firewire.h @@ -217,23 +217,26 @@ DEFINE_EVENT(async_outbound_complete_template, async_response_outbound_complete, #undef ASYNC_HEADER_GET_RCODE TRACE_EVENT(async_phy_outbound_initiate, - TP_PROTO(u64 packet, unsigned int generation, u32 first_quadlet, u32 second_quadlet), - TP_ARGS(packet, generation, first_quadlet, second_quadlet), + TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, u32 first_quadlet, u32 second_quadlet), + TP_ARGS(packet, card_index, generation, first_quadlet, second_quadlet), TP_STRUCT__entry( __field(u64, packet) + __field(u8, card_index) __field(u8, generation) __field(u32, first_quadlet) __field(u32, second_quadlet) ), TP_fast_assign( __entry->packet = packet; + __entry->card_index = card_index; __entry->generation = generation; __entry->first_quadlet = first_quadlet; __entry->second_quadlet = second_quadlet ), TP_printk( - "packet=0x%llx generation=%u first_quadlet=0x%08x second_quadlet=0x%08x", + "packet=0x%llx card_index=%u generation=%u first_quadlet=0x%08x second_quadlet=0x%08x", __entry->packet, + __entry->card_index, __entry->generation, __entry->first_quadlet, __entry->second_quadlet From 810f2aa83563020d834425018303f2aaecef1e6e Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Thu, 13 Jun 2024 22:14:37 +0900 Subject: [PATCH 522/537] firewire: core: record card index in async_phy_outbound_complete tracepoints event The asynchronous transmission of phy packet is initiated on one of 1394 OHCI controller, however the existing tracepoints events has the lack of data about it. This commit adds card_index member into event structure to store the index of host controller in use, and prints it. 
Link: https://lore.kernel.org/r/20240613131440.431766-6-o-takashi@sakamocchi.jp Signed-off-by: Takashi Sakamoto --- drivers/firewire/core-cdev.c | 2 +- drivers/firewire/core-transaction.c | 2 +- include/trace/events/firewire.h | 9 ++++++--- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index ff8739f96af5b..9a7dc90330a35 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -1559,7 +1559,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet, struct client *e_client = e->client; u32 rcode; - trace_async_phy_outbound_complete((uintptr_t)packet, status, packet->generation, + trace_async_phy_outbound_complete((uintptr_t)packet, card->index, status, packet->generation, packet->timestamp); switch (status) { diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index e522dc3d98970..bd5a467cfd603 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -464,7 +464,7 @@ static DECLARE_COMPLETION(phy_config_done); static void transmit_phy_packet_callback(struct fw_packet *packet, struct fw_card *card, int status) { - trace_async_phy_outbound_complete((uintptr_t)packet, packet->generation, status, + trace_async_phy_outbound_complete((uintptr_t)packet, card->index, packet->generation, status, packet->timestamp); complete(&phy_config_done); } diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h index a59dc26b2a536..61c7a2461fbc6 100644 --- a/include/trace/events/firewire.h +++ b/include/trace/events/firewire.h @@ -244,23 +244,26 @@ TRACE_EVENT(async_phy_outbound_initiate, ); TRACE_EVENT(async_phy_outbound_complete, - TP_PROTO(u64 packet, unsigned int generation, unsigned int status, unsigned int timestamp), - TP_ARGS(packet, generation, status, timestamp), + TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, unsigned int status, unsigned int timestamp), + TP_ARGS(packet, card_index, generation, status, timestamp), TP_STRUCT__entry( __field(u64, packet) + __field(u8, card_index) __field(u8, generation) __field(u8, status) __field(u16, timestamp) ), TP_fast_assign( __entry->packet = packet; + __entry->card_index = card_index; __entry->generation = generation; __entry->status = status; __entry->timestamp = timestamp; ), TP_printk( - "packet=0x%llx generation=%u status=%u timestamp=0x%04x", + "packet=0x%llx card_index=%u generation=%u status=%u timestamp=0x%04x", __entry->packet, + __entry->card_index, __entry->generation, __entry->status, __entry->timestamp From abbb4bd96d7f871b10bd7058f7284ffaf4e8257f Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Thu, 13 Jun 2024 22:14:38 +0900 Subject: [PATCH 523/537] firewire: core: record card index in async_phy_inbound tracepoints event The asynchronous transmission of phy packet is initiated on one of 1394 OHCI controller, however the existing tracepoints events has the lack of data about it. This commit adds card_index member into event structure to store the index of host controller in use, and prints it. 
Link: https://lore.kernel.org/r/20240613131440.431766-7-o-takashi@sakamocchi.jp Signed-off-by: Takashi Sakamoto --- drivers/firewire/core-transaction.c | 2 +- include/trace/events/firewire.h | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index bd5a467cfd603..76ab6a2097685 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -997,7 +997,7 @@ void fw_core_handle_request(struct fw_card *card, struct fw_packet *p) tcode = async_header_get_tcode(p->header); if (tcode_is_link_internal(tcode)) { - trace_async_phy_inbound((uintptr_t)p, p->generation, p->ack, p->timestamp, + trace_async_phy_inbound((uintptr_t)p, card->index, p->generation, p->ack, p->timestamp, p->header[1], p->header[2]); fw_cdev_handle_phy_packet(card, p); return; diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h index 61c7a2461fbc6..e5524fc718801 100644 --- a/include/trace/events/firewire.h +++ b/include/trace/events/firewire.h @@ -271,10 +271,11 @@ TRACE_EVENT(async_phy_outbound_complete, ); TRACE_EVENT(async_phy_inbound, - TP_PROTO(u64 packet, unsigned int generation, unsigned int status, unsigned int timestamp, u32 first_quadlet, u32 second_quadlet), - TP_ARGS(packet, generation, status, timestamp, first_quadlet, second_quadlet), + TP_PROTO(u64 packet, unsigned int card_index, unsigned int generation, unsigned int status, unsigned int timestamp, u32 first_quadlet, u32 second_quadlet), + TP_ARGS(packet, card_index, generation, status, timestamp, first_quadlet, second_quadlet), TP_STRUCT__entry( __field(u64, packet) + __field(u8, card_index) __field(u8, generation) __field(u8, status) __field(u16, timestamp) @@ -290,8 +291,9 @@ TRACE_EVENT(async_phy_inbound, __entry->second_quadlet = second_quadlet ), TP_printk( - "packet=0x%llx generation=%u status=%u timestamp=0x%04x first_quadlet=0x%08x second_quadlet=0x%08x", + "packet=0x%llx card_index=%u generation=%u status=%u timestamp=0x%04x first_quadlet=0x%08x second_quadlet=0x%08x", __entry->packet, + __entry->card_index, __entry->generation, __entry->status, __entry->timestamp, From 7507dbc46b780e9fa2ec3d4db7cd9ce07e722927 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Thu, 13 Jun 2024 22:14:39 +0900 Subject: [PATCH 524/537] firewire: core: record card index in tracepoints events derived from bus_reset_arrange_template The asynchronous transmission of a phy packet is initiated on one of the 1394 OHCI controllers, but the existing tracepoints events lack data about which one. This commit adds a card_index member to the event structure to store the index of the host controller in use, and prints it. Link: https://lore.kernel.org/r/20240613131440.431766-8-o-takashi@sakamocchi.jp Signed-off-by: Takashi Sakamoto --- drivers/firewire/core-card.c | 6 +++--- include/trace/events/firewire.h | 21 ++++++++++++--------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c index 127d87e3a1530..f8b99dd6cd827 100644 --- a/drivers/firewire/core-card.c +++ b/drivers/firewire/core-card.c @@ -222,14 +222,14 @@ static int reset_bus(struct fw_card *card, bool short_reset) int reg = short_reset ? 5 : 1; int bit = short_reset ?
PHY_BUS_SHORT_RESET : PHY_BUS_RESET; - trace_bus_reset_initiate(card->generation, short_reset); + trace_bus_reset_initiate(card->index, card->generation, short_reset); return card->driver->update_phy_reg(card, reg, 0, bit); } void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset) { - trace_bus_reset_schedule(card->generation, short_reset); + trace_bus_reset_schedule(card->index, card->generation, short_reset); /* We don't try hard to sort out requests of long vs. short resets. */ card->br_short = short_reset; @@ -249,7 +249,7 @@ static void br_work(struct work_struct *work) /* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */ if (card->reset_jiffies != 0 && time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) { - trace_bus_reset_postpone(card->generation, card->br_short); + trace_bus_reset_postpone(card->index, card->generation, card->br_short); if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ)) fw_card_put(card); diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h index e5524fc718801..e6485051f546d 100644 --- a/include/trace/events/firewire.h +++ b/include/trace/events/firewire.h @@ -303,36 +303,39 @@ TRACE_EVENT(async_phy_inbound, ); DECLARE_EVENT_CLASS(bus_reset_arrange_template, - TP_PROTO(unsigned int generation, bool short_reset), - TP_ARGS(generation, short_reset), + TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset), + TP_ARGS(card_index, generation, short_reset), TP_STRUCT__entry( + __field(u8, card_index) __field(u8, generation) __field(bool, short_reset) ), TP_fast_assign( + __entry->card_index = card_index; __entry->generation = generation; __entry->short_reset = short_reset; ), TP_printk( - "generation=%u short_reset=%s", + "card_index=%u generation=%u short_reset=%s", + __entry->card_index, __entry->generation, __entry->short_reset ? "true" : "false" ) ); DEFINE_EVENT(bus_reset_arrange_template, bus_reset_initiate, - TP_PROTO(unsigned int generation, bool short_reset), - TP_ARGS(generation, short_reset) + TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset), + TP_ARGS(card_index, generation, short_reset) ); DEFINE_EVENT(bus_reset_arrange_template, bus_reset_schedule, - TP_PROTO(unsigned int generation, bool short_reset), - TP_ARGS(generation, short_reset) + TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset), + TP_ARGS(card_index, generation, short_reset) ); DEFINE_EVENT(bus_reset_arrange_template, bus_reset_postpone, - TP_PROTO(unsigned int generation, bool short_reset), - TP_ARGS(generation, short_reset) + TP_PROTO(unsigned int card_index, unsigned int generation, bool short_reset), + TP_ARGS(card_index, generation, short_reset) ); TRACE_EVENT(bus_reset_handle, From 893098b2af3ea12bab2f505aa825662b379df67d Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Thu, 13 Jun 2024 22:14:40 +0900 Subject: [PATCH 525/537] firewire: core: record card index in bus_reset_handle tracepoints event The bus reset event occurs in the bus managed by one of 1394 OHCI controller in Linux system, however the existing tracepoints events has the lack of data about it to distinguish the issued hardware from the others. This commit adds card_index member into event structure to store the index of host controller in use, and prints it. 
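Note (not part of this patch): with card_index added to bus_reset_arrange_template above, the three derived events can be told apart per controller. The tail of a trace line would read roughly as follows; the values are hypothetical, only the field layout comes from the TP_printk() format in the patch.

    bus_reset_initiate: card_index=0 generation=5 short_reset=false
    bus_reset_postpone: card_index=1 generation=3 short_reset=true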
Link: https://lore.kernel.org/r/20240613131440.431766-9-o-takashi@sakamocchi.jp Signed-off-by: Takashi Sakamoto --- drivers/firewire/core-topology.c | 2 +- include/trace/events/firewire.h | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index 837cc44d8d9f2..8107eebd42967 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -508,7 +508,7 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation, struct fw_node *local_node; unsigned long flags; - trace_bus_reset_handle(generation, node_id, bm_abdicate, self_ids, self_id_count); + trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count); spin_lock_irqsave(&card->lock, flags); diff --git a/include/trace/events/firewire.h b/include/trace/events/firewire.h index e6485051f546d..5ccc0d91b220f 100644 --- a/include/trace/events/firewire.h +++ b/include/trace/events/firewire.h @@ -339,22 +339,25 @@ DEFINE_EVENT(bus_reset_arrange_template, bus_reset_postpone, ); TRACE_EVENT(bus_reset_handle, - TP_PROTO(unsigned int generation, unsigned int node_id, bool bm_abdicate, u32 *self_ids, unsigned int self_id_count), - TP_ARGS(generation, node_id, bm_abdicate, self_ids, self_id_count), + TP_PROTO(unsigned int card_index, unsigned int generation, unsigned int node_id, bool bm_abdicate, u32 *self_ids, unsigned int self_id_count), + TP_ARGS(card_index, generation, node_id, bm_abdicate, self_ids, self_id_count), TP_STRUCT__entry( + __field(u8, card_index) __field(u8, generation) __field(u8, node_id) __field(bool, bm_abdicate) __dynamic_array(u32, self_ids, self_id_count) ), TP_fast_assign( + __entry->card_index = card_index; __entry->generation = generation; __entry->node_id = node_id; __entry->bm_abdicate = bm_abdicate; memcpy(__get_dynamic_array(self_ids), self_ids, __get_dynamic_array_len(self_ids)); ), TP_printk( - "generation=%u node_id=0x%04x bm_abdicate=%s self_ids=%s", + "card_index=%u generation=%u node_id=0x%04x bm_abdicate=%s self_ids=%s", + __entry->card_index, __entry->generation, __entry->node_id, __entry->bm_abdicate ? "true" : "false", From ba437905b4fbf0ee1686c175069239a1cc292558 Mon Sep 17 00:00:00 2001 From: Yazen Ghannam Date: Fri, 7 Jun 2024 16:33:00 -0500 Subject: [PATCH 526/537] RAS/AMD/ATL: Use system settings for MI300 DRAM to normalized address translation The currently used normalized address format is not applicable to all MI300 systems. This leads to incorrect results during address translation. Drop the fixed layout and construct the normalized address from system settings. 
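Note (not part of this patch): the reworked translation below scatters each column/bank/row/SID bit to a position read from the UMC registers at init time instead of assuming one fixed layout. A simplified, self-contained sketch of that idea follows; the shift values are made up for illustration and are not the driver's register-derived ones.

#include <stdint.h>
#include <stdio.h>

/* Place bit i of val at position shifts[i] in the output word. */
static uint64_t scatter_bits(uint32_t val, const uint8_t *shifts, int nbits)
{
	uint64_t out = 0;
	int i;

	for (i = 0; i < nbits; i++)
		out |= (uint64_t)((val >> i) & 0x1) << shifts[i];
	return out;
}

int main(void)
{
	/* Hypothetical per-bit positions, as if read from hardware. */
	const uint8_t col_shifts[5]  = { 5, 6, 8, 9, 14 };
	const uint8_t bank_shifts[4] = { 12, 13, 10, 11 };
	uint64_t norm_addr = 0;

	norm_addr |= scatter_bits(0x13, col_shifts, 5);	/* column bits */
	norm_addr |= scatter_bits(0x2, bank_shifts, 4);	/* bank bits   */
	printf("normalized address bits: 0x%llx\n",
	       (unsigned long long)norm_addr);
	return 0;
}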
Fixes: 87a612375307 ("RAS/AMD/ATL: Add MI300 DRAM to normalized address translation support") Signed-off-by: Yazen Ghannam Signed-off-by: Borislav Petkov (AMD) Cc: Link: https://lore.kernel.org/r/20240607-mi300-dram-xl-fix-v1-2-2f11547a178c@amd.com --- drivers/ras/amd/atl/internal.h | 2 +- drivers/ras/amd/atl/system.c | 2 +- drivers/ras/amd/atl/umc.c | 151 ++++++++++++++++++++++++--------- 3 files changed, 114 insertions(+), 41 deletions(-) diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h index 5de69e0bb0f99..196c1c8b578ce 100644 --- a/drivers/ras/amd/atl/internal.h +++ b/drivers/ras/amd/atl/internal.h @@ -224,7 +224,7 @@ int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo); int get_df_system_info(void); int determine_node_id(struct addr_ctx *ctx, u8 socket_num, u8 die_num); -int get_addr_hash_mi300(void); +int get_umc_info_mi300(void); int get_address_map(struct addr_ctx *ctx); diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c index 701349e849428..6979fa3d4fe25 100644 --- a/drivers/ras/amd/atl/system.c +++ b/drivers/ras/amd/atl/system.c @@ -127,7 +127,7 @@ static int df4_determine_df_rev(u32 reg) if (reg == DF_FUNC0_ID_MI300) { df_cfg.flags.heterogeneous = 1; - if (get_addr_hash_mi300()) + if (get_umc_info_mi300()) return -EINVAL; } diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c index 5cb92330dc677..a1b4accf7b965 100644 --- a/drivers/ras/amd/atl/umc.c +++ b/drivers/ras/amd/atl/umc.c @@ -68,6 +68,8 @@ struct xor_bits { }; #define NUM_BANK_BITS 4 +#define NUM_COL_BITS 5 +#define NUM_SID_BITS 2 static struct { /* UMC::CH::AddrHashBank */ @@ -80,7 +82,22 @@ static struct { u8 bank_xor; } addr_hash; +static struct { + u8 bank[NUM_BANK_BITS]; + u8 col[NUM_COL_BITS]; + u8 sid[NUM_SID_BITS]; + u8 num_row_lo; + u8 num_row_hi; + u8 row_lo; + u8 row_hi; + u8 pc; +} bit_shifts; + #define MI300_UMC_CH_BASE 0x90000 +#define MI300_ADDR_CFG (MI300_UMC_CH_BASE + 0x30) +#define MI300_ADDR_SEL (MI300_UMC_CH_BASE + 0x40) +#define MI300_COL_SEL_LO (MI300_UMC_CH_BASE + 0x50) +#define MI300_ADDR_SEL_2 (MI300_UMC_CH_BASE + 0xA4) #define MI300_ADDR_HASH_BANK0 (MI300_UMC_CH_BASE + 0xC8) #define MI300_ADDR_HASH_PC (MI300_UMC_CH_BASE + 0xE0) #define MI300_ADDR_HASH_PC2 (MI300_UMC_CH_BASE + 0xE4) @@ -90,17 +107,42 @@ static struct { #define ADDR_HASH_ROW_XOR GENMASK(31, 14) #define ADDR_HASH_BANK_XOR GENMASK(5, 0) +#define ADDR_CFG_NUM_ROW_LO GENMASK(11, 8) +#define ADDR_CFG_NUM_ROW_HI GENMASK(15, 12) + +#define ADDR_SEL_BANK0 GENMASK(3, 0) +#define ADDR_SEL_BANK1 GENMASK(7, 4) +#define ADDR_SEL_BANK2 GENMASK(11, 8) +#define ADDR_SEL_BANK3 GENMASK(15, 12) +#define ADDR_SEL_BANK4 GENMASK(20, 16) +#define ADDR_SEL_ROW_LO GENMASK(27, 24) +#define ADDR_SEL_ROW_HI GENMASK(31, 28) + +#define COL_SEL_LO_COL0 GENMASK(3, 0) +#define COL_SEL_LO_COL1 GENMASK(7, 4) +#define COL_SEL_LO_COL2 GENMASK(11, 8) +#define COL_SEL_LO_COL3 GENMASK(15, 12) +#define COL_SEL_LO_COL4 GENMASK(19, 16) + +#define ADDR_SEL_2_BANK5 GENMASK(4, 0) +#define ADDR_SEL_2_CHAN GENMASK(15, 12) + /* * Read UMC::CH::AddrHash{Bank,PC,PC2} registers to get XOR bits used - * for hashing. Do this during module init, since the values will not - * change during run time. + * for hashing. + * + * Also, read UMC::CH::Addr{Cfg,Sel,Sel2} and UMC::CH:ColSelLo registers to + * get the values needed to reconstruct the normalized address. Apply additional + * offsets to the raw register values, as needed. 
+ * + * Do this during module init, since the values will not change during run time. * * These registers are instantiated for each UMC across each AMD Node. * However, they should be identically programmed due to the fixed hardware * design of MI300 systems. So read the values from Node 0 UMC 0 and keep a * single global structure for simplicity. */ -int get_addr_hash_mi300(void) +int get_umc_info_mi300(void) { u32 temp; int ret; @@ -130,6 +172,44 @@ int get_addr_hash_mi300(void) addr_hash.bank_xor = FIELD_GET(ADDR_HASH_BANK_XOR, temp); + ret = amd_smn_read(0, MI300_ADDR_CFG, &temp); + if (ret) + return ret; + + bit_shifts.num_row_hi = FIELD_GET(ADDR_CFG_NUM_ROW_HI, temp); + bit_shifts.num_row_lo = 10 + FIELD_GET(ADDR_CFG_NUM_ROW_LO, temp); + + ret = amd_smn_read(0, MI300_ADDR_SEL, &temp); + if (ret) + return ret; + + bit_shifts.bank[0] = 5 + FIELD_GET(ADDR_SEL_BANK0, temp); + bit_shifts.bank[1] = 5 + FIELD_GET(ADDR_SEL_BANK1, temp); + bit_shifts.bank[2] = 5 + FIELD_GET(ADDR_SEL_BANK2, temp); + bit_shifts.bank[3] = 5 + FIELD_GET(ADDR_SEL_BANK3, temp); + /* Use BankBit4 for the SID0 position. */ + bit_shifts.sid[0] = 5 + FIELD_GET(ADDR_SEL_BANK4, temp); + bit_shifts.row_lo = 12 + FIELD_GET(ADDR_SEL_ROW_LO, temp); + bit_shifts.row_hi = 24 + FIELD_GET(ADDR_SEL_ROW_HI, temp); + + ret = amd_smn_read(0, MI300_COL_SEL_LO, &temp); + if (ret) + return ret; + + bit_shifts.col[0] = 2 + FIELD_GET(COL_SEL_LO_COL0, temp); + bit_shifts.col[1] = 2 + FIELD_GET(COL_SEL_LO_COL1, temp); + bit_shifts.col[2] = 2 + FIELD_GET(COL_SEL_LO_COL2, temp); + bit_shifts.col[3] = 2 + FIELD_GET(COL_SEL_LO_COL3, temp); + bit_shifts.col[4] = 2 + FIELD_GET(COL_SEL_LO_COL4, temp); + + ret = amd_smn_read(0, MI300_ADDR_SEL_2, &temp); + if (ret) + return ret; + + /* Use BankBit5 for the SID1 position. */ + bit_shifts.sid[1] = 5 + FIELD_GET(ADDR_SEL_2_BANK5, temp); + bit_shifts.pc = 5 + FIELD_GET(ADDR_SEL_2_CHAN, temp); + return 0; } @@ -146,9 +226,6 @@ int get_addr_hash_mi300(void) * The MCA address format is as follows: * MCA_ADDR[27:0] = {S[1:0], P[0], R[14:0], B[3:0], C[4:0], Z[0]} * - * The normalized address format is fixed in hardware and is as follows: - * NA[30:0] = {S[1:0], R[13:0], C4, B[1:0], B[3:2], C[3:2], P, C[1:0], Z[4:0]} - * * Additionally, the PC and Bank bits may be hashed. This must be accounted for before * reconstructing the normalized address. 
*/ @@ -158,18 +235,10 @@ int get_addr_hash_mi300(void) #define MI300_UMC_MCA_PC BIT(25) #define MI300_UMC_MCA_SID GENMASK(27, 26) -#define MI300_NA_COL_1_0 GENMASK(6, 5) -#define MI300_NA_PC BIT(7) -#define MI300_NA_COL_3_2 GENMASK(9, 8) -#define MI300_NA_BANK_3_2 GENMASK(11, 10) -#define MI300_NA_BANK_1_0 GENMASK(13, 12) -#define MI300_NA_COL_4 BIT(14) -#define MI300_NA_ROW GENMASK(28, 15) -#define MI300_NA_SID GENMASK(30, 29) - static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr) { - u16 i, col, row, bank, pc, sid, temp; + u16 i, col, row, bank, pc, sid; + u32 temp; col = FIELD_GET(MI300_UMC_MCA_COL, addr); bank = FIELD_GET(MI300_UMC_MCA_BANK, addr); @@ -199,34 +268,38 @@ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr) /* Reconstruct the normalized address starting with NA[4:0] = 0 */ addr = 0; - /* NA[6:5] = Column[1:0] */ - temp = col & 0x3; - addr |= FIELD_PREP(MI300_NA_COL_1_0, temp); - - /* NA[7] = PC */ - addr |= FIELD_PREP(MI300_NA_PC, pc); - - /* NA[9:8] = Column[3:2] */ - temp = (col >> 2) & 0x3; - addr |= FIELD_PREP(MI300_NA_COL_3_2, temp); + /* Column bits */ + for (i = 0; i < NUM_COL_BITS; i++) { + temp = (col >> i) & 0x1; + addr |= temp << bit_shifts.col[i]; + } - /* NA[11:10] = Bank[3:2] */ - temp = (bank >> 2) & 0x3; - addr |= FIELD_PREP(MI300_NA_BANK_3_2, temp); + /* Bank bits */ + for (i = 0; i < NUM_BANK_BITS; i++) { + temp = (bank >> i) & 0x1; + addr |= temp << bit_shifts.bank[i]; + } - /* NA[13:12] = Bank[1:0] */ - temp = bank & 0x3; - addr |= FIELD_PREP(MI300_NA_BANK_1_0, temp); + /* Row lo bits */ + for (i = 0; i < bit_shifts.num_row_lo; i++) { + temp = (row >> i) & 0x1; + addr |= temp << (i + bit_shifts.row_lo); + } - /* NA[14] = Column[4] */ - temp = (col >> 4) & 0x1; - addr |= FIELD_PREP(MI300_NA_COL_4, temp); + /* Row hi bits */ + for (i = 0; i < bit_shifts.num_row_hi; i++) { + temp = (row >> (i + bit_shifts.num_row_lo)) & 0x1; + addr |= temp << (i + bit_shifts.row_hi); + } - /* NA[28:15] = Row[13:0] */ - addr |= FIELD_PREP(MI300_NA_ROW, row); + /* PC bit */ + addr |= pc << bit_shifts.pc; - /* NA[30:29] = SID[1:0] */ - addr |= FIELD_PREP(MI300_NA_SID, sid); + /* SID bits */ + for (i = 0; i < NUM_SID_BITS; i++) { + temp = (sid >> i) & 0x1; + addr |= temp << bit_shifts.sid[i]; + } pr_debug("Addr=0x%016lx", addr); pr_debug("Bank=%u Row=%u Column=%u PC=%u SID=%u", bank, row, col, pc, sid); From fcf2a9970ef587d8f358560c381ee6115a9108aa Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 7 Jun 2024 12:18:47 +0200 Subject: [PATCH 527/537] leds: class: Revert: "If no default trigger is given, make hw_control trigger the default trigger" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 66601a29bb23 ("leds: class: If no default trigger is given, make hw_control trigger the default trigger") causes ledtrig-netdev to get set as default trigger on various network LEDs. This causes users to hit a pre-existing AB-BA deadlock issue in ledtrig-netdev between the LED-trigger locks and the rtnl mutex, resulting in hung tasks in kernels >= 6.9. Solving the deadlock is non trivial, so for now revert the change to set the hw_control trigger as default trigger, so that ledtrig-netdev no longer gets activated automatically for various network LEDs. The netdev trigger is not needed because the network LEDs are usually under hw-control and the netdev trigger tries to leave things that way so setting it as the active trigger for the LED class device is a no-op. 
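Note (not part of this patch): the AB-BA deadlock mentioned above is the classic lock-ordering inversion sketched below. This is a generic pthreads illustration (build with -lpthread), not the actual LED-trigger/rtnl code paths, and whether it actually hangs depends on timing.

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* stands in for the trigger lock */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* stands in for the rtnl mutex   */

static void *thread_one(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);	/* A then B */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

static void *thread_two(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock_b);
	pthread_mutex_lock(&lock_a);	/* B then A: can wait forever on thread_one */
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, thread_one, NULL);
	pthread_create(&t2, NULL, thread_two, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}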
Fixes: 66601a29bb23 ("leds: class: If no default trigger is given, make hw_control trigger the default trigger") Reported-by: Genes Lists Closes: https://lore.kernel.org/all/9d189ec329cfe68ed68699f314e191a10d4b5eda.camel@sapience.com/ Reported-by: Johannes Wüller Closes: https://lore.kernel.org/lkml/e441605c-eaf2-4c2d-872b-d8e541f4cf60@gmail.com/ Cc: stable@vger.kernel.org Signed-off-by: Hans de Goede Reviewed-by: Andrew Lunn Acked-by: Lee Jones Signed-off-by: Linus Torvalds --- drivers/leds/led-class.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 24fcff682b24a..ba1be15cfd8ea 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -552,12 +552,6 @@ int led_classdev_register_ext(struct device *parent, led_init_core(led_cdev); #ifdef CONFIG_LEDS_TRIGGERS - /* - * If no default trigger was given and hw_control_trigger is set, - * make it the default trigger. - */ - if (!led_cdev->default_trigger && led_cdev->hw_control_trigger) - led_cdev->default_trigger = led_cdev->hw_control_trigger; led_trigger_set_default(led_cdev); #endif From 6ba59ff4227927d3a8530fc2973b80e94b54d58f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 16 Jun 2024 13:40:16 -0700 Subject: [PATCH 528/537] Linux 6.10-rc4 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 925a75b8ba7de..14427547dc1eb 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 10 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc4 NAME = Baby Opossum Posse # *DOCUMENTATION* From 581f5c7b1420c89192838088f70a2abee277cff4 Mon Sep 17 00:00:00 2001 From: David Vernet Date: Thu, 13 Jun 2024 19:09:02 -0500 Subject: [PATCH 529/537] scx: Check for NULLity for ops.yield() @to arg The @to field in ops.yield() can be NULL (when the callback is invoked) on the yield_task_scx() path). Right now we're not communicating to the verifier that the arg can be NULL, so if callers don't check for NULL it can crash the host. Let's update ext.c to check. A subsequent patch will add tests. Signed-off-by: David Vernet (cherry picked from commit 3f4e6620ea04556b144710c195706dcbe79d6d43) --- kernel/sched/ext.c | 34 +++++++++++-------- ...l_fail.bpf.c => maybe_null_fail_dsp.bpf.c} | 0 2 files changed, 20 insertions(+), 14 deletions(-) rename tools/testing/selftests/sched_ext/{maybe_null_fail.bpf.c => maybe_null_fail_dsp.bpf.c} (100%) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 93e041e2f8d75..55a9c4fd208db 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -5183,11 +5183,11 @@ extern struct btf *btf_vmlinux; static const struct btf_type *task_struct_type; static u32 task_struct_type_id; -/* Make the 2nd argument of .dispatch a pointer that can be NULL. 
*/ -static bool promote_dispatch_2nd_arg(int off, int size, - enum bpf_access_type type, - const struct bpf_prog *prog, - struct bpf_insn_access_aux *info) +static bool promote_op_nth_arg(int arg_n, const char *op, + int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) { struct btf *btf = bpf_get_btf_vmlinux(); const struct bpf_struct_ops_desc *st_ops_desc; @@ -5196,6 +5196,10 @@ static bool promote_dispatch_2nd_arg(int off, int size, u32 btf_id, member_idx; const char *mname; + /* struct_ops op args are all sequential, 64-bit numbers */ + if (off != arg_n * sizeof(__u64)) + return false; + /* btf_id should be the type id of struct sched_ext_ops */ btf_id = prog->aux->attach_btf_id; st_ops_desc = bpf_struct_ops_find(btf, btf_id); @@ -5217,14 +5221,7 @@ static bool promote_dispatch_2nd_arg(int off, int size, member = &btf_type_member(t)[member_idx]; mname = btf_name_by_offset(btf_vmlinux, member->name_off); - /* - * Check if it is the second argument of the function pointer at - * "dispatch" in struct sched_ext_ops. The arguments of struct_ops - * operators are sequential and 64-bit, so the second argument is at - * offset sizeof(__u64). - */ - if (strcmp(mname, "dispatch") == 0 && - off == sizeof(__u64)) { + if (!strcmp(mname, op)) { /* * The value is a pointer to a type (struct task_struct) given * by a BTF ID (PTR_TO_BTF_ID). It is trusted (PTR_TRUSTED), @@ -5245,6 +5242,15 @@ static bool promote_dispatch_2nd_arg(int off, int size, return false; } +static bool promote_op_arg(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + return promote_op_nth_arg(1, "dispatch", off, size, type, prog, info) || + promote_op_nth_arg(1, "yield", off, size, type, prog, info); +} + static bool bpf_scx_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, @@ -5252,7 +5258,7 @@ static bool bpf_scx_is_valid_access(int off, int size, { if (type != BPF_READ) return false; - if (promote_dispatch_2nd_arg(off, size, type, prog, info)) + if (promote_op_arg(off, size, type, prog, info)) return true; if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) return false; diff --git a/tools/testing/selftests/sched_ext/maybe_null_fail.bpf.c b/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c similarity index 100% rename from tools/testing/selftests/sched_ext/maybe_null_fail.bpf.c rename to tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c From 91b307ae171bac3cf09ac50d4e2f65e2fd5702ad Mon Sep 17 00:00:00 2001 From: David Vernet Date: Thu, 13 Jun 2024 20:12:35 -0500 Subject: [PATCH 530/537] scx: Test ops.yield() @to NULLity Now that we've told the verifier about the second argument to ops.yield(), let's add a test to verify the behavior. 
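Note (not part of this patch): the practical rule for BPF scheduler authors is that ops.yield() must now test @to before dereferencing it, otherwise the verifier rejects the program at load time. A minimal sketch of the expected shape; the selftests added below exercise both the passing and the failing variant.

bool BPF_STRUCT_OPS(example_yield, struct task_struct *from,
		    struct task_struct *to)
{
	/* @to is NULL when the task yields without naming a target. */
	if (to)
		bpf_printk("yielding to %s[%d]", to->comm, to->pid);

	return false;	/* yield not honored; fall back to default behavior */
}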
Signed-off-by: David Vernet (cherry picked from commit bc5f49cd6e37bfc0f2f826c332eeec458230452e) --- .../selftests/sched_ext/maybe_null.bpf.c | 10 +++++++ .../testing/selftests/sched_ext/maybe_null.c | 21 ++++++++++---- .../sched_ext/maybe_null_fail_dsp.bpf.c | 2 +- .../sched_ext/maybe_null_fail_yld.bpf.c | 28 +++++++++++++++++++ 4 files changed, 54 insertions(+), 7 deletions(-) create mode 100644 tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c diff --git a/tools/testing/selftests/sched_ext/maybe_null.bpf.c b/tools/testing/selftests/sched_ext/maybe_null.bpf.c index ad5e694226bbf..27d0f386acfb1 100644 --- a/tools/testing/selftests/sched_ext/maybe_null.bpf.c +++ b/tools/testing/selftests/sched_ext/maybe_null.bpf.c @@ -18,9 +18,19 @@ void BPF_STRUCT_OPS(maybe_null_success_dispatch, s32 cpu, struct task_struct *p) vtime_test = p->scx.dsq_vtime; } +bool BPF_STRUCT_OPS(maybe_null_success_yield, struct task_struct *from, + struct task_struct *to) +{ + if (to) + bpf_printk("Yielding to %s[%d]", to->comm, to->pid); + + return false; +} + SEC(".struct_ops.link") struct sched_ext_ops maybe_null_success = { .dispatch = maybe_null_success_dispatch, + .yield = maybe_null_success_yield, .enable = maybe_null_running, .name = "minimal", }; diff --git a/tools/testing/selftests/sched_ext/maybe_null.c b/tools/testing/selftests/sched_ext/maybe_null.c index 3f26b784f9c57..31cfafb0cf65a 100644 --- a/tools/testing/selftests/sched_ext/maybe_null.c +++ b/tools/testing/selftests/sched_ext/maybe_null.c @@ -7,13 +7,15 @@ #include #include #include "maybe_null.bpf.skel.h" -#include "maybe_null_fail.bpf.skel.h" +#include "maybe_null_fail_dsp.bpf.skel.h" +#include "maybe_null_fail_yld.bpf.skel.h" #include "scx_test.h" static enum scx_test_status run(void *ctx) { struct maybe_null *skel; - struct maybe_null_fail *fail_skel; + struct maybe_null_fail_dsp *fail_dsp; + struct maybe_null_fail_yld *fail_yld; skel = maybe_null__open_and_load(); if (!skel) { @@ -22,10 +24,17 @@ static enum scx_test_status run(void *ctx) } maybe_null__destroy(skel); - fail_skel = maybe_null_fail__open_and_load(); - if (fail_skel) { - maybe_null_fail__destroy(fail_skel); - SCX_ERR("Should failed to open and load maybe_null_fail skel"); + fail_dsp = maybe_null_fail_dsp__open_and_load(); + if (fail_dsp) { + maybe_null_fail_dsp__destroy(fail_dsp); + SCX_ERR("Should failed to open and load maybe_null_fail_dsp skel"); + return SCX_TEST_FAIL; + } + + fail_yld = maybe_null_fail_yld__open_and_load(); + if (fail_yld) { + maybe_null_fail_yld__destroy(fail_yld); + SCX_ERR("Should failed to open and load maybe_null_fail_yld skel"); return SCX_TEST_FAIL; } diff --git a/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c b/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c index 1607fe07bead1..c0641050271d3 100644 --- a/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c +++ b/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c @@ -21,5 +21,5 @@ SEC(".struct_ops.link") struct sched_ext_ops maybe_null_fail = { .dispatch = maybe_null_fail_dispatch, .enable = maybe_null_running, - .name = "minimal", + .name = "maybe_null_fail_dispatch", }; diff --git a/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c b/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c new file mode 100644 index 0000000000000..3c1740028e3b9 --- /dev/null +++ b/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Meta Platforms, Inc. 
and affiliates. + */ + +#include + +char _license[] SEC("license") = "GPL"; + +u64 vtime_test; + +void BPF_STRUCT_OPS(maybe_null_running, struct task_struct *p) +{} + +bool BPF_STRUCT_OPS(maybe_null_fail_yield, struct task_struct *from, + struct task_struct *to) +{ + bpf_printk("Yielding to %s[%d]", to->comm, to->pid); + + return false; +} + +SEC(".struct_ops.link") +struct sched_ext_ops maybe_null_fail = { + .yield = maybe_null_fail_yield, + .enable = maybe_null_running, + .name = "maybe_null_fail_yield", +}; From 448f1b1df8da730211f3b4ae4b382db5ef354db9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 14 Jun 2024 15:09:05 -1000 Subject: [PATCH 531/537] tools/sched_ext: Sync from scx repo (d3b34d1df7aabb66244607f1473ecc5774b5c9b3) (cherry picked from commit 389f282a8f1940d2a22cd5a6501254869af70e3e) --- tools/sched_ext/include/scx/common.bpf.h | 142 ++++++++++++++----- tools/sched_ext/include/scx/common.h | 17 +-- tools/sched_ext/include/scx/user_exit_info.h | 8 +- tools/sched_ext/scx_central.c | 4 +- tools/sched_ext/scx_qmap.bpf.c | 6 +- 5 files changed, 123 insertions(+), 54 deletions(-) diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h index d0b708e959c1f..818c0febcb156 100644 --- a/tools/sched_ext/include/scx/common.bpf.h +++ b/tools/sched_ext/include/scx/common.bpf.h @@ -235,50 +235,15 @@ BPF_PROG(name, ##args) __addr; \ }) + /* - * BPF core and other generic helpers + * BPF declarations and helpers */ /* list and rbtree */ #define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node))) #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) -/* - * bpf_log2 - Compute the base 2 logarithm of a 32-bit exponential value. - * @v: The value for which we're computing the base 2 logarithm. - */ -static inline u32 bpf_log2(u32 v) -{ - u32 r; - u32 shift; - - r = (v > 0xFFFF) << 4; v >>= r; - shift = (v > 0xFF) << 3; v >>= shift; r |= shift; - shift = (v > 0xF) << 2; v >>= shift; r |= shift; - shift = (v > 0x3) << 1; v >>= shift; r |= shift; - r |= (v >> 1); - return r; -} - -/* - * bpf_log2l - Compute the base 2 logarithm of a 64-bit exponential value. - * @v: The value for which we're computing the base 2 logarithm. 
- */ -static inline u32 bpf_log2l(u64 v) -{ - u32 hi = v >> 32; - if (hi) - return bpf_log2(hi) + 32 + 1; - else - return bpf_log2(v) + 1; -} - -/* useful compiler attributes */ -#define likely(x) __builtin_expect(!!(x), 1) -#define unlikely(x) __builtin_expect(!!(x), 0) -#define __maybe_unused __attribute__((__unused__)) - - void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym; void bpf_obj_drop_impl(void *kptr, void *meta) __ksym; @@ -311,6 +276,16 @@ struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym; void bpf_cgroup_release(struct cgroup *cgrp) __ksym; struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; +/* css iteration */ +struct bpf_iter_css; +struct cgroup_subsys_state; +extern int bpf_iter_css_new(struct bpf_iter_css *it, + struct cgroup_subsys_state *start, + unsigned int flags) __weak __ksym; +extern struct cgroup_subsys_state * +bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym; +extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym; + /* cpumask */ struct bpf_cpumask *bpf_cpumask_create(void) __ksym; struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym; @@ -344,6 +319,99 @@ u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, void bpf_rcu_read_lock(void) __ksym; void bpf_rcu_read_unlock(void) __ksym; + +/* + * Other helpers + */ + +/* useful compiler attributes */ +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) +#define __maybe_unused __attribute__((__unused__)) + +/* + * READ/WRITE_ONCE() are from kernel (include/asm-generic/rwonce.h). They + * prevent compiler from caching, redoing or reordering reads or writes. + */ +typedef __u8 __attribute__((__may_alias__)) __u8_alias_t; +typedef __u16 __attribute__((__may_alias__)) __u16_alias_t; +typedef __u32 __attribute__((__may_alias__)) __u32_alias_t; +typedef __u64 __attribute__((__may_alias__)) __u64_alias_t; + +static __always_inline void __read_once_size(const volatile void *p, void *res, int size) +{ + switch (size) { + case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break; + case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break; + case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break; + case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break; + default: + barrier(); + __builtin_memcpy((void *)res, (const void *)p, size); + barrier(); + } +} + +static __always_inline void __write_once_size(volatile void *p, void *res, int size) +{ + switch (size) { + case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break; + case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break; + case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break; + case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break; + default: + barrier(); + __builtin_memcpy((void *)p, (const void *)res, size); + barrier(); + } +} + +#define READ_ONCE(x) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u = \ + { .__c = { 0 } }; \ + __read_once_size(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) + +#define WRITE_ONCE(x, val) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u = \ + { .__val = (val) }; \ + __write_once_size(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) + +/* + * log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value. + * @v: The value for which we're computing the base 2 logarithm. 
+ */ +static inline u32 log2_u32(u32 v) +{ + u32 r; + u32 shift; + + r = (v > 0xFFFF) << 4; v >>= r; + shift = (v > 0xFF) << 3; v >>= shift; r |= shift; + shift = (v > 0xF) << 2; v >>= shift; r |= shift; + shift = (v > 0x3) << 1; v >>= shift; r |= shift; + r |= (v >> 1); + return r; +} + +/* + * log2_u64 - Compute the base 2 logarithm of a 64-bit exponential value. + * @v: The value for which we're computing the base 2 logarithm. + */ +static inline u32 log2_u64(u64 v) +{ + u32 hi = v >> 32; + if (hi) + return log2_u32(hi) + 32 + 1; + else + return log2_u32(v) + 1; +} + #include "compat.bpf.h" #endif /* __SCX_COMMON_BPF_H */ diff --git a/tools/sched_ext/include/scx/common.h b/tools/sched_ext/include/scx/common.h index 8d5a6775f64d9..49234dad00a15 100644 --- a/tools/sched_ext/include/scx/common.h +++ b/tools/sched_ext/include/scx/common.h @@ -44,7 +44,8 @@ typedef int64_t s64; /** * RESIZE_ARRAY - Convenience macro for resizing a BPF array - * @elfsec: the data section of the BPF program in which to the array exists + * @__skel: the skeleton containing the array + * @elfsec: the data section of the BPF program in which the array exists * @arr: the name of the array * @n: the desired array element count * @@ -56,13 +57,13 @@ typedef int64_t s64; * for that custom data section so that it points to the newly memory mapped * region. */ -#define RESIZE_ARRAY(elfsec, arr, n) \ - do { \ - size_t __sz; \ - bpf_map__set_value_size(skel->maps.elfsec##_##arr, \ - sizeof(skel->elfsec##_##arr->arr[0]) * (n)); \ - skel->elfsec##_##arr = \ - bpf_map__initial_value(skel->maps.elfsec##_##arr, &__sz); \ +#define RESIZE_ARRAY(__skel, elfsec, arr, n) \ + do { \ + size_t __sz; \ + bpf_map__set_value_size((__skel)->maps.elfsec##_##arr, \ + sizeof((__skel)->elfsec##_##arr->arr[0]) * (n)); \ + (__skel)->elfsec##_##arr = \ + bpf_map__initial_value((__skel)->maps.elfsec##_##arr, &__sz); \ } while (0) #include "user_exit_info.h" diff --git a/tools/sched_ext/include/scx/user_exit_info.h b/tools/sched_ext/include/scx/user_exit_info.h index 2d86d01a95752..891693ee604ec 100644 --- a/tools/sched_ext/include/scx/user_exit_info.h +++ b/tools/sched_ext/include/scx/user_exit_info.h @@ -53,10 +53,10 @@ struct user_exit_info { #include /* no need to call the following explicitly if SCX_OPS_LOAD() is used */ -#define UEI_SET_SIZE(__skel, __ops_name, __uei_name) ({ \ - u32 __len = (__skel)->struct_ops.__ops_name->exit_dump_len ?: UEI_DUMP_DFL_LEN; \ - (__skel)->rodata->__uei_name##_dump_len = __len; \ - RESIZE_ARRAY(data, __uei_name##_dump, __len); \ +#define UEI_SET_SIZE(__skel, __ops_name, __uei_name) ({ \ + u32 __len = (__skel)->struct_ops.__ops_name->exit_dump_len ?: UEI_DUMP_DFL_LEN; \ + (__skel)->rodata->__uei_name##_dump_len = __len; \ + RESIZE_ARRAY((__skel), data, __uei_name##_dump, __len); \ }) #define UEI_EXITED(__skel, __uei_name) ({ \ diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c index 1e0568624ccc2..21deea320bd78 100644 --- a/tools/sched_ext/scx_central.c +++ b/tools/sched_ext/scx_central.c @@ -77,8 +77,8 @@ int main(int argc, char **argv) } /* Resize arrays so their element count is equal to cpu count. 
*/ - RESIZE_ARRAY(data, cpu_gimme_task, skel->rodata->nr_cpu_ids); - RESIZE_ARRAY(data, cpu_started_at, skel->rodata->nr_cpu_ids); + RESIZE_ARRAY(skel, data, cpu_gimme_task, skel->rodata->nr_cpu_ids); + RESIZE_ARRAY(skel, data, cpu_started_at, skel->rodata->nr_cpu_ids); SCX_OPS_LOAD(skel, central_ops, scx_central, uei); diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c index d74c5cf2a2517..e22f190f52249 100644 --- a/tools/sched_ext/scx_qmap.bpf.c +++ b/tools/sched_ext/scx_qmap.bpf.c @@ -579,7 +579,7 @@ struct { __uint(max_entries, 1); __type(key, u32); __type(value, struct monitor_timer); -} central_timer SEC(".maps"); +} monitor_timer SEC(".maps"); /* * Print out the min, avg and max performance levels of CPUs every second to @@ -692,11 +692,11 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(qmap_init) if (ret) return ret; - timer = bpf_map_lookup_elem(¢ral_timer, &key); + timer = bpf_map_lookup_elem(&monitor_timer, &key); if (!timer) return -ESRCH; - bpf_timer_init(timer, ¢ral_timer, CLOCK_MONOTONIC); + bpf_timer_init(timer, &monitor_timer, CLOCK_MONOTONIC); bpf_timer_set_callback(timer, monitor_timerfn); return bpf_timer_start(timer, ONE_SEC_IN_NS, 0); From c9a2168cc0f654a6bf539cfec30664c790954ef7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 15 Jun 2024 06:49:37 -1000 Subject: [PATCH 532/537] scx: Cosmetic changes - Whitespace and comment adj. - Use sched_class_above() instead of naked comparison. - Reorder hotplug functions. - s/promote_op_nth_arg()/set_arg_maybe_null()/. Relocate @arg_n after @op and remove promote_op_arg(). - Reorder declarations and dummy defs in kernel/sched/ext.h. No functional changes. (cherry picked from commit 159d0cf55f62ecd7f1dfb9c4e44ce442f13c78a6) --- kernel/sched/ext.c | 40 ++++++++++++++++------------------------ kernel/sched/ext.h | 14 +++++++------- 2 files changed, 23 insertions(+), 31 deletions(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 55a9c4fd208db..a57576ed5855d 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1017,7 +1017,7 @@ static u32 higher_bits(u32 flags) static u32 highest_bit(u32 flags) { int bit = fls(flags); - return ((u64) 1 << bit) >> 1; + return ((u64)1 << bit) >> 1; } /* @@ -1820,8 +1820,8 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, goto local_norefill; /* - * If !rq->online, we already told the BPF scheduler that the CPU is - * offline. We're just trying to on/offline the CPU. Don't bother the + * If !scx_rq_online(), we already told the BPF scheduler that the CPU + * is offline and are just running the hotplug path. Don't bother the * BPF scheduler. */ if (!scx_rq_online(rq)) @@ -2402,7 +2402,8 @@ dispatch_to_local_dsq(struct rq *rq, struct rq_flags *rf, u64 dsq_id, } /* if the destination CPU is idle, wake it up */ - if (dsp && p->sched_class < dst_rq->curr->sched_class) + if (dsp && sched_class_above(p->sched_class, + dst_rq->curr->sched_class)) resched_curr(dst_rq); dispatch_to_local_dsq_unlock(rq, rf, src_rq, locked_dst_rq); @@ -3218,24 +3219,24 @@ static void handle_hotplug(struct rq *rq, bool online) online ? 
"online" : "offline"); } -static void rq_online_scx(struct rq *rq) +void scx_rq_activate(struct rq *rq) { - rq->scx.flags |= SCX_RQ_ONLINE; + handle_hotplug(rq, true); } -static void rq_offline_scx(struct rq *rq) +void scx_rq_deactivate(struct rq *rq) { - rq->scx.flags &= ~SCX_RQ_ONLINE; + handle_hotplug(rq, false); } -void scx_rq_activate(struct rq *rq) +static void rq_online_scx(struct rq *rq) { - handle_hotplug(rq, true); + rq->scx.flags |= SCX_RQ_ONLINE; } -void scx_rq_deactivate(struct rq *rq) +static void rq_offline_scx(struct rq *rq) { - handle_hotplug(rq, false); + rq->scx.flags &= ~SCX_RQ_ONLINE; } #else /* CONFIG_SMP */ @@ -5183,8 +5184,7 @@ extern struct btf *btf_vmlinux; static const struct btf_type *task_struct_type; static u32 task_struct_type_id; -static bool promote_op_nth_arg(int arg_n, const char *op, - int off, int size, +static bool set_arg_maybe_null(const char *op, int arg_n, int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) @@ -5242,15 +5242,6 @@ static bool promote_op_nth_arg(int arg_n, const char *op, return false; } -static bool promote_op_arg(int off, int size, - enum bpf_access_type type, - const struct bpf_prog *prog, - struct bpf_insn_access_aux *info) -{ - return promote_op_nth_arg(1, "dispatch", off, size, type, prog, info) || - promote_op_nth_arg(1, "yield", off, size, type, prog, info); -} - static bool bpf_scx_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, @@ -5258,7 +5249,8 @@ static bool bpf_scx_is_valid_access(int off, int size, { if (type != BPF_READ) return false; - if (promote_op_arg(off, size, type, prog, info)) + if (set_arg_maybe_null("dispatch", 1, off, size, type, prog, info) || + set_arg_maybe_null("yield", 1, off, size, type, prog, info)) return true; if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) return false; diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 52d9b7df2a253..dddcf36b76a77 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -41,12 +41,12 @@ void scx_pre_fork(struct task_struct *p); int scx_fork(struct task_struct *p); void scx_post_fork(struct task_struct *p); void scx_cancel_fork(struct task_struct *p); -int scx_check_setscheduler(struct task_struct *p, int policy); bool scx_can_stop_tick(struct rq *rq); -bool task_should_scx(struct task_struct *p); -void init_sched_ext_class(void); void scx_rq_activate(struct rq *rq); void scx_rq_deactivate(struct rq *rq); +int scx_check_setscheduler(struct task_struct *p, int policy); +bool task_should_scx(struct task_struct *p); +void init_sched_ext_class(void); static inline u32 scx_cpuperf_target(s32 cpu) { @@ -97,13 +97,13 @@ static inline void scx_pre_fork(struct task_struct *p) {} static inline int scx_fork(struct task_struct *p) { return 0; } static inline void scx_post_fork(struct task_struct *p) {} static inline void scx_cancel_fork(struct task_struct *p) {} -static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; } -static inline bool scx_can_stop_tick(struct rq *rq) { return true; } -static inline bool task_on_scx(const struct task_struct *p) { return false; } -static inline void init_sched_ext_class(void) {} static inline u32 scx_cpuperf_target(s32 cpu) { return 0; } +static inline bool scx_can_stop_tick(struct rq *rq) { return true; } static inline void scx_rq_activate(struct rq *rq) {} static inline void scx_rq_deactivate(struct rq *rq) {} +static inline int scx_check_setscheduler(struct task_struct *p, int policy) { 
return 0; } +static inline bool task_on_scx(const struct task_struct *p) { return false; } +static inline void init_sched_ext_class(void) {} #define for_each_active_class for_each_class #define for_balance_class_range for_class_range From 1489c9c695732e4b677afc8382a39053af207f57 Mon Sep 17 00:00:00 2001 From: Daniel Hodges Date: Sun, 16 Jun 2024 09:49:31 -0700 Subject: [PATCH 533/537] scx: Fix spelling in function comment Quick fix of spelling in dispatch_to_local_dsq_lock comment. Signed-off-by: Daniel Hodges (cherry picked from commit acb3ddb20fda0dd44ca3c22a54840ab0ac72e66f) --- kernel/sched/ext.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index a57576ed5855d..2cf7dd5d5db1e 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -2137,7 +2137,7 @@ static bool move_task_to_local_dsq(struct rq *rq, struct task_struct *p, } /** - * dispatch_to_local_dsq_lock - Ensure source and desitnation rq's are locked + * dispatch_to_local_dsq_lock - Ensure source and destination rq's are locked * @rq: current rq which is locked * @rf: rq_flags to use when unlocking @rq * @src_rq: rq to move task from From 1e1ec9a38e81a11e2a83da35de984a4eb58daebd Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 16 Jun 2024 14:10:41 -1000 Subject: [PATCH 534/537] tools/sched_ext: Sync from the scx repo (b6ebdc635acf450e9c2e71717500556cd3cf67a1) (cherry picked from commit e1f6f964f4017466a0757af96322d88ebe2a9a8c) --- tools/sched_ext/include/scx/common.h | 7 ++- tools/sched_ext/include/scx/compat.bpf.h | 77 ------------------------ tools/sched_ext/include/scx/compat.h | 48 +++------------ tools/sched_ext/scx_central.bpf.c | 3 +- tools/sched_ext/scx_qmap.bpf.c | 28 +++------ tools/sched_ext/scx_qmap.c | 3 +- tools/sched_ext/scx_simple.bpf.c | 1 - 7 files changed, 23 insertions(+), 144 deletions(-) diff --git a/tools/sched_ext/include/scx/common.h b/tools/sched_ext/include/scx/common.h index 49234dad00a15..5b0f901521523 100644 --- a/tools/sched_ext/include/scx/common.h +++ b/tools/sched_ext/include/scx/common.h @@ -28,8 +28,11 @@ typedef int64_t s64; #define SCX_BUG(__fmt, ...) \ do { \ - fprintf(stderr, "%s:%d [scx panic]: %s\n", __FILE__, __LINE__, \ - strerror(errno)); \ + fprintf(stderr, "[SCX_BUG] %s:%d", __FILE__, __LINE__); \ + if (errno) \ + fprintf(stderr, " (%s)\n", strerror(errno)); \ + else \ + fprintf(stderr, "\n"); \ fprintf(stderr, __fmt __VA_OPT__(,) __VA_ARGS__); \ fprintf(stderr, "\n"); \ \ diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h index 914baac2e9657..1106c7eee279d 100644 --- a/tools/sched_ext/include/scx/compat.bpf.h +++ b/tools/sched_ext/include/scx/compat.bpf.h @@ -15,83 +15,6 @@ __ret; \ }) -/* - * %SCX_KICK_IDLE is a later addition. To support both before and after, use - * %__COMPAT_SCX_KICK_IDLE which becomes 0 on kernels which don't support it. - * Users can use %SCX_KICK_IDLE directly in the future. - */ -#define __COMPAT_SCX_KICK_IDLE \ - __COMPAT_ENUM_OR_ZERO(enum scx_kick_flags, SCX_KICK_IDLE) - -/* - * scx_switch_all() was replaced by %SCX_OPS_SWITCH_PARTIAL. See - * %__COMPAT_SCX_OPS_SWITCH_PARTIAL in compat.h. This can be dropped in the - * future. - */ -void scx_bpf_switch_all(void) __ksym __weak; - -static inline void __COMPAT_scx_bpf_switch_all(void) -{ - if (!bpf_core_enum_value_exists(enum scx_ops_flags, SCX_OPS_SWITCH_PARTIAL)) - scx_bpf_switch_all(); -} - -/* - * scx_bpf_exit() is a new addition. Fall back to scx_bpf_error() if - * unavailable. 
Users can use scx_bpf_exit() directly in the future. - */ -#define __COMPAT_scx_bpf_exit(code, fmt, args...) \ -({ \ - if (bpf_ksym_exists(scx_bpf_exit_bstr)) \ - scx_bpf_exit((code), fmt, ##args); \ - else \ - scx_bpf_error(fmt, ##args); \ -}) - -/* - * scx_bpf_dump() is a new addition. Ignore if unavailable. Users can use - * scx_bpf_dump() directly in the future. - */ -#define __COMPAT_scx_bpf_dump(fmt, args...) \ -({ \ - if (bpf_ksym_exists(scx_bpf_dump_bstr)) \ - scx_bpf_dump(fmt, ##args); \ -}) - -/* - * scx_bpf_nr_cpu_ids(), scx_bpf_get_possible/online_cpumask() are new. No good - * way to noop these kfuncs. Provide a test macro. Users can assume existence in - * the future. - */ -#define __COMPAT_HAS_CPUMASKS \ - bpf_ksym_exists(scx_bpf_nr_cpu_ids) - -/* - * cpuperf is new. The followings become noop on older kernels. Callers can be - * updated to call cpuperf kfuncs directly in the future. - */ -static inline u32 __COMPAT_scx_bpf_cpuperf_cap(s32 cpu) -{ - if (bpf_ksym_exists(scx_bpf_cpuperf_cap)) - return scx_bpf_cpuperf_cap(cpu); - else - return 1024; -} - -static inline u32 __COMPAT_scx_bpf_cpuperf_cur(s32 cpu) -{ - if (bpf_ksym_exists(scx_bpf_cpuperf_cur)) - return scx_bpf_cpuperf_cur(cpu); - else - return 1024; -} - -static inline void __COMPAT_scx_bpf_cpuperf_set(s32 cpu, u32 perf) -{ - if (bpf_ksym_exists(scx_bpf_cpuperf_set)) - return scx_bpf_cpuperf_set(cpu, perf); -} - /* * Iteration and scx_bpf_consume_task() are new. The following become noop on * older kernels. The users can switch to bpf_for_each(scx_dsq) and directly diff --git a/tools/sched_ext/include/scx/compat.h b/tools/sched_ext/include/scx/compat.h index 47ec920f8776d..9a075e63a3308 100644 --- a/tools/sched_ext/include/scx/compat.h +++ b/tools/sched_ext/include/scx/compat.h @@ -106,23 +106,9 @@ static inline bool __COMPAT_struct_has_field(const char *type, const char *field return false; } -/* - * An ops flag, %SCX_OPS_SWITCH_PARTIAL, replaced scx_bpf_switch_all() which had - * to be called from ops.init(). To support both before and after, use both - * %__COMPAT_SCX_OPS_SWITCH_PARTIAL and %__COMPAT_scx_bpf_switch_all() defined - * in compat.bpf.h. Users can switch to directly using %SCX_OPS_SWITCH_PARTIAL - * in the future. - */ -#define __COMPAT_SCX_OPS_SWITCH_PARTIAL \ +#define SCX_OPS_SWITCH_PARTIAL \ __COMPAT_ENUM_OR_ZERO("scx_ops_flags", "SCX_OPS_SWITCH_PARTIAL") -/* - * scx_bpf_nr_cpu_ids(), scx_bpf_get_possible/online_cpumask() are new. Users - * will be able to assume existence in the future. - */ -#define __COMPAT_HAS_CPUMASKS \ - __COMPAT_has_ksym("scx_bpf_nr_cpu_ids") - /* * DSQ iterator is new. Users will be able to assume existence in the future. */ @@ -157,43 +143,23 @@ static inline long scx_hotplug_seq(void) * and attach it, backward compatibility is automatically maintained where * reasonable. * - * - ops.tick(): Ignored on older kernels with a warning. - * - ops.dump*(): Ignored on older kernels with a warning. - * - ops.exit_dump_len: Cleared to zero on older kernels with a warning. - * - ops.hotplug_seq: Ignored on older kernels. + * ec7e3b0463e1 ("implement-ops") in https://github.com/sched-ext/sched_ext is + * the current minimum required kernel version. 
*/ #define SCX_OPS_OPEN(__ops_name, __scx_name) ({ \ struct __scx_name *__skel; \ \ + SCX_BUG_ON(!__COMPAT_struct_has_field("sched_ext_ops", "dump"), \ + "sched_ext_ops.dump() missing, kernel too old?"); \ + \ __skel = __scx_name##__open(); \ SCX_BUG_ON(!__skel, "Could not open " #__scx_name); \ - \ - if (__COMPAT_struct_has_field("sched_ext_ops", "hotplug_seq")) \ - __skel->struct_ops.__ops_name->hotplug_seq = scx_hotplug_seq(); \ + __skel->struct_ops.__ops_name->hotplug_seq = scx_hotplug_seq(); \ __skel; \ }) #define SCX_OPS_LOAD(__skel, __ops_name, __scx_name, __uei_name) ({ \ UEI_SET_SIZE(__skel, __ops_name, __uei_name); \ - if (!__COMPAT_struct_has_field("sched_ext_ops", "exit_dump_len") && \ - (__skel)->struct_ops.__ops_name->exit_dump_len) { \ - fprintf(stderr, "WARNING: kernel doesn't support setting exit dump len\n"); \ - (__skel)->struct_ops.__ops_name->exit_dump_len = 0; \ - } \ - if (!__COMPAT_struct_has_field("sched_ext_ops", "tick") && \ - (__skel)->struct_ops.__ops_name->tick) { \ - fprintf(stderr, "WARNING: kernel doesn't support ops.tick()\n"); \ - (__skel)->struct_ops.__ops_name->tick = NULL; \ - } \ - if (!__COMPAT_struct_has_field("sched_ext_ops", "dump") && \ - ((__skel)->struct_ops.__ops_name->dump || \ - (__skel)->struct_ops.__ops_name->dump_cpu || \ - (__skel)->struct_ops.__ops_name->dump_task)) { \ - fprintf(stderr, "WARNING: kernel doesn't support ops.dump*()\n"); \ - (__skel)->struct_ops.__ops_name->dump = NULL; \ - (__skel)->struct_ops.__ops_name->dump_cpu = NULL; \ - (__skel)->struct_ops.__ops_name->dump_task = NULL; \ - } \ SCX_BUG_ON(__scx_name##__load((__skel)), "Failed to load skel"); \ }) diff --git a/tools/sched_ext/scx_central.bpf.c b/tools/sched_ext/scx_central.bpf.c index b297ccbd70b4d..1d8fd570eaa71 100644 --- a/tools/sched_ext/scx_central.bpf.c +++ b/tools/sched_ext/scx_central.bpf.c @@ -175,7 +175,7 @@ static bool dispatch_to_cpu(s32 cpu) scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_INF, 0); if (cpu != central_cpu) - scx_bpf_kick_cpu(cpu, __COMPAT_SCX_KICK_IDLE); + scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE); bpf_task_release(p); return true; @@ -305,7 +305,6 @@ int BPF_STRUCT_OPS_SLEEPABLE(central_init) struct bpf_timer *timer; int ret; - __COMPAT_scx_bpf_switch_all(); ret = scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1); if (ret) return ret; diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c index e22f190f52249..ce87c0239a03d 100644 --- a/tools/sched_ext/scx_qmap.bpf.c +++ b/tools/sched_ext/scx_qmap.bpf.c @@ -41,7 +41,6 @@ const volatile bool print_shared_dsq; const volatile char exp_prefix[17]; const volatile s32 disallow_tgid; const volatile bool suppress_dump; -const volatile bool switch_partial; u32 test_error_cnt; @@ -222,7 +221,7 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags) scx_bpf_dispatch(p, SHARED_DSQ, 0, enq_flags); cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0); if (cpu >= 0) - scx_bpf_kick_cpu(cpu, __COMPAT_SCX_KICK_IDLE); + scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE); return; } @@ -476,13 +475,13 @@ void BPF_STRUCT_OPS(qmap_dump, struct scx_dump_ctx *dctx) if (!(fifo = bpf_map_lookup_elem(&queue_arr, &i))) return; - __COMPAT_scx_bpf_dump("QMAP FIFO[%d]:", i); + scx_bpf_dump("QMAP FIFO[%d]:", i); bpf_repeat(4096) { if (bpf_map_pop_elem(fifo, &pid)) break; - __COMPAT_scx_bpf_dump(" %d", pid); + scx_bpf_dump(" %d", pid); } - __COMPAT_scx_bpf_dump("\n"); + scx_bpf_dump("\n"); } } @@ -496,9 +495,9 @@ void BPF_STRUCT_OPS(qmap_dump_cpu, struct scx_dump_ctx *dctx, s32 cpu, bool idle if (!(cpuc = 
bpf_map_lookup_percpu_elem(&cpu_ctx_stor, &zero, cpu))) return; - __COMPAT_scx_bpf_dump("QMAP: dsp_idx=%llu dsp_cnt=%llu avg_weight=%u cpuperf_target=%u", - cpuc->dsp_idx, cpuc->dsp_cnt, cpuc->avg_weight, - cpuc->cpuperf_target); + scx_bpf_dump("QMAP: dsp_idx=%llu dsp_cnt=%llu avg_weight=%u cpuperf_target=%u", + cpuc->dsp_idx, cpuc->dsp_cnt, cpuc->avg_weight, + cpuc->cpuperf_target); } void BPF_STRUCT_OPS(qmap_dump_task, struct scx_dump_ctx *dctx, struct task_struct *p) @@ -510,8 +509,8 @@ void BPF_STRUCT_OPS(qmap_dump_task, struct scx_dump_ctx *dctx, struct task_struc if (!(taskc = bpf_task_storage_get(&task_ctx_stor, p, 0, 0))) return; - __COMPAT_scx_bpf_dump("QMAP: force_local=%d core_sched_seq=%llu", - taskc->force_local, taskc->core_sched_seq); + scx_bpf_dump("QMAP: force_local=%d core_sched_seq=%llu", + taskc->force_local, taskc->core_sched_seq); } /* @@ -525,9 +524,6 @@ static void print_cpus(void) char buf[128] = "", *p; int idx; - if (!__COMPAT_HAS_CPUMASKS) - return; - possible = scx_bpf_get_possible_cpumask(); online = scx_bpf_get_online_cpumask(); @@ -593,9 +589,6 @@ static void monitor_cpuperf(void) const struct cpumask *online; int i, nr_online_cpus = 0; - if (!__COMPAT_HAS_CPUMASKS) - return; - nr_cpu_ids = scx_bpf_nr_cpu_ids(); online = scx_bpf_get_online_cpumask(); @@ -683,9 +676,6 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(qmap_init) struct bpf_timer *timer; s32 ret; - if (!switch_partial) - __COMPAT_scx_bpf_switch_all(); - print_cpus(); ret = scx_bpf_create_dsq(SHARED_DSQ, -1); diff --git a/tools/sched_ext/scx_qmap.c b/tools/sched_ext/scx_qmap.c index e10ceb170793d..bca36de53903d 100644 --- a/tools/sched_ext/scx_qmap.c +++ b/tools/sched_ext/scx_qmap.c @@ -104,8 +104,7 @@ int main(int argc, char **argv) skel->rodata->suppress_dump = true; break; case 'p': - skel->rodata->switch_partial = true; - skel->struct_ops.qmap_ops->flags |= __COMPAT_SCX_OPS_SWITCH_PARTIAL; + skel->struct_ops.qmap_ops->flags |= SCX_OPS_SWITCH_PARTIAL; break; case 'v': verbose = true; diff --git a/tools/sched_ext/scx_simple.bpf.c b/tools/sched_ext/scx_simple.bpf.c index 6fc66ab9877af..ed7e8d535fc5f 100644 --- a/tools/sched_ext/scx_simple.bpf.c +++ b/tools/sched_ext/scx_simple.bpf.c @@ -136,7 +136,6 @@ void BPF_STRUCT_OPS(simple_enable, struct task_struct *p) s32 BPF_STRUCT_OPS_SLEEPABLE(simple_init) { - __COMPAT_scx_bpf_switch_all(); return scx_bpf_create_dsq(SHARED_DSQ, -1); } From 1b94fc1abc5bd5c25554ae59f0a8a4a86ae0047b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 17 Jun 2024 10:31:16 -1000 Subject: [PATCH 535/537] scx: Fix error condition check in __scx_bpf_consume_task() __scx_bpf_consume_task() should test whether the task is still on the DSQ that's being iterated before erroring out if it's on a local DSQ. Otherwise, another racing consumer can move the task to a local DSQ triggering a spurious error. Relocate the error condition. 
(cherry picked from commit e2c8568eb3075a187de5130fae6a9b3b664766a6) --- kernel/sched/ext.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 2cf7dd5d5db1e..0e46697efe77b 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -6092,14 +6092,14 @@ __bpf_kfunc bool __scx_bpf_consume_task(unsigned long it, struct task_struct *p) */ dsq = READ_ONCE(p->scx.dsq); + if (unlikely(!dsq || dsq != kit_dsq)) + return false; + if (unlikely(dsq->id == SCX_DSQ_LOCAL)) { scx_ops_error("local DSQ not allowed"); return false; } - if (unlikely(!dsq || dsq != kit_dsq)) - return false; - if (!scx_kf_allowed(SCX_KF_DISPATCH)) return false; From 96926f85abe18c77d98927bea96c847f7fa8cdb3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 17 Jun 2024 11:19:28 -1000 Subject: [PATCH 536/537] selftests/sched_ext: Build fix __COMPAT_SCX_OPS_SWITCH_PARTIAL is no more. Use SCX_OPS_SWITCH_PARTIAL instead. (cherry picked from commit 9cd61b5803e426fe199b03124eb72dd147be414d) --- tools/testing/selftests/sched_ext/init_enable_count.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/sched_ext/init_enable_count.c b/tools/testing/selftests/sched_ext/init_enable_count.c index ef9da0a508467..97d45f1e5597e 100644 --- a/tools/testing/selftests/sched_ext/init_enable_count.c +++ b/tools/testing/selftests/sched_ext/init_enable_count.c @@ -24,7 +24,7 @@ open_load_prog(bool global) SCX_BUG_ON(!skel, "Failed to open skel"); if (!global) - skel->struct_ops.init_enable_count_ops->flags |= __COMPAT_SCX_OPS_SWITCH_PARTIAL; + skel->struct_ops.init_enable_count_ops->flags |= SCX_OPS_SWITCH_PARTIAL; SCX_BUG_ON(init_enable_count__load(skel), "Failed to load skel"); From 85fb1b6e7d3a0d26acc58e2214b0434a320a1be6 Mon Sep 17 00:00:00 2001 From: David Vernet Date: Fri, 21 Jun 2024 14:17:18 -0500 Subject: [PATCH 537/537] v6.10-rc4-scx1 Signed-off-by: David Vernet --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 128344ac87bec..5dcdd628e843d 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 10 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc4-scx1 NAME = Baby Opossum Posse # *DOCUMENTATION*