In the Linux kernel, the following vulnerability has been resolved:
interconnect: Fix locking for runpm vs reclaim
For cases where icc_set_bw() can be called in call paths that could
deadlock against shrinker/reclaim, such as runpm resume, we need to
decouple the icc locking. Introduce a new icc_bw_lock for the cases where
we need to serialize bw aggregation and update, decoupling that from
paths that require memory allocation, such as node/link creation and
destruction (a sketch of this split follows the lockdep splat below).
Fixes this lockdep splat:
======================================================
WARNING: possible circular locking dependency detected
6.2.0-rc8-debug+ #554 Not tainted
------------------------------------------------------
ring0/132 is trying to acquire lock:
ffffff80871916d0 (&gmu->lock){+.+.}-{3:3}, at: a6xx_pm_resume+0xf0/0x234
but task is already holding lock:
ffffffdb5aee57e8 (dma_fence_map){++++}-{0:0}, at: msm_job_run+0x68/0x150
which lock already depends on the new lock.
the existing dependency chain (in reverse order) is:
-> #4 (dma_fence_map){++++}-{0:0}:
__dma_fence_might_wait+0x74/0xc0
dma_resv_lockdep+0x1f4/0x2f4
do_one_initcall+0x104/0x2bc
kernel_init_freeable+0x344/0x34c
kernel_init+0x30/0x134
ret_from_fork+0x10/0x20
-> #3 (mmu_notifier_invalidate_range_start){+.+.}-{0:0}:
fs_reclaim_acquire+0x80/0xa8
slab_pre_alloc_hook.constprop.0+0x40/0x25c
__kmem_cache_alloc_node+0x60/0x1cc
__kmalloc+0xd8/0x100
topology_parse_cpu_capacity+0x8c/0x178
get_cpu_for_node+0x88/0xc4
parse_cluster+0x1b0/0x28c
parse_cluster+0x8c/0x28c
init_cpu_topology+0x168/0x188
smp_prepare_cpus+0x24/0xf8
kernel_init_freeable+0x18c/0x34c
kernel_init+0x30/0x134
ret_from_fork+0x10/0x20
-> #2 (fs_reclaim){+.+.}-{0:0}:
__fs_reclaim_acquire+0x3c/0x48
fs_reclaim_acquire+0x54/0xa8
slab_pre_alloc_hook.constprop.0+0x40/0x25c
__kmem_cache_alloc_node+0x60/0x1cc
__kmalloc+0xd8/0x100
kzalloc.constprop.0+0x14/0x20
icc_node_create_nolock+0x4c/0xc4
icc_node_create+0x38/0x58
qcom_icc_rpmh_probe+0x1b8/0x248
platform_probe+0x70/0xc4
really_probe+0x158/0x290
__driver_probe_device+0xc8/0xe0
driver_probe_device+0x44/0x100
__driver_attach+0xf8/0x108
bus_for_each_dev+0x78/0xc4
driver_attach+0x2c/0x38
bus_add_driver+0xd0/0x1d8
driver_register+0xbc/0xf8
__platform_driver_register+0x30/0x3c
qnoc_driver_init+0x24/0x30
do_one_initcall+0x104/0x2bc
kernel_init_freeable+0x344/0x34c
kernel_init+0x30/0x134
ret_from_fork+0x10/0x20
-> #1 (icc_lock){+.+.}-{3:3}:
__mutex_lock+0xcc/0x3c8
mutex_lock_nested+0x30/0x44
icc_set_bw+0x88/0x2b4
_set_opp_bw+0x8c/0xd8
_set_opp+0x19c/0x300
dev_pm_opp_set_opp+0x84/0x94
a6xx_gmu_resume+0x18c/0x804
a6xx_pm_resume+0xf8/0x234
adreno_runtime_resume+0x2c/0x38
pm_generic_runtime_resume+0x30/0x44
__rpm_callback+0x15c/0x174
rpm_callback+0x78/0x7c
rpm_resume+0x318/0x524
__pm_runtime_resume+0x78/0xbc
adreno_load_gpu+0xc4/0x17c
msm_open+0x50/0x120
drm_file_alloc+0x17c/0x228
drm_open_helper+0x74/0x118
drm_open+0xa0/0x144
drm_stub_open+0xd4/0xe4
chrdev_open+0x1b8/0x1e4
do_dentry_open+0x2f8/0x38c
vfs_open+0x34/0x40
path_openat+0x64c/0x7b4
do_filp_open+0x54/0xc4
do_sys_openat2+0x9c/0x100
do_sys_open+0x50/0x7c
__arm64_sys_openat+0x28/0x34
invoke_syscall+0x8c/0x128
el0_svc_common.constprop.0+0xa0/0x11c
do_el0_
---truncated---
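The shape of the fix can be sketched as below. This is an illustrative reconstruction under simplified types, not the upstream patch; aggregate_and_apply() is a hypothetical stand-in for the framework's aggregation walk, and struct icc_node is reduced to a bare minimum. The key observation from the splat is that icc_lock sits on both sides of reclaim: node creation allocates memory while holding it (pulling in fs_reclaim at #2), and the runpm resume path takes it via icc_set_bw() (#1). Giving bandwidth updates their own allocation-free mutex removes icc_lock from the resume path entirely.

```c
/*
 * Minimal sketch of the decoupled locking scheme, NOT the verbatim
 * upstream patch (the real code lives in drivers/interconnect/core.c).
 */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>

static DEFINE_MUTEX(icc_lock);    /* topology: node/link create/destroy */
static DEFINE_MUTEX(icc_bw_lock); /* bw aggregation + hardware update only */

struct icc_path;                  /* opaque in this sketch */
struct icc_node { int id; };     /* reduced to the bare minimum */

/* Hypothetical stand-in for re-aggregating requests along @path and
 * calling the provider's ->set() hooks; it must never allocate. */
static int aggregate_and_apply(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
	return 0;
}

int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
	int ret;

	/* Nothing under icc_bw_lock allocates memory, so this can be
	 * reached from runpm resume (or other reclaim-adjacent paths)
	 * without re-creating the inversion shown in the splat. */
	mutex_lock(&icc_bw_lock);
	ret = aggregate_and_apply(path, avg_bw, peak_bw);
	mutex_unlock(&icc_bw_lock);
	return ret;
}

struct icc_node *icc_node_create(int id)
{
	struct icc_node *node;

	/* Topology changes may still sleep in the allocator: they hold
	 * icc_lock, which icc_set_bw() no longer contends on. */
	mutex_lock(&icc_lock);
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node)
		node->id = id;
	mutex_unlock(&icc_lock);
	return node;
}
```

Where a topology change also needs to touch the aggregated bandwidth state, it would take icc_bw_lock nested inside icc_lock, so a single consistent lock ordering covers both classes of path.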
Solution
No solution given by the vendor.
Workaround
No workaround given by the vendor.
History
Wed, 24 Dec 2025 11:15:00 +0000
| Type | Values Removed | Values Added |
|---|---|---|
| Description | | (the advisory text reproduced above) |
| Title | | interconnect: Fix locking for runpm vs reclaim |
| First Time appeared | | Linux; Linux linux Kernel |
| CPEs | | cpe:2.3:o:linux:linux_kernel:*:*:*:*:*:*:*:* |
| Vendors & Products | | Linux; Linux linux Kernel |
Status: PUBLISHED
Assigner: Linux
Published:
Updated: 2025-12-24T10:55:45.518Z
Reserved: 2025-12-24T10:53:46.178Z
Link: CVE-2023-54013
Status: Received
Published: 2025-12-24T11:15:54.270
Modified: 2025-12-24T11:15:54.270
Link: CVE-2023-54013
Weaknesses
No weakness assigned.