TITLE: possible deadlock in fakeName
TYPE: LOCKDEP
EXECUTOR: proc=5, id=7376

[ 492.198014][T24950] ======================================================
[ 492.198599][T24950] WARNING: possible circular locking dependency detected
[ 492.199166][T24950] 6.15.0-rc7-dirty #2 Not tainted
[ 492.199662][T24950] ------------------------------------------------------
[ 492.200243][T24950] syz.5.7376/24950 is trying to acquire lock:
[ 492.200764][T24950] ffff888106a71958 (&q->elevator_lock){+.+.}-{4:4}, at: _Z8fakeNameiii+0x49a/0x1a10
[ 492.201679][T24950]
[ 492.201679][T24950] but task is already holding lock:
[ 492.202324][T24950] ffff888106a71428 (&q->q_usage_counter(io)#55){++++}-{0:0}, at: nbd_start_device+0x16c/0xac0
[ 492.203199][T24950]
[ 492.203199][T24950] which lock already depends on the new lock.
[ 492.203199][T24950]
[ 492.204282][T24950]
[ 492.204282][T24950] the existing dependency chain (in reverse order) is:
[ 492.205026][T24950]
[ 492.205026][T24950] -> #2 (&q->q_usage_counter(io)#55){++++}-{0:0}:
[ 492.205755][T24950]        lock_acquire+0x120/0x360
[ 492.206191][T24950]        blk_alloc_queue+0x538/0x620
[ 492.207668][T24950]        __blk_mq_alloc_disk+0x164/0x350
[ 492.208143][T24950]        nbd_dev_add+0x478/0xb10
[ 492.208580][T24950]        nbd_init+0x21a/0x2d0
[ 492.208987][T24950]        do_one_initcall+0x233/0x820
[ 492.209427][T24950]        do_initcall_level+0x137/0x1f0
[ 492.209898][T24950]        do_initcalls+0x69/0xd0
[ 492.211387][T24950]        kernel_init_freeable+0x3d9/0x570
[ 492.212872][T24950]        kernel_init+0x1d/0x1d0
[ 492.214106][T24950]        ret_from_fork+0x4b/0x80
[ 492.215349][T24950]        ret_from_fork_asm+0x1a/0x30
[ 492.216687][T24950]
[ 492.216687][T24950] -> #1 (fs_reclaim){+.+.}-{0:0}:
[ 492.218501][T24950]        lock_acquire+0x120/0x360
[ 492.219773][T24950]        fs_reclaim_acquire+0x72/0x100
[ 492.221247][T24950]        kmem_cache_alloc_noprof+0x44/0x3c0
[ 492.222382][T24950]        __kernfs_new_node+0xd7/0x7f0
[ 492.223332][T24950]        kernfs_new_node+0x102/0x210
[ 492.224319][T24950]        kernfs_create_dir_ns+0x44/0x130
[ 492.225321][T24950]        sysfs_create_dir_ns+0x123/0x280
[ 492.226310][T24950]        kobject_add_internal+0x59f/0xb40
[ 492.227320][T24950]        kobject_add+0x155/0x220
[ 492.228199][T24950]        elv_register_queue+0xdb/0x260
[ 492.229196][T24950]        blk_register_queue+0x375/0x450
[ 492.230186][T24950]        add_disk_fwnode+0x77f/0x10e0
[ 492.231152][T24950]        _RNvXCsktjF9JQNZ8U_5rnullNtB2_13NullBlkModuleNtCs43vyB533jt3_6kernel13InPlaceModule4init+0x904/0xc30
[ 492.232707][T24950]        __rnull_mod_init+0x1a/0x70
[ 492.233328][T24950]        do_one_initcall+0x233/0x820
[ 492.233954][T24950]        do_initcall_level+0x137/0x1f0
[ 492.234606][T24950]        do_initcalls+0x69/0xd0
[ 492.235198][T24950]        kernel_init_freeable+0x3d9/0x570
[ 492.235883][T24950]        kernel_init+0x1d/0x1d0
[ 492.236478][T24950]        ret_from_fork+0x4b/0x80
[ 492.237083][T24950]        ret_from_fork_asm+0x1a/0x30
[ 492.237709][T24950]
[ 492.237709][T24950] -> #0 (&q->elevator_lock){+.+.}-{4:4}:
[ 492.238636][T24950]        validate_chain+0xb9b/0x2140
[ 492.239262][T24950]        __lock_acquire+0xaac/0xd20
[ 492.239881][T24950]        lock_acquire+0x120/0x360
[ 492.240504][T24950]        __mutex_lock+0x182/0xe80
[ 492.241103][T24950]        _Z8fakeNameiii+0x49a/0x1a10
[ 492.241900][T24950]        nbd_start_device+0x16c/0xac0
[ 492.242492][T24950]        nbd_genl_connect+0x1250/0x1930
[ 492.242954][T24950]        genl_family_rcv_msg_doit+0x212/0x300
[ 492.243465][T24950]        genl_rcv_msg+0x60e/0x790
[ 492.243901][T24950]        netlink_rcv_skb+0x21c/0x490
[ 492.244352][T24950]        genl_rcv+0x28/0x40
[ 492.244734][T24950]        netlink_unicast+0x758/0x8d0
[ 492.245165][T24950]        netlink_sendmsg+0x805/0xb30
[ 492.245611][T24950]        __sock_sendmsg+0x21c/0x270
[ 492.246055][T24950]        ____sys_sendmsg+0x505/0x830
[ 492.246500][T24950]        ___sys_sendmsg+0x21f/0x2a0
[ 492.246948][T24950]        __x64_sys_sendmsg+0x19b/0x260
[ 492.247396][T24950]        do_syscall_64+0xf6/0x210
[ 492.247817][T24950]        entry_SYSCALL_64_after_hwframe+0x77/0x7f
[ 492.248351][T24950]
[ 492.248351][T24950] other info that might help us debug this:
[ 492.248351][T24950]
[ 492.249170][T24950] Chain exists of:
[ 492.249170][T24950]   &q->elevator_lock --> fs_reclaim --> &q->q_usage_counter(io)#55
[ 492.249170][T24950]
[ 492.250308][T24950]  Possible unsafe locking scenario:
[ 492.250308][T24950]
[ 492.250911][T24950]        CPU0                    CPU1
[ 492.251357][T24950]        ----                    ----
[ 492.251804][T24950]   lock(&q->q_usage_counter(io)#55);
[ 492.252287][T24950]                                lock(fs_reclaim);
[ 492.252868][T24950]                                lock(&q->q_usage_counter(io)#55);
[ 492.253541][T24950]   lock(&q->elevator_lock);
[ 492.253948][T24950]
[ 492.253948][T24950]  *** DEADLOCK ***
[ 492.253948][T24950]
[ 492.254623][T24950] 6 locks held by syz.5.7376/24950:
[ 492.255064][T24950]  #0: ffffffff8f76e570 (cb_lock){++++}-{4:4}, at: genl_rcv+0x19/0x40
[ 492.255786][T24950]  #1: ffffffff8f76e388 (genl_mutex){+.+.}-{4:4}, at: genl_rcv_msg+0x10d/0x790
[ 492.256540][T24950]  #2: ffff88802383a198 (&nbd->config_lock){+.+.}-{4:4}, at: nbd_genl_connect+0x94f/0x1930
[ 492.257385][T24950]  #3: ffff88802383a0d8 (&set->tag_list_lock){+.+.}-{4:4}, at: blk_mq_update_nr_hw_queues+0xac/0x1a10
[ 492.258321][T24950]  #4: ffff888106a71428 (&q->q_usage_counter(io)#55){++++}-{0:0}, at: nbd_start_device+0x16c/0xac0
[ 492.259234][T24950]  #5: ffff888106a71460 (&q->q_usage_counter(queue)#7){+.+.}-{0:0}, at: nbd_start_device+0x16c/0xac0
[ 492.260176][T24950]
[ 492.260176][T24950] stack backtrace:
[ 492.260687][T24950] CPU: 0 UID: 0 PID: 24950 Comm: syz.5.7376 Not tainted 6.15.0-rc7-dirty #2 PREEMPT(full)
[ 492.260700][T24950] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
[ 492.260709][T24950] Call Trace:
[ 492.260715][T24950]  <TASK>
[ 492.260721][T24950]  dump_stack_lvl+0x189/0x250
[ 492.260734][T24950]  ? __pfx_dump_stack_lvl+0x10/0x10
[ 492.260746][T24950]  ? __pfx__printk+0x10/0x10
[ 492.260760][T24950]  ? print_lock_name+0xde/0x100
[ 492.260772][T24950]  print_circular_bug+0x2ee/0x310
[ 492.260789][T24950]  check_noncircular+0x134/0x160
[ 492.260806][T24950]  validate_chain+0xb9b/0x2140
[ 492.260826][T24950]  __lock_acquire+0xaac/0xd20
[ 492.260840][T24950]  ? blk_mq_update_nr_hw_queues+0x49a/0x1a10
[ 492.260856][T24950]  lock_acquire+0x120/0x360
[ 492.260867][T24950]  ? blk_mq_update_nr_hw_queues+0x49a/0x1a10
[ 492.260887][T24950]  __mutex_lock+0x182/0xe80
[ 492.260899][T24950]  ? blk_mq_update_nr_hw_queues+0x49a/0x1a10
[ 492.260918][T24950]  ? blk_mq_update_nr_hw_queues+0x49a/0x1a10
[ 492.260935][T24950]  ? __pfx___mutex_lock+0x10/0x10
[ 492.260949][T24950]  ? __kasan_kmalloc+0x93/0xb0
[ 492.260967][T24950]  ? blk_mq_update_nr_hw_queues+0x47b/0x1a10
[ 492.260985][T24950]  blk_mq_update_nr_hw_queues+0x49a/0x1a10
[ 492.261006][T24950]  ? __pfx_blk_mq_update_nr_hw_queues+0x10/0x10
[ 492.261023][T24950]  ? nbd_add_socket+0x688/0x9a0
[ 492.261034][T24950]  nbd_start_device+0x16c/0xac0
[ 492.261045][T24950]  ? __nla_parse+0x40/0x60
[ 492.261059][T24950]  nbd_genl_connect+0x1250/0x1930
[ 492.261078][T24950]  ? __pfx_nbd_genl_connect+0x10/0x10
[ 492.261100][T24950]  ? genl_family_rcv_msg_attrs_parse+0x1c9/0x2a0
[ 492.261118][T24950]  genl_family_rcv_msg_doit+0x212/0x300
[ 492.261136][T24950]  ? __pfx_genl_family_rcv_msg_doit+0x10/0x10
[ 492.261156][T24950]  ? stack_depot_save_flags+0x40/0x910
[ 492.261168][T24950]  genl_rcv_msg+0x60e/0x790
[ 492.261185][T24950]  ? __pfx_genl_rcv_msg+0x10/0x10
[ 492.261199][T24950]  ? __pfx_nbd_genl_connect+0x10/0x10
[ 492.261219][T24950]  netlink_rcv_skb+0x21c/0x490
[ 492.261231][T24950]  ? __pfx_genl_rcv_msg+0x10/0x10
[ 492.261246][T24950]  ? __pfx_netlink_rcv_skb+0x10/0x10
[ 492.261263][T24950]  ? down_read+0x1ad/0x2e0
[ 492.261277][T24950]  genl_rcv+0x28/0x40
[ 492.261291][T24950]  netlink_unicast+0x758/0x8d0
[ 492.261304][T24950]  netlink_sendmsg+0x805/0xb30
[ 492.261319][T24950]  ? __pfx_netlink_sendmsg+0x10/0x10
[ 492.261332][T24950]  ? aa_sock_msg_perm+0x94/0x160
[ 492.261349][T24950]  ? bpf_lsm_socket_sendmsg+0x9/0x20
[ 492.261365][T24950]  ? __pfx_netlink_sendmsg+0x10/0x10
[ 492.261378][T24950]  __sock_sendmsg+0x21c/0x270
[ 492.261388][T24950]  ____sys_sendmsg+0x505/0x830
[ 492.261404][T24950]  ? __pfx_____sys_sendmsg+0x10/0x10
[ 492.261420][T24950]  ? import_iovec+0x74/0xa0
[ 492.261436][T24950]  ___sys_sendmsg+0x21f/0x2a0
[ 492.261450][T24950]  ? __pfx____sys_sendmsg+0x10/0x10
[ 492.261474][T24950]  ? __fget_files+0x2a/0x420
[ 492.261485][T24950]  ? __fget_files+0x3a0/0x420
[ 492.261499][T24950]  __x64_sys_sendmsg+0x19b/0x260
[ 492.261514][T24950]  ? __pfx___x64_sys_sendmsg+0x10/0x10
[ 492.261532][T24950]  ? do_syscall_64+0xba/0x210
[ 492.261545][T24950]  do_syscall_64+0xf6/0x210
[ 492.261558][T24950]  ? clear_bhb_loop+0x60/0xb0
[ 492.261571][T24950]  entry_SYSCALL_64_after_hwframe+0x77/0x7f
[ 492.261582][T24950] RIP: 0033:0x7fc91838e969
[ 492.261593][T24950] Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 a8 ff ff ff f7 d8 64 89 01 48
[ 492.261603][T24950] RSP: 002b:00007fc9191d7038 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
[ 492.261614][T24950] RAX: ffffffffffffffda RBX: 00007fc9185b5fa0 RCX: 00007fc91838e969
[ 492.261623][T24950] RDX: 0000000000004000 RSI: 0000200000000300 RDI: 0000000000000004
[ 492.261631][T24950] RBP: 00007fc918410ab1 R08: 0000000000000000 R09: 0000000000000000
[ 492.261638][T24950] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
[ 492.261646][T24950] R13: 0000000000000000 R14: 00007fc9185b5fa0 R15: 00007ffef33da528
[ 492.261658][T24950]  </TASK>

REPORT:
======================================================
WARNING: possible circular locking dependency detected
6.15.0-rc7-dirty #2 Not tainted
------------------------------------------------------
syz.5.7376/24950 is trying to acquire lock:
ffff888106a71958 (&q->elevator_lock){+.+.}-{4:4}, at: fakeName+0x49a/0x1a10

but task is already holding lock:
ffff888106a71428 (&q->q_usage_counter(io)#55){++++}-{0:0}, at: nbd_start_device+0x16c/0xac0

which lock already depends on the new lock.


the existing dependency chain (in reverse order) is:

-> #2 (&q->q_usage_counter(io)#55){++++}-{0:0}:
       lock_acquire+0x120/0x360
       blk_alloc_queue+0x538/0x620
       __blk_mq_alloc_disk+0x164/0x350
       nbd_dev_add+0x478/0xb10
       nbd_init+0x21a/0x2d0
       do_one_initcall+0x233/0x820
       do_initcall_level+0x137/0x1f0
       do_initcalls+0x69/0xd0
       kernel_init_freeable+0x3d9/0x570
       kernel_init+0x1d/0x1d0
       ret_from_fork+0x4b/0x80
       ret_from_fork_asm+0x1a/0x30

-> #1 (fs_reclaim){+.+.}-{0:0}:
       lock_acquire+0x120/0x360
       fs_reclaim_acquire+0x72/0x100
       kmem_cache_alloc_noprof+0x44/0x3c0
       __kernfs_new_node+0xd7/0x7f0
       kernfs_new_node+0x102/0x210
       kernfs_create_dir_ns+0x44/0x130
       sysfs_create_dir_ns+0x123/0x280
       kobject_add_internal+0x59f/0xb40
       kobject_add+0x155/0x220
       elv_register_queue+0xdb/0x260
       blk_register_queue+0x375/0x450
       add_disk_fwnode+0x77f/0x10e0
       <rnull::NullBlkModule as kernel::InPlaceModule>::init+0x904/0xc30
       __rnull_mod_init+0x1a/0x70
       do_one_initcall+0x233/0x820
       do_initcall_level+0x137/0x1f0
       do_initcalls+0x69/0xd0
       kernel_init_freeable+0x3d9/0x570
       kernel_init+0x1d/0x1d0
       ret_from_fork+0x4b/0x80
       ret_from_fork_asm+0x1a/0x30

-> #0 (&q->elevator_lock){+.+.}-{4:4}:
       validate_chain+0xb9b/0x2140
       __lock_acquire+0xaac/0xd20
       lock_acquire+0x120/0x360
       __mutex_lock+0x182/0xe80
       fakeName+0x49a/0x1a10
       nbd_start_device+0x16c/0xac0
       nbd_genl_connect+0x1250/0x1930
       genl_family_rcv_msg_doit+0x212/0x300
       genl_rcv_msg+0x60e/0x790
       netlink_rcv_skb+0x21c/0x490
       genl_rcv+0x28/0x40
       netlink_unicast+0x758/0x8d0
       netlink_sendmsg+0x805/0xb30
       __sock_sendmsg+0x21c/0x270
       ____sys_sendmsg+0x505/0x830
       ___sys_sendmsg+0x21f/0x2a0
       __x64_sys_sendmsg+0x19b/0x260
       do_syscall_64+0xf6/0x210
       entry_SYSCALL_64_after_hwframe+0x77/0x7f
| 246 | + |
other info that might help us debug this:

Chain exists of:
  &q->elevator_lock --> fs_reclaim --> &q->q_usage_counter(io)#55

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&q->q_usage_counter(io)#55);
                               lock(fs_reclaim);
                               lock(&q->q_usage_counter(io)#55);
  lock(&q->elevator_lock);

 *** DEADLOCK ***

6 locks held by syz.5.7376/24950:
 #0: ffffffff8f76e570 (cb_lock){++++}-{4:4}, at: genl_rcv+0x19/0x40
 #1: ffffffff8f76e388 (genl_mutex){+.+.}-{4:4}, at: genl_rcv_msg+0x10d/0x790
 #2: ffff88802383a198 (&nbd->config_lock){+.+.}-{4:4}, at: nbd_genl_connect+0x94f/0x1930
 #3: ffff88802383a0d8 (&set->tag_list_lock){+.+.}-{4:4}, at: blk_mq_update_nr_hw_queues+0xac/0x1a10
 #4: ffff888106a71428 (&q->q_usage_counter(io)#55){++++}-{0:0}, at: nbd_start_device+0x16c/0xac0
 #5: ffff888106a71460 (&q->q_usage_counter(queue)#7){+.+.}-{0:0}, at: nbd_start_device+0x16c/0xac0

stack backtrace:
CPU: 0 UID: 0 PID: 24950 Comm: syz.5.7376 Not tainted 6.15.0-rc7-dirty #2 PREEMPT(full)
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
Call Trace:
 <TASK>
 dump_stack_lvl+0x189/0x250
 print_circular_bug+0x2ee/0x310
 check_noncircular+0x134/0x160
 validate_chain+0xb9b/0x2140
 __lock_acquire+0xaac/0xd20
 lock_acquire+0x120/0x360
 __mutex_lock+0x182/0xe80
 blk_mq_update_nr_hw_queues+0x49a/0x1a10
 nbd_start_device+0x16c/0xac0
 nbd_genl_connect+0x1250/0x1930
 genl_family_rcv_msg_doit+0x212/0x300
 genl_rcv_msg+0x60e/0x790
 netlink_rcv_skb+0x21c/0x490
 genl_rcv+0x28/0x40
 netlink_unicast+0x758/0x8d0
 netlink_sendmsg+0x805/0xb30
 __sock_sendmsg+0x21c/0x270
 ____sys_sendmsg+0x505/0x830
 ___sys_sendmsg+0x21f/0x2a0
 __x64_sys_sendmsg+0x19b/0x260
 do_syscall_64+0xf6/0x210
 entry_SYSCALL_64_after_hwframe+0x77/0x7f
RIP: 0033:0x7fc91838e969
Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 a8 ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007fc9191d7038 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
RAX: ffffffffffffffda RBX: 00007fc9185b5fa0 RCX: 00007fc91838e969
RDX: 0000000000004000 RSI: 0000200000000300 RDI: 0000000000000004
RBP: 00007fc918410ab1 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
R13: 0000000000000000 R14: 00007fc9185b5fa0 R15: 00007ffef33da528
 </TASK>
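
Illustrative analogue (not part of the captured log above): the report says one path freezes the queue (q->q_usage_counter(io), here from nbd_start_device()) and then asks for q->elevator_lock, while the recorded history (the #1/#2 traces) shows fs_reclaim entered from a sysfs allocation done under q->elevator_lock, and reclaim in turn able to wait on the frozen queue. The userspace sketch below only mirrors that inverted ordering under those assumptions; the mutex names are borrowed from the lockdep classes for readability, the cpu0()/cpu1() threads are hypothetical, and none of this is kernel code or kernel API.

/*
 * Minimal pthread analogue of the inverted lock ordering reported above.
 * cpu0() follows the CPU0 column of the "Possible unsafe locking scenario"
 * table; cpu1() follows the recorded elevator_lock -> fs_reclaim ->
 * q_usage_counter(io) history. Build with: cc -pthread deadlock_sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t q_usage_counter_io = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t fs_reclaim         = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t elevator_lock      = PTHREAD_MUTEX_INITIALIZER;

static void *cpu0(void *arg)            /* analogue of the syz.5.7376 task */
{
        (void)arg;
        pthread_mutex_lock(&q_usage_counter_io);  /* queue frozen in nbd_start_device() */
        sleep(1);                                 /* give cpu1 time to take elevator_lock */
        pthread_mutex_lock(&elevator_lock);       /* blk_mq_update_nr_hw_queues() path */
        puts("cpu0: acquired both locks");
        pthread_mutex_unlock(&elevator_lock);
        pthread_mutex_unlock(&q_usage_counter_io);
        return NULL;
}

static void *cpu1(void *arg)            /* analogue of the recorded #1/#2 history */
{
        (void)arg;
        pthread_mutex_lock(&elevator_lock);       /* e.g. elevator/sysfs registration */
        pthread_mutex_lock(&fs_reclaim);          /* allocation that may enter reclaim */
        pthread_mutex_lock(&q_usage_counter_io);  /* reclaim waiting on the frozen queue */
        puts("cpu1: acquired all three locks");
        pthread_mutex_unlock(&q_usage_counter_io);
        pthread_mutex_unlock(&fs_reclaim);
        pthread_mutex_unlock(&elevator_lock);
        return NULL;
}

int main(void)
{
        pthread_t a, b;
        pthread_create(&a, NULL, cpu0, NULL);
        pthread_create(&b, NULL, cpu1, NULL);
        pthread_join(a, NULL);  /* with this timing the threads block on each other:      */
        pthread_join(b, NULL);  /* cpu0 waits for elevator_lock, cpu1 for q_usage_counter */
        return 0;
}

With the sleep in place, cpu1 takes elevator_lock and fs_reclaim and then blocks on q_usage_counter_io, after which cpu0 blocks on elevator_lock, which is the same cycle the chain above describes; avoiding it means keeping one consistent order between the queue freeze and elevator_lock, which is the ordering constraint lockdep is checking here.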