
Pressing ctrl+\ results in a panic? or segfault? #1647

Open
fira42073 opened this issue Sep 7, 2024 · 4 comments
Labels: kind/bug (Categorizes issue or PR as related to a bug), triage/accepted (Indicates an issue or PR is ready to be actively worked on)

Comments

@fira42073

What happened:
After running
kubectl logs -f deploy/gitlab-webservice-default
and then pressing ctrl + \

I get a stack trace and kubectl crashes (see the "Anything else we need to know?" section below).

What you expected to happen:

I'm not sure what the correct behaviour is, but since it received a SIGQUIT signal, it should probably just quit?

How to reproduce it (as minimally and precisely as possible):

kubectl run txx --image=fedora --command -- sleep 1000000
kubectl logs -f txx

# press ctrl + \

Anything else we need to know?:

stacktrace?

^\SIGQUIT: quit
PC=0x476841 m=0 sigcode=128

goroutine 0 gp=0x3367080 m=0 mp=0x3367d20 [idle]:
runtime.futex(0x3367e60, 0x80, 0x0, 0x0, 0x0, 0x0)
        runtime/sys_linux_amd64.s:557 +0x21 fp=0x7ffdd90e3f00 sp=0x7ffdd90e3ef8 pc=0x476841
runtime.futexsleep(0x7ffdd90e3f78?, 0x410110?, 0x1d90e3fa8?)
        runtime/os_linux.go:69 +0x30 fp=0x7ffdd90e3f50 sp=0x7ffdd90e3f00 pc=0x43a650
runtime.notesleep(0x3367e60)
        runtime/lock_futex.go:170 +0x87 fp=0x7ffdd90e3f88 sp=0x7ffdd90e3f50 pc=0x4102a7
runtime.mPark(...)
        runtime/proc.go:1761
runtime.stopm()
        runtime/proc.go:2782 +0x8c fp=0x7ffdd90e3fb8 sp=0x7ffdd90e3f88 pc=0x4451cc
runtime.findRunnable()
        runtime/proc.go:3512 +0xd5f fp=0x7ffdd90e4130 sp=0x7ffdd90e3fb8 pc=0x446d3f
runtime.schedule()
        runtime/proc.go:3868 +0xb1 fp=0x7ffdd90e4168 sp=0x7ffdd90e4130 pc=0x447e11
runtime.park_m(0xc0000061c0)
        runtime/proc.go:4036 +0x1ec fp=0x7ffdd90e41c0 sp=0x7ffdd90e4168 pc=0x4483ec
runtime.mcall()
        runtime/asm_amd64.s:458 +0x4e fp=0x7ffdd90e41d8 sp=0x7ffdd90e41c0 pc=0x472b6e

goroutine 1 gp=0xc0000061c0 m=nil [sync.Cond.Wait]:
runtime.gopark(0xc000616f80?, 0xc000902000?, 0x1a?, 0x0?, 0x1000?)
        runtime/proc.go:402 +0xce fp=0xc000616f00 sp=0xc000616ee0 pc=0x440bce
runtime.goparkunlock(...)
        runtime/proc.go:408
sync.runtime_notifyListWait(0xc000726f48, 0x2d)
        runtime/sema.go:569 +0x159 fp=0xc000616f50 sp=0xc000616f00 pc=0x470e19
sync.(*Cond).Wait(0xc000616fa0?)
        sync/cond.go:70 +0x85 fp=0xc000616f90 sp=0xc000616f50 pc=0x47e3c5
golang.org/x/net/http2.(*pipe).Read(0xc000726f30, {0xc000017000, 0x1000, 0x1000})
        golang.org/x/[email protected]/http2/pipe.go:76 +0xdf fp=0xc000616ff8 sp=0xc000616f90 pc=0xa2fedf
golang.org/x/net/http2.transportResponseBody.Read({0x427205?}, {0xc000017000?, 0x926800?, 0xc0006170e0?})
        golang.org/x/[email protected]/http2/transport.go:2712 +0x65 fp=0xc000617088 sp=0xc000616ff8 pc=0xa3fa05
bufio.(*Reader).fill(0xc0006172f0)
        bufio/bufio.go:110 +0x103 fp=0xc0006170c0 sp=0xc000617088 pc=0x5758a3
bufio.(*Reader).ReadSlice(0xc0006172f0, 0xa)
        bufio/bufio.go:376 +0x29 fp=0xc000617108 sp=0xc0006170c0 pc=0x5763a9
bufio.(*Reader).collectFragments(0xc0006172f0, 0xa)
        bufio/bufio.go:451 +0x70 fp=0xc0006171b8 sp=0xc000617108 pc=0x5767d0
bufio.(*Reader).ReadBytes(0xc0000b2058?, 0x0?)
        bufio/bufio.go:478 +0x17 fp=0xc000617238 sp=0xc0006171b8 pc=0x5769d7
k8s.io/kubectl/pkg/cmd/logs.DefaultConsumeRequest({0x2361040?, 0xc000a19d40?}, {0x2356f40, 0xc0000b2058})
        k8s.io/[email protected]/pkg/cmd/logs/logs.go:430 +0x234 fp=0xc000617378 sp=0xc000617238 pc=0x19422b4
k8s.io/kubectl/pkg/cmd/logs.LogsOptions.sequentialConsumeRequest({{0x1ec2b58, 0x7}, {0x7ffdd90e5b6d, 0x20}, 0x0, {0x235f5d8, 0xc0002b16c0}, {0xc0009aa140, 0x1, 0x2}, ...}, ...)
        k8s.io/[email protected]/pkg/cmd/logs/logs.go:380 +0x145 fp=0xc000617628 sp=0xc000617378 pc=0x1941de5
k8s.io/kubectl/pkg/cmd/logs.LogsOptions.RunLogs({{0x1ec2b58, 0x7}, {0x7ffdd90e5b6d, 0x20}, 0x0, {0x235f5d8, 0xc0002b16c0}, {0xc0009aa140, 0x1, 0x2}, ...})
        k8s.io/[email protected]/pkg/cmd/logs/logs.go:343 +0x1a5 fp=0xc000617790 sp=0xc000617628 pc=0x19414e5
k8s.io/kubectl/pkg/cmd/logs.NewCmdLogs.func1(0xc00073c308?, {0xc0009aa140?, 0x1?, 0x2?})
        k8s.io/[email protected]/pkg/cmd/logs/logs.go:161 +0x118 fp=0xc000617b38 sp=0xc000617790 pc=0x193fd38
github.com/spf13/cobra.(*Command).execute(0xc00073c308, {0xc0009aa120, 0x2, 0x2})
        github.com/spf13/[email protected]/command.go:944 +0x867 fp=0xc000617c58 sp=0xc000617b38 pc=0x5cb0c7
github.com/spf13/cobra.(*Command).ExecuteC(0xc000736308)
        github.com/spf13/[email protected]/command.go:1068 +0x3a5 fp=0xc000617d10 sp=0xc000617c58 pc=0x5cb925
github.com/spf13/cobra.(*Command).Execute(...)
        github.com/spf13/[email protected]/command.go:992
k8s.io/component-base/cli.run(0xc000736308)
        k8s.io/[email protected]/cli/run.go:146 +0x290 fp=0xc000617d98 sp=0xc000617d10 pc=0xaa9db0
k8s.io/component-base/cli.Run(0x2352df0?)
        k8s.io/[email protected]/cli/run.go:46 +0x17 fp=0xc000617e18 sp=0xc000617d98 pc=0xaa9a57
main.main()
        k8s.io/kubernetes/cmd/kubectl-sdk/kubectl.go:41 +0x85 fp=0xc000617f50 sp=0xc000617e18 pc=0x1a0b4c5
runtime.main()
        runtime/proc.go:271 +0x29d fp=0xc000617fe0 sp=0xc000617f50 pc=0x44077d
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc000617fe8 sp=0xc000617fe0 pc=0x474a21

goroutine 2 gp=0xc000006700 m=nil [force gc (idle)]:
runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc0000aefa8 sp=0xc0000aef88 pc=0x440bce
runtime.goparkunlock(...)
        runtime/proc.go:408
runtime.forcegchelper()
        runtime/proc.go:326 +0xb3 fp=0xc0000aefe0 sp=0xc0000aefa8 pc=0x440a33
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000aefe8 sp=0xc0000aefe0 pc=0x474a21
created by runtime.init.6 in goroutine 1
        runtime/proc.go:314 +0x1a

goroutine 3 gp=0xc000006c40 m=nil [GC sweep wait]:
runtime.gopark(0x1?, 0x0?, 0x0?, 0x0?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc0000af780 sp=0xc0000af760 pc=0x440bce
runtime.goparkunlock(...)
        runtime/proc.go:408
runtime.bgsweep(0xc0000ce000)
        runtime/mgcsweep.go:318 +0xdf fp=0xc0000af7c8 sp=0xc0000af780 pc=0x42a4bf
runtime.gcenable.gowrap1()
        runtime/mgc.go:203 +0x25 fp=0xc0000af7e0 sp=0xc0000af7c8 pc=0x41edc5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000af7e8 sp=0xc0000af7e0 pc=0x474a21
created by runtime.gcenable in goroutine 1
        runtime/mgc.go:203 +0x66

goroutine 4 gp=0xc000006e00 m=nil [sleep]:
runtime.gopark(0xc0000dc000?, 0x56d34ddba6b?, 0x0?, 0x0?, 0x2191ce8?)
        runtime/proc.go:402 +0xce fp=0xc0000aff38 sp=0xc0000aff18 pc=0x440bce
runtime.goparkunlock(...)
        runtime/proc.go:408
runtime.(*scavengerState).sleep(0x3366500, 0x410ee87800000000)
        runtime/mgcscavenge.go:504 +0x113 fp=0xc0000affa8 sp=0xc0000aff38 pc=0x428033
runtime.bgscavenge(0xc0000ce000)
        runtime/mgcscavenge.go:662 +0x74 fp=0xc0000affc8 sp=0xc0000affa8 pc=0x428434
runtime.gcenable.gowrap2()
        runtime/mgc.go:204 +0x25 fp=0xc0000affe0 sp=0xc0000affc8 pc=0x41ed65
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000affe8 sp=0xc0000affe0 pc=0x474a21
created by runtime.gcenable in goroutine 1
        runtime/mgc.go:204 +0xa5

goroutine 5 gp=0xc000007340 m=nil [finalizer wait]:
runtime.gopark(0xc0000ae660?, 0x42733c?, 0x60?, 0xa1?, 0x550011?)
        runtime/proc.go:402 +0xce fp=0xc0000ae620 sp=0xc0000ae600 pc=0x440bce
runtime.runfinq()
        runtime/mfinal.go:194 +0x107 fp=0xc0000ae7e0 sp=0xc0000ae620 pc=0x41de07
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000ae7e8 sp=0xc0000ae7e0 pc=0x474a21
created by runtime.createfing in goroutine 1
        runtime/mfinal.go:164 +0x3d

goroutine 6 gp=0xc000221a40 m=nil [GC worker (idle)]:
runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc0000b0750 sp=0xc0000b0730 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0000b07e0 sp=0xc0000b0750 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000b07e8 sp=0xc0000b07e0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 7 gp=0xc000221c00 m=nil [GC worker (idle)]:
runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc0000b0f50 sp=0xc0000b0f30 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0000b0fe0 sp=0xc0000b0f50 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000b0fe8 sp=0xc0000b0fe0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 17 gp=0xc000500000 m=nil [GC worker (idle)]:
runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc0000aa750 sp=0xc0000aa730 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0000aa7e0 sp=0xc0000aa750 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000aa7e8 sp=0xc0000aa7e0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 8 gp=0xc000221dc0 m=nil [GC worker (idle)]:
runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc0000b1750 sp=0xc0000b1730 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0000b17e0 sp=0xc0000b1750 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000b17e8 sp=0xc0000b17e0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 33 gp=0xc000102380 m=nil [GC worker (idle)]:
runtime.gopark(0x0?, 0x0?, 0x0?, 0x0?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc00011a750 sp=0xc00011a730 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc00011a7e0 sp=0xc00011a750 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc00011a7e8 sp=0xc00011a7e0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 9 gp=0xc0004d4000 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91be37a?, 0x3?, 0xa2?, 0x21?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc0000b1f50 sp=0xc0000b1f30 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0000b1fe0 sp=0xc0000b1f50 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000b1fe8 sp=0xc0000b1fe0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 34 gp=0xc000102540 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91ad57b?, 0x0?, 0x0?, 0x0?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc00011af50 sp=0xc00011af30 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc00011afe0 sp=0xc00011af50 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc00011afe8 sp=0xc00011afe0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 35 gp=0xc000102700 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91be4a6?, 0x1?, 0x8d?, 0xfa?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc00011b750 sp=0xc00011b730 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc00011b7e0 sp=0xc00011b750 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc00011b7e8 sp=0xc00011b7e0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 18 gp=0xc0005001c0 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91beadc?, 0x1?, 0x7?, 0x75?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc0000aaf50 sp=0xc0000aaf30 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0000aafe0 sp=0xc0000aaf50 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000aafe8 sp=0xc0000aafe0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 10 gp=0xc0004d41c0 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91bdefc?, 0x3?, 0x82?, 0x6d?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc000116750 sp=0xc000116730 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0001167e0 sp=0xc000116750 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0001167e8 sp=0xc0001167e0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 36 gp=0xc0001028c0 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91be47e?, 0x3?, 0x16?, 0x1c?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc00011bf50 sp=0xc00011bf30 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc00011bfe0 sp=0xc00011bf50 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc00011bfe8 sp=0xc00011bfe0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 19 gp=0xc000500380 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91be74e?, 0x3?, 0x2?, 0x26?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc0000ab750 sp=0xc0000ab730 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0000ab7e0 sp=0xc0000ab750 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000ab7e8 sp=0xc0000ab7e0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 11 gp=0xc0004d4380 m=nil [GC worker (idle)]:
runtime.gopark(0x33ca160?, 0x1?, 0x79?, 0x71?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc000116f50 sp=0xc000116f30 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc000116fe0 sp=0xc000116f50 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc000116fe8 sp=0xc000116fe0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 12 gp=0xc0004d4540 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91af0dd?, 0x1?, 0xc3?, 0xfb?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc000117750 sp=0xc000117730 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0001177e0 sp=0xc000117750 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0001177e8 sp=0xc0001177e0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 20 gp=0xc000500540 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91bdeca?, 0x3?, 0x91?, 0x22?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc0000abf50 sp=0xc0000abf30 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0000abfe0 sp=0xc0000abf50 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000abfe8 sp=0xc0000abfe0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 37 gp=0xc000102a80 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91be226?, 0x1?, 0xe0?, 0x7e?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc00011c750 sp=0xc00011c730 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc00011c7e0 sp=0xc00011c750 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc00011c7e8 sp=0xc00011c7e0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 21 gp=0xc000500700 m=nil [GC worker (idle)]:
runtime.gopark(0x33ca160?, 0x3?, 0xa6?, 0x31?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc0000ac750 sp=0xc0000ac730 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0000ac7e0 sp=0xc0000ac750 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000ac7e8 sp=0xc0000ac7e0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 22 gp=0xc0005008c0 m=nil [GC worker (idle)]:
runtime.gopark(0x33ca160?, 0x1?, 0x7f?, 0x8e?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc0000acf50 sp=0xc0000acf30 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0000acfe0 sp=0xc0000acf50 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0000acfe8 sp=0xc0000acfe0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 13 gp=0xc0004d4700 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91be5b4?, 0x3?, 0xbc?, 0x2?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc000117f50 sp=0xc000117f30 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc000117fe0 sp=0xc000117f50 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc000117fe8 sp=0xc000117fe0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 14 gp=0xc0004d48c0 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91be30c?, 0x1?, 0x8d?, 0x76?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc000118750 sp=0xc000118730 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0001187e0 sp=0xc000118750 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0001187e8 sp=0xc0001187e0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 15 gp=0xc0004d4a80 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91be640?, 0x3?, 0x78?, 0x5?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc000118f50 sp=0xc000118f30 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc000118fe0 sp=0xc000118f50 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc000118fe8 sp=0xc000118fe0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 16 gp=0xc0004d4c40 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91be334?, 0x1?, 0x89?, 0x4e?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc000119750 sp=0xc000119730 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0001197e0 sp=0xc000119750 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0001197e8 sp=0xc0001197e0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 49 gp=0xc0004d4e00 m=nil [GC worker (idle)]:
runtime.gopark(0x56cf91be41a?, 0x3?, 0x58?, 0x11?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc000119f50 sp=0xc000119f30 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc000119fe0 sp=0xc000119f50 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc000119fe8 sp=0xc000119fe0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 50 gp=0xc0004d4fc0 m=nil [GC worker (idle)]:
runtime.gopark(0x33ca160?, 0x1?, 0xab?, 0xe4?, 0x0?)
        runtime/proc.go:402 +0xce fp=0xc0004da750 sp=0xc0004da730 pc=0x440bce
runtime.gcBgMarkWorker()
        runtime/mgc.go:1310 +0xe5 fp=0xc0004da7e0 sp=0xc0004da750 pc=0x420ea5
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0004da7e8 sp=0xc0004da7e0 pc=0x474a21
created by runtime.gcBgMarkStartWorkers in goroutine 1
        runtime/mgc.go:1234 +0x1c

goroutine 59 gp=0xc000623180 m=nil [select]:
runtime.gopark(0xc0004ddf78?, 0x2?, 0x0?, 0x0?, 0xc0004ddf5c?)
        runtime/proc.go:402 +0xce fp=0xc0004dde00 sp=0xc0004ddde0 pc=0x440bce
runtime.selectgo(0xc0004ddf78, 0xc0004ddf58, 0x0?, 0x0, 0x0?, 0x1)
        runtime/select.go:327 +0x725 fp=0xc0004ddf20 sp=0xc0004dde00 pc=0x452ba5
k8s.io/klog/v2.(*flushDaemon).run.func1()
        k8s.io/klog/[email protected]/klog.go:1141 +0x117 fp=0xc0004ddfe0 sp=0xc0004ddf20 pc=0x70a577
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc0004ddfe8 sp=0xc0004ddfe0 pc=0x474a21
created by k8s.io/klog/v2.(*flushDaemon).run in goroutine 1
        k8s.io/klog/[email protected]/klog.go:1137 +0x171

goroutine 43 gp=0xc000623500 m=nil [IO wait]:
runtime.gopark(0x25cc1636106e14ca?, 0x8ff246bd82c8f665?, 0xca?, 0x14?, 0xb?)
        runtime/proc.go:402 +0xce fp=0xc00097d6f8 sp=0xc00097d6d8 pc=0x440bce
runtime.netpollblock(0x48a5d8?, 0x408946?, 0x0?)
        runtime/netpoll.go:573 +0xf7 fp=0xc00097d730 sp=0xc00097d6f8 pc=0x4399b7
internal/poll.runtime_pollWait(0x7fd9ea607eb0, 0x72)
        runtime/netpoll.go:345 +0x85 fp=0xc00097d750 sp=0xc00097d730 pc=0x46f0c5
internal/poll.(*pollDesc).wait(0xc0007cb280?, 0xc00080e000?, 0x0)
        internal/poll/fd_poll_runtime.go:84 +0x27 fp=0xc00097d778 sp=0xc00097d750 pc=0x4b2ee7
internal/poll.(*pollDesc).waitRead(...)
        internal/poll/fd_poll_runtime.go:89
internal/poll.(*FD).Read(0xc0007cb280, {0xc00080e000, 0x4000, 0x4000})
        internal/poll/fd_unix.go:164 +0x27a fp=0xc00097d810 sp=0xc00097d778 pc=0x4b41da
net.(*netFD).Read(0xc0007cb280, {0xc00080e000?, 0x7fd9ea612898?, 0xc0003ca648?})
        net/fd_posix.go:55 +0x25 fp=0xc00097d858 sp=0xc00097d810 pc=0x550765
net.(*conn).Read(0xc0007186b0, {0xc00080e000?, 0xc00097d938?, 0x411bbb?})
        net/net.go:185 +0x45 fp=0xc00097d8a0 sp=0xc00097d858 pc=0x560c05
net.(*TCPConn).Read(0xc0007fc540?, {0xc00080e000?, 0x3371880?, 0x2?})
        <autogenerated>:1 +0x25 fp=0xc00097d8d0 sp=0xc00097d8a0 pc=0x571225
crypto/tls.(*atLeastReader).Read(0xc0003ca648, {0xc00080e000?, 0x0?, 0xc0003ca648?})
        crypto/tls/conn.go:806 +0x3b fp=0xc00097d918 sp=0xc00097d8d0 pc=0x69d91b
bytes.(*Buffer).ReadFrom(0xc0007fc630, {0x23587a0, 0xc0003ca648})
        bytes/buffer.go:211 +0x98 fp=0xc00097d970 sp=0xc00097d918 pc=0x51b598
crypto/tls.(*Conn).readFromUntil(0xc0007fc388, {0x2359220, 0xc0007186b0}, 0xc00097d980?)
        crypto/tls/conn.go:828 +0xde fp=0xc00097d9a8 sp=0xc00097d970 pc=0x69dafe
crypto/tls.(*Conn).readRecordOrCCS(0xc0007fc388, 0x0)
        crypto/tls/conn.go:626 +0x3cf fp=0xc00097dc28 sp=0xc00097d9a8 pc=0x69ac0f
crypto/tls.(*Conn).readRecord(...)
        crypto/tls/conn.go:588
crypto/tls.(*Conn).Read(0xc0007fc388, {0xc000903000, 0x1000, 0xa302f1?})
        crypto/tls/conn.go:1370 +0x156 fp=0xc00097dc98 sp=0xc00097dc28 pc=0x6a14b6
bufio.(*Reader).Read(0xc000715920, {0xc0001ea820, 0x9, 0x0?})
        bufio/bufio.go:241 +0x197 fp=0xc00097dcd0 sp=0xc00097dc98 pc=0x575dd7
io.ReadAtLeast({0x2357780, 0xc000715920}, {0xc0001ea820, 0x9, 0x9}, 0x9)
        io/io.go:335 +0x90 fp=0xc00097dd18 sp=0xc00097dcd0 pc=0x4abcf0
io.ReadFull(...)
        io/io.go:354
golang.org/x/net/http2.readFrameHeader({0xc0001ea820, 0x9, 0x97ddc0?}, {0x2357780?, 0xc000715920?})
        golang.org/x/[email protected]/http2/frame.go:237 +0x65 fp=0xc00097dd68 sp=0xc00097dd18 pc=0xa28865
golang.org/x/net/http2.(*Framer).ReadFrame(0xc0001ea7e0)
        golang.org/x/[email protected]/http2/frame.go:498 +0x85 fp=0xc00097de10 sp=0xc00097dd68 pc=0xa28fa5
golang.org/x/net/http2.(*clientConnReadLoop).run(0xc00097dfa8)
        golang.org/x/[email protected]/http2/transport.go:2429 +0xd8 fp=0xc00097df60 sp=0xc00097de10 pc=0xa3dcb8
golang.org/x/net/http2.(*ClientConn).readLoop(0xc000002000)
        golang.org/x/[email protected]/http2/transport.go:2325 +0x65 fp=0xc00097dfc8 sp=0xc00097df60 pc=0xa3d2a5
golang.org/x/net/http2.(*ClientConn).readLoop-fm()
        <autogenerated>:1 +0x25 fp=0xc00097dfe0 sp=0xc00097dfc8 pc=0xa47125
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc00097dfe8 sp=0xc00097dfe0 pc=0x474a21
created by golang.org/x/net/http2.(*ClientConn).goRun in goroutine 42
        golang.org/x/[email protected]/http2/transport.go:369 +0x2d

goroutine 121 gp=0xc000682fc0 m=nil [select]:
runtime.gopark(0xc000a70ef8?, 0x6?, 0x38?, 0xa2?, 0xc000a70dac?)
        runtime/proc.go:402 +0xce fp=0xc000a70c28 sp=0xc000a70c08 pc=0x440bce
runtime.selectgo(0xc000a70ef8, 0xc000a70da0, 0xc000000007?, 0x0, 0x41f156?, 0x1)
        runtime/select.go:327 +0x725 fp=0xc000a70d48 sp=0xc000a70c28 pc=0x452ba5
golang.org/x/net/http2.(*clientStream).writeRequest(0xc000726f00, 0xc000802000)
        golang.org/x/[email protected]/http2/transport.go:1608 +0xd45 fp=0xc000a70f98 sp=0xc000a70d48 pc=0xa39285
golang.org/x/net/http2.(*clientStream).doRequest(0xc000726f00, 0x0?)
        golang.org/x/[email protected]/http2/transport.go:1436 +0x18 fp=0xc000a70fc0 sp=0xc000a70f98 pc=0xa384f8
golang.org/x/net/http2.(*ClientConn).roundTrip.func1()
        golang.org/x/[email protected]/http2/transport.go:1312 +0x1b fp=0xc000a70fe0 sp=0xc000a70fc0 pc=0xa384bb
runtime.goexit({})
        runtime/asm_amd64.s:1695 +0x1 fp=0xc000a70fe8 sp=0xc000a70fe0 pc=0x474a21
created by golang.org/x/net/http2.(*ClientConn).goRun in goroutine 1
        golang.org/x/[email protected]/http2/transport.go:369 +0x2d

rax    0xca
rbx    0x0
rcx    0x476843
rdx    0x0
rdi    0x3367e60
rsi    0x80
rbp    0x7ffdd90e3f40
rsp    0x7ffdd90e3ef8
r8     0x0
r9     0x0
r10    0x0
r11    0x286
r12    0xc000072008
r13    0x1
r14    0x3367080
r15    0x1
rip    0x476841
rflags 0x286
cs     0x33
fs     0x0
gs     0x0

Environment:

  • Kubernetes client and server versions (use kubectl version):

gcp cluster

Client Version: v1.29.7
Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3
Server Version: v1.29.7-gke.1104000

kind setup

Client Version: v1.28.12-dispatcher
Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3
Server Version: v1.31.0
WARNING: version difference between client (1.28) and server (1.31) exceeds the supported minor version skew of +/-1
  • Cloud provider or hardware configuration:
fira@fedora
-----------
OS: Fedora Linux 40 (Workstation Edition) x86_64
Host: MS-7E26 1.0
Kernel: 6.10.6-200.fc40.x86_64
Uptime: 1 hour, 45 mins
Packages: 2810 (rpm), 13 (flatpak)
Shell: bash 5.2.26
Resolution: 2560x1440
DE: GNOME 46.4
WM: Mutter
WM Theme: Adwaita
Theme: Adwaita [GTK2/3]
Icons: Adwaita [GTK2/3]
Terminal: zellij
CPU: AMD Ryzen 9 7900X (24) @ 5.733GHz
GPU: AMD ATI Radeon RX 7700 XT / 7800 XT
GPU: AMD ATI 12:00.0 Raphael
Memory: 7776MiB / 31189MiB

Tried this both in a working GCP cluster and in kind; same issue.

  • OS (e.g: cat /etc/os-release):
NAME="Fedora Linux"
VERSION="40 (Workstation Edition)"
ID=fedora
VERSION_ID=40
VERSION_CODENAME=""
PLATFORM_ID="platform:f40"
PRETTY_NAME="Fedora Linux 40 (Workstation Edition)"
ANSI_COLOR="0;38;2;60;110;180"
LOGO=fedora-logo-icon
CPE_NAME="cpe:/o:fedoraproject:fedora:40"
DEFAULT_HOSTNAME="fedora"
HOME_URL="https://fedoraproject.org/"
DOCUMENTATION_URL="https://docs.fedoraproject.org/en-US/fedora/f40/system-administrators-guide/"
SUPPORT_URL="https://ask.fedoraproject.org/"
BUG_REPORT_URL="https://bugzilla.redhat.com/"
REDHAT_BUGZILLA_PRODUCT="Fedora"
REDHAT_BUGZILLA_PRODUCT_VERSION=40
REDHAT_SUPPORT_PRODUCT="Fedora"
REDHAT_SUPPORT_PRODUCT_VERSION=40
SUPPORT_END=2025-05-13
VARIANT="Workstation Edition"
VARIANT_ID=workstation
  • Package
# I'm using the fedora rpm package
$ sudo dnf install kubectl
Last metadata expiration check: 1:16:46 ago on Sat 07 Sep 2024 03:43:01 AM CEST.
Package kubectl-491.0.0-1.x86_64 is already installed.
Dependencies resolved.
Nothing to do.
Complete!
fira42073 added the kind/bug label Sep 7, 2024
k8s-ci-robot added the needs-triage label Sep 7, 2024
@ardaguclu
Member

Remote command executions such as exec, attach, cp, etc. handle SIGQUIT (triggered by ctrl + \) properly by exiting after printing a `^\command terminated with exit code 131` error. On the other hand, as @fira42073 stated, the logs command panics unexpectedly. This looks like an issue to me.
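
As an illustration only (a minimal sketch, not the actual kubectl implementation), that handling boils down to registering a SIGQUIT handler, which disables the Go runtime's default goroutine dump, and exiting with 128 + 3 = 131:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Registering interest in SIGQUIT disables the runtime's default
	// behaviour for it (goroutine dump + exit).
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGQUIT)

	// ... the remote command / log stream would run here ...

	<-quit // ctrl+\ pressed
	fmt.Fprintln(os.Stderr, "command terminated with exit code 131")
	os.Exit(131) // 128 + SIGQUIT(3), matching the message described above
}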

@fira42073
Author

I'm willing to contribute if you could point me to some possible causes. Right now I'm not sure what's happening, because this doesn't really look like a panic either. I see some synchronization primitives in the trace, like sync.(*Cond).Wait, so I assume it may be some concurrency issue.

ChatGPT suggests a similar conclusion

The trace you shared appears to be from a Go program involving several goroutines, most of which are idle garbage collector (GC) workers. Here's a quick breakdown of some key elements:

  1. Garbage Collector Workers:

    • The majority of the goroutines in this trace, like goroutine 35, goroutine 18, and others, are GC background workers. They are in an idle state waiting for the garbage collector to signal work (runtime.gopark is waiting for the GC system to start).
    • These goroutines are created by the Go runtime to handle the process of garbage collection, which reclaims memory that is no longer in use.
  2. Network Communication:

    • Goroutine 43 seems to be waiting for IO related to network communication. It is using the netpollblock mechanism, which is typical for blocking on network events like reading or writing to a socket.
    • Goroutine 59 is part of a select block, commonly used in Go to manage multiple channel operations or asynchronous IO. It could be handling communication or synchronization between goroutines.
  3. TLS (Transport Layer Security):

    • Goroutine 43 also includes parts where data is being read from a crypto/tls connection, which is involved in secure communications (likely HTTPS). These involve operations like reading from the network and handling TLS record processing.
  4. HTTP2 Communication:

    • Goroutine 43 also handles HTTP2 frame reading (golang.org/x/net/http2.readFrameHeader). This likely indicates that the program is communicating over HTTP2, which is common in modern web services.

Potential Areas to Check:

  • Garbage Collection: A large number of idle GC workers may not necessarily be a problem, but you should ensure that garbage collection isn't causing unnecessary overhead, especially in high-load scenarios.
  • Network Performance: If you're experiencing slow performance, some of the goroutines seem to be involved in network IO. Checking for blocked or slow connections might help.
  • Concurrency: Look at how many goroutines are running and ensure that the system isn't creating more goroutines than needed, as this can lead to excess memory consumption or context switching overhead.
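
For context on the dump itself: the ^\SIGQUIT: quit header followed by a stack for every goroutine is the Go runtime's default handling of an unhandled SIGQUIT (dump all goroutines, then exit), not a panic. A minimal sketch, completely outside of kubectl, that produces the same kind of output:

package main

import (
	"bufio"
	"fmt"
	"os"
)

func main() {
	// Block on a read, roughly the way kubectl blocks on the log stream.
	// With no SIGQUIT handler installed, pressing ctrl+\ here makes the
	// runtime print "SIGQUIT: quit" plus a dump of every goroutine and
	// then exit; no panic() is involved.
	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	fmt.Println(line, err)
}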

@xyz-li

xyz-li commented Sep 10, 2024

func (o LogsOptions) RunLogs() error {
	var requests map[corev1.ObjectReference]rest.ResponseWrapper
	var err error
	if o.AllPods {
		requests, err = o.AllPodLogsForObject(o.RESTClientGetter, o.Object, o.Options, o.GetPodTimeout, o.AllContainers)
	} else {
		requests, err = o.LogsForObject(o.RESTClientGetter, o.Object, o.Options, o.GetPodTimeout, o.AllContainers)
	}
	if err != nil {
		return err
	}

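	// Wrap the consume calls in an interrupt handler so termination
	// signals trigger cleanup and a clean exit instead of falling
	// through to the runtime's default SIGQUIT goroutine dump.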
	intr := interrupt.New(nil, func() {})
	return intr.Run(func() error {
		if o.Follow && len(requests) > 1 {
			if len(requests) > o.MaxFollowConcurrency {
				return fmt.Errorf(
					"you are attempting to follow %d log streams, but maximum allowed concurrency is %d, use --max-log-requests to increase the limit",
					len(requests), o.MaxFollowConcurrency,
				)
			}

			return o.parallelConsumeRequest(requests)
		}

		return o.sequentialConsumeRequest(requests)
	})
}

@fira42073 You can try this.
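
If it helps while digging in: a hypothetical, simplified version of what the intr.Run wrapper above provides (not the real k8s.io/kubectl/pkg/util/interrupt code): run the blocking consume function, but if a termination signal such as SIGQUIT arrives first, run the cleanup and return instead of letting the runtime dump every goroutine.

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// runWithInterrupt is a simplified stand-in for intr.Run above.
func runWithInterrupt(cleanup func(), fn func() error) error {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	defer signal.Stop(sigs)

	errc := make(chan error, 1)
	go func() { errc <- fn() }()

	select {
	case err := <-errc:
		return err
	case sig := <-sigs:
		cleanup()
		return fmt.Errorf("interrupted by %v", sig)
	}
}

func main() {
	err := runWithInterrupt(
		func() { fmt.Fprintln(os.Stderr, "cleaning up") },
		func() error { time.Sleep(time.Hour); return nil }, // stands in for sequentialConsumeRequest
	)
	fmt.Println(err)
}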

@mpuckett159
Contributor

/triage accepted

k8s-ci-robot added the triage/accepted label and removed the needs-triage label Sep 11, 2024