perf(term): add benchmarks
lihop committed Jun 9, 2024
1 parent 9d76482 commit 0d04fd1
Showing 10 changed files with 288 additions and 0 deletions.
1 change: 1 addition & 0 deletions .gitattributes
@@ -17,6 +17,7 @@
# Files to exclude from asset-lib download.
/addons/gd-plug export-ignore
/default_env.tres export-ignore
/benchmark export-ignore
/docs export-ignore
/.env.example export-ignore
/examples export-ignore
71 changes: 71 additions & 0 deletions .github/workflows/main.yml
@@ -312,6 +312,77 @@ jobs:
name: failed-screenshots
path: test/visual_regression/screenshots

benchmark:
name: Benchmark (${{matrix.benchmark}})
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
benchmark:
[
editor_launch,
cursor_motion,
dense_cells,
light_cells,
scrolling,
scrolling_bottom_region,
scrolling_bottom_small_region,
scrolling_fullscreen,
scrolling_top_region,
scrolling_top_small_region,
unicode,
]
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Setup Godot
uses: lihop/setup-godot@v2
with:
version: "4.2.2-stable"
- name: Install just
uses: taiki-e/install-action@just
- name: Import assets
shell: bash
run: godot --editor --headless --quit-after 100 || true
- name: Wait for build
uses: fountainhead/action-wait-for-check@v1.2.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
checkName: "Build (linux, x86_64, debug) #${{ github.run_number }}"
ref: ${{ github.event.pull_request.head.sha || github.sha }}
- name: Install binary build artifacts
uses: actions/download-artifact@v4
with:
path: addons/godot_xterm/native/bin
merge-multiple: true
- name: Benchmark
shell: bash
run: just bench ${{matrix.benchmark}}
- name: Upload results
uses: actions/upload-artifact@v4
with:
name: benchmark-results-${{ matrix.benchmark }}
path: benchmark/results/*.json

publish-benchmarks:
name: Process Benchmarks
runs-on: ubuntu-latest
needs: [benchmark]
steps:
- uses: actions/upload-artifact/merge@v4
with:
name: benchmark-results
pattern: "benchmark-results-*"
delete-merged: true
- uses: actions/download-artifact@v4
with:
name: benchmark-results
path: benchmark/results/
- name: Merge results
run: jq -s '[.[][]]' benchmark/results/*.json > benchmark/results.json
- name: Print results
run: cat benchmark/results.json
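
Each per-benchmark file written by benchmark.gd contains a JSON array, so `jq -s '[.[][]]'` flattens the arrays from all files into one list. A hypothetical merged results.json (names and values illustrative only) would look like:

    [
      { "name": "scrolling", "value": 102.1, "range": "0.56" },
      { "name": "scrolling - render cpu", "value": 1.234 },
      { "name": "scrolling - render gpu", "value": 0.789 }
    ]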

merge-artifacts:
name: Merge Artifacts
runs-on: ubuntu-latest
3 changes: 3 additions & 0 deletions .gitmodules
@@ -10,3 +10,6 @@
[submodule "addons/godot_xterm/native/thirdparty/node-pty"]
path = addons/godot_xterm/native/thirdparty/node-pty
url = https://github.com/microsoft/node-pty
[submodule "benchmark/vtebench"]
path = benchmark/vtebench
url = git@github.com:alacritty/vtebench
9 changes: 9 additions & 0 deletions Justfile
@@ -32,3 +32,12 @@ test-visual:

uninstall:
{{godot}} --headless -s plug.gd uninstall

bench name="":
@if [ "{{name}}" = "editor_launch" ]; then \
./benchmark/editor_launch.sh {{godot}}; \
elif [ -n "{{name}}" ]; then \
{{godot}} --windowed --resolution 800x600 --position 0,0 benchmark/benchmark.tscn -- --benchmark={{name}}; \
else \
ls -1 benchmark/vtebench/benchmarks | xargs -I {} just bench {} && just bench editor_launch; \
fi
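
The recipe dispatches on the benchmark name, and a bare `just bench` runs every vtebench benchmark followed by the editor-launch benchmark. A usage sketch (benchmark names are those in the CI matrix above):

    just bench                   # run all benchmarks
    just bench scrolling         # run a single vtebench benchmark
    just bench editor_launch     # time editor startup via editor_launch.sh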
121 changes: 121 additions & 0 deletions benchmark/benchmark.gd
@@ -0,0 +1,121 @@
extends Control


class Results:
var render_cpu := 0.0
var render_gpu := 0.0
var vtebench := {value = 0.0, range = 0.0}


var terminal_exit_code := -1


func _ready():
var timeout := 120
var benchmark := ""

var args = OS.get_cmdline_user_args() # everything after "--" on the command line
for arg in args:
if arg.begins_with("--benchmark"):
benchmark = arg.split("=")[1]

if benchmark.is_empty():
_quit_with_error("No benchmark specified")

# Measure per-viewport render times so CPU/GPU frame costs can be read back each frame.
RenderingServer.viewport_set_measure_render_time(get_tree().root.get_viewport_rid(), true)

var results := Results.new()
var begin_time := Time.get_ticks_usec()
var frames_captured := 0

$Terminal.run_benchmark(benchmark)
await $Terminal.started

while terminal_exit_code == -1:
await get_tree().process_frame

if Time.get_ticks_usec() - begin_time > (timeout * 1e6):
_quit_with_error("Benchmark took longer than %ss to run" % timeout)

results.render_cpu += (
RenderingServer.viewport_get_measured_render_time_cpu(
get_tree().root.get_viewport_rid()
)
+ RenderingServer.get_frame_setup_time_cpu()
)
results.render_gpu += RenderingServer.viewport_get_measured_render_time_gpu(
get_tree().root.get_viewport_rid()
)
frames_captured += 1

if terminal_exit_code != 0:
_quit_with_error("Terminal exited with error code: %d" % terminal_exit_code)

results.render_cpu /= float(max(1.0, float(frames_captured)))
results.render_gpu /= float(max(1.0, float(frames_captured)))

results.vtebench = _process_dat_results("res://benchmark/results/%s.dat" % benchmark)

var json_results = (
JSON
. stringify(
[
{
name = benchmark,
value = _round(results.vtebench.value),
range = results.vtebench.range,
},
{
name = "%s - render cpu" % benchmark,
value = _round(results.render_cpu),
},
{
name = "%s - render gpu" % benchmark,
value = _round(results.render_gpu),
}
],
" "
)
)

var file = FileAccess.open("res://benchmark/results/%s.json" % benchmark, FileAccess.WRITE)
file.store_string(json_results)

print(json_results)
get_tree().quit(terminal_exit_code)


func _on_terminal_exited(exit_code: int):
terminal_exit_code = exit_code


# Round `val` to `sig_figs` significant figures, e.g. _round(123.456) == 123.5.
func _round(val: float, sig_figs := 4) -> float:
return snapped(val, pow(10, floor(log(val) / log(10)) - sig_figs + 1))


func _process_dat_results(path: String) -> Dictionary:
var file := FileAccess.open(path, FileAccess.READ)
var samples := []

file.get_line() # Skip the first 'header' line.
while !file.eof_reached():
var line := file.get_line().strip_edges()
if line.is_valid_float():
samples.append(line.to_float())

if samples.size() < 2:
_quit_with_error("Not enough samples")

var avg: float = (samples.reduce(func(acc, n): return acc + n, 0)) / samples.size()

# Sample variance with Bessel's correction (n - 1); sqrt below gives the standard deviation.
var std_dev := 0.0
for sample in samples:
std_dev += pow(sample - avg, 2)
std_dev /= (samples.size() - 1)

return {value = avg, range = "%.2f" % _round(sqrt(std_dev))}


func _quit_with_error(error_msg: String):
await get_tree().process_frame
push_error(error_msg)
get_tree().quit(1)
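
As a sanity check of the statistics in _process_dat_results, take hypothetical samples [10.0, 12.0, 14.0]: avg = 12.0, the squared deviations sum to 8.0, dividing by n - 1 = 2 gives a variance of 4.0, so the reported range is sqrt(4.0) formatted as "2.00".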
26 changes: 26 additions & 0 deletions benchmark/benchmark.tscn
@@ -0,0 +1,26 @@
[gd_scene load_steps=5 format=3 uid="uid://b2axn64mqnt8n"]

[ext_resource type="Script" path="res://benchmark/benchmark.gd" id="1_tmqb5"]
[ext_resource type="PackedScene" uid="uid://cysad55lwtnc6" path="res://examples/terminal/terminal.tscn" id="2_3raq0"]
[ext_resource type="Script" path="res://benchmark/terminal_benchmark.gd" id="3_8t8od"]
[ext_resource type="FontVariation" uid="uid://ckq73bs2fwsie" path="res://themes/fonts/regular.tres" id="3_hnrrm"]

[node name="Benchmark" type="Control"]
layout_mode = 3
anchors_preset = 15
anchor_right = 1.0
anchor_bottom = 1.0
grow_horizontal = 2
grow_vertical = 2
script = ExtResource("1_tmqb5")

[node name="Terminal" parent="." instance=ExtResource("2_3raq0")]
layout_mode = 1
focus_mode = 0
theme_override_fonts/normal_font = ExtResource("3_hnrrm")
script = ExtResource("3_8t8od")

[connection signal="exited" from="Terminal" to="." method="_on_terminal_exited"]
[connection signal="data_received" from="Terminal/PTY" to="Terminal" method="_on_pty_data_received"]

[editable path="Terminal"]
23 changes: 23 additions & 0 deletions benchmark/editor_launch.sh
@@ -0,0 +1,23 @@
#!/usr/bin/env bash

set -e

godot=${1:-godot}

if ! command -v $godot &> /dev/null; then
echo "Error: '$godot' command not found. Please provide a valid path to the Godot executable."
exit 1
fi

results_file=benchmark/results/editor_launch.json
# `time -p` prints "real", "user" and "sys" as its last three lines; keep the "real" wall-clock seconds.
value=$({ time -p $godot --editor --quit; } 2>&1 | tail -n3 | head -n1 | cut -d' ' -f2)
cat <<EOF > $results_file
[
{
"name": "editor_launch",
"value": $value
}
]
EOF
cat $results_file
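
A hypothetical run (timing illustrative only):

    $ ./benchmark/editor_launch.sh godot
    [
      {
        "name": "editor_launch",
        "value": 5.12
      }
    ]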

2 changes: 2 additions & 0 deletions benchmark/results/.gitignore
@@ -0,0 +1,2 @@
*
!.gitignore
31 changes: 31 additions & 0 deletions benchmark/terminal_benchmark.gd
@@ -0,0 +1,31 @@
extends "res://examples/terminal/terminal.gd"

signal started
signal exited(exit_code: int)

var vtebench_dir := ProjectSettings.globalize_path("res://benchmark/vtebench")


func _ready():
pty.connect("exited", self._on_exit)


func run_benchmark(benchmark):
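# Run vtebench via cargo in an 87x29 PTY; samples are written to benchmark/results/<name>.dat.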
pty.fork(
"cargo",
["run", "--", "-b", "benchmarks/%s" % benchmark, "--dat", "../results/%s.dat" % benchmark],
vtebench_dir,
87,
29
)


func _on_exit(exit_code, _signal):
exited.emit(exit_code)


func _on_pty_data_received(data: PackedByteArray):
# Listen for the reset sequence (\x1bc), to determine that the benchmark has started.
if data.slice(0, 2) == PackedByteArray([27, 99]):
$PTY.disconnect("data_received", _on_pty_data_received)
started.emit()
1 change: 1 addition & 0 deletions benchmark/vtebench
Submodule vtebench added at c75155
