From 5896e8656262049d7a14939a600cb9a4e723ead9 Mon Sep 17 00:00:00 2001 From: Arthurvroum Date: Tue, 14 Apr 2026 11:24:39 +0200 Subject: [PATCH 01/12] ci: fix ubuntu repository resolution and enable on current branch --- .github/workflows/ci.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a3dc137..30c42e7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -5,7 +5,7 @@ on: pull_request: branches: [main] push: - branches: [main] + branches: [main, "70-investigation-excessive-ci-execution-time-on-ubuntu-runners"] workflow_dispatch: concurrency: @@ -43,7 +43,8 @@ jobs: ninja-build ccache lld # Install LLVM and Clang 20 wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add - - sudo apt-add-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-20 main" + UBUNTU_CODENAME=$(lsb_release -cs) + sudo apt-add-repository "deb http://apt.llvm.org/${UBUNTU_CODENAME}/ llvm-toolchain-${UBUNTU_CODENAME}-20 main" sudo apt-get update sudo apt-get install -y llvm-20 llvm-20-dev clang-20 libclang-20-dev echo "LLVM_DIR=/usr/lib/llvm-20/lib/cmake/llvm" >> $GITHUB_ENV From 034820c8891f067ba9504e76db48f0238c2ef0b4 Mon Sep 17 00:00:00 2001 From: Arthurvroum Date: Tue, 14 Apr 2026 11:44:11 +0200 Subject: [PATCH 02/12] ci: cache test artifacts and use file hashing in run_test to avoid cache invalidation --- .github/workflows/ci.yml | 8 ++++++++ run_test.py | 31 ++++++++++++++++++++++++++----- 2 files changed, 34 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 30c42e7..180b034 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -109,6 +109,14 @@ jobs: run: ccache -s # Tests + - name: Restore run_test cache + uses: actions/cache@v4 + with: + path: .cache/run_test + key: run-test-${{ runner.os }}-${{ hashFiles('build/stack_usage_analyzer', 'test/**', 'run_test.py') }} + restore-keys: | + 
run-test-${{ runner.os }}- + - name: Test Stack Usage Analyzer timeout-minutes: 45 run: | diff --git a/run_test.py b/run_test.py index 580c149..be16554 100755 --- a/run_test.py +++ b/run_test.py @@ -38,6 +38,29 @@ class TestRunConfig: RUN_CONFIG = TestRunConfig() _CACHE_LOCK = threading.Lock() _MEM_CACHE = {} +_FILE_HASH_CACHE = {} + +def _get_file_hash(p: Path) -> str: + path_str = str(p) + try: + st = p.stat() + except OSError: + return "" + + # Use st_mtime_ns as a cache key for the hash + cache_key = (path_str, st.st_mtime_ns, st.st_size) + with _CACHE_LOCK: + if cache_key in _FILE_HASH_CACHE: + return _FILE_HASH_CACHE[cache_key] + + try: + h = hashlib.sha256(p.read_bytes()).hexdigest() + except OSError: + h = "" + + with _CACHE_LOCK: + _FILE_HASH_CACHE[cache_key] = h + return h # Set to True while the top-level parallel check phase is running. # Prevents nested ThreadPoolExecutor creation (N² process explosion). _PARALLEL_PHASE = False @@ -170,11 +193,9 @@ def _collect_cache_dependencies(args): candidates.add(p.resolve()) for p in sorted(candidates, key=lambda x: str(x)): - try: - st = p.stat() - except OSError: - continue - deps.append([str(p), st.st_mtime_ns, st.st_size]) + h = _get_file_hash(p) + if h: + deps.append([str(p), h]) return deps From da197e4ef7d1acea9b00290b6d6b4881b3942624 Mon Sep 17 00:00:00 2001 From: Arthurvroum Date: Tue, 14 Apr 2026 11:56:48 +0200 Subject: [PATCH 03/12] ci: parallelize run_code_analysis.py to reduce execution time --- scripts/ci/run_code_analysis.py | 99 ++++++++++++++++++++++++--------- 1 file changed, 72 insertions(+), 27 deletions(-) diff --git a/scripts/ci/run_code_analysis.py b/scripts/ci/run_code_analysis.py index 4ea7184..32c437e 100755 --- a/scripts/ci/run_code_analysis.py +++ b/scripts/ci/run_code_analysis.py @@ -7,6 +7,9 @@ import json import subprocess import sys +import os +import math +import concurrent.futures from pathlib import Path from typing import Iterable @@ -380,38 +383,80 @@ def main() -> int: 
ensure_parent(Path(args.sarif_out)) sarif_out_path = str(Path(args.sarif_out).resolve()) - print(f"Running analyzer on {len(selected_inputs)} file(s).") - cmd = analyzer_cmd( - analyzer=analyzer, - inputs=selected_inputs, - fmt="json", - compdb_path=compdb_path, - base_dir=args.base_dir, - extra_args=args.analyzer_arg, - sarif_out=sarif_out_path, - ) - run = subprocess.run(cmd, check=False, capture_output=True, text=True) - if run.returncode != 0: - if run.stdout: - sys.stdout.write(run.stdout) - if run.stderr: - sys.stderr.write(run.stderr) - return run.returncode - - try: - payload = json.loads(run.stdout) - except json.JSONDecodeError as exc: - print(f"Analyzer returned invalid JSON: {exc}", file=sys.stderr) + jobs = int(os.environ.get("ANALYZER_JOBS", os.cpu_count() or 1)) + chunk_size = max(1, math.ceil(len(selected_inputs) / jobs)) + chunks = [selected_inputs[i:i + chunk_size] for i in range(0, len(selected_inputs), chunk_size)] + + print(f"Running analyzer on {len(selected_inputs)} file(s) across {len(chunks)} job(s).") + + def run_chunk(i, chunk): + chunk_sarif = f"{sarif_out_path}.chunk{i}" if sarif_out_path else None + + cmd = analyzer_cmd( + analyzer=analyzer, + inputs=chunk, + fmt="json", + compdb_path=compdb_path, + base_dir=args.base_dir, + extra_args=args.analyzer_arg, + sarif_out=chunk_sarif, + ) + run = subprocess.run(cmd, check=False, capture_output=True, text=True) + return i, run, chunk_sarif + + diags = [] + has_error = False + all_sarif_files = [] + + with concurrent.futures.ThreadPoolExecutor(max_workers=jobs) as executor: + futures = [executor.submit(run_chunk, i, c) for i, c in enumerate(chunks)] + for fut in concurrent.futures.as_completed(futures): + i, run, chunk_sarif = fut.result() + + if chunk_sarif and os.path.exists(chunk_sarif): + all_sarif_files.append(chunk_sarif) + + if run.returncode != 0: + if run.stdout: + sys.stdout.write(run.stdout) + if run.stderr: + sys.stderr.write(run.stderr) + has_error = True + else: + try: + payload 
= json.loads(run.stdout) + d = payload.get("diagnostics", []) + if isinstance(d, list): + diags.extend(d) + except json.JSONDecodeError as exc: + print(f"Analyzer returned invalid JSON: {exc}", file=sys.stderr) + has_error = True + + if has_error: return 2 + if sarif_out_path and all_sarif_files: + merged = None + for p in all_sarif_files: + with open(p, 'r') as f: + try: + data = json.load(f) + if merged is None: + merged = data + else: + if data.get("runs") and merged.get("runs"): + merged["runs"][0].setdefault("results", []).extend(data["runs"][0].get("results", [])) + except json.JSONDecodeError: + pass + os.unlink(p) + if merged: + with open(sarif_out_path, 'w') as f: + json.dump(merged, f) + if args.json_out: json_output_path = Path(args.json_out) ensure_parent(json_output_path) - json_output_path.write_text(run.stdout, encoding="utf-8") - - diags = payload.get("diagnostics", []) - if not isinstance(diags, list): - diags = [] + json_output_path.write_text(json.dumps({"diagnostics": diags}, indent=2), encoding="utf-8") errors = sum(1 for d in diags if sev(d) == "ERROR") warnings = sum(1 for d in diags if sev(d) == "WARNING") From 9045e8941be8f56c867e248475d68e172bba1598 Mon Sep 17 00:00:00 2001 From: Arthurvroum Date: Tue, 14 Apr 2026 12:04:24 +0200 Subject: [PATCH 04/12] ci: enable IR compilation caching in run_test.py execution to dramatically decrease CI runtime --- .github/workflows/ci.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 180b034..905bb07 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -117,12 +117,20 @@ jobs: restore-keys: | run-test-${{ runner.os }}- + - name: Restore compile_ir cache + uses: actions/cache@v4 + with: + path: .cache/compile-ir + key: compile-ir-${{ runner.os }}-${{ hashFiles('test/**') }} + restore-keys: | + compile-ir-${{ runner.os }}- + - name: Test Stack Usage Analyzer timeout-minutes: 45 run: | 
TEST_JOBS="$(python3 -c 'import os; print(max(1, min(8, os.cpu_count() or 1)))')" echo "Running run_test.py with ${TEST_JOBS} job(s)" - EXTRA_ANALYZER_ARGS="" + EXTRA_ANALYZER_ARGS="--compile-ir-cache-dir=.cache/compile-ir" CORETRACE_RUN_TEST_EXTRA_ANALYZER_ARGS="${EXTRA_ANALYZER_ARGS}" \ python3 -u run_test.py --jobs="${TEST_JOBS}" From 70f2256a6eeea5cb3b1e90dbcfeda2c8a80a39d6 Mon Sep 17 00:00:00 2001 From: Arthurvroum Date: Fri, 17 Apr 2026 06:31:52 +0200 Subject: [PATCH 05/12] chore: inject telemetry in run_test.py to debug infinite loop --- run_test.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/run_test.py b/run_test.py index be16554..10f077c 100755 --- a/run_test.py +++ b/run_test.py @@ -54,7 +54,11 @@ def _get_file_hash(p: Path) -> str: return _FILE_HASH_CACHE[cache_key] try: - h = hashlib.sha256(p.read_bytes()).hexdigest() + h_obj = hashlib.sha256() + with p.open('rb') as f: + for byte_block in iter(lambda: f.read(65536), b""): + h_obj.update(byte_block) + h = h_obj.hexdigest() except OSError: h = "" @@ -2922,6 +2926,8 @@ def check_file(c_path: Path): """ Check that, for this file, all expectations are present in the analyzer output. 
""" + sys.stderr.write(f"[DEBUG] Started check_file for: {c_path.name}\n") + sys.stderr.flush() report_lines = [f"=== Testing {c_path} ==="] ( expectations, @@ -2937,6 +2943,8 @@ def check_file(c_path: Path): ) if not expectations and not negative_expectations and not strict_enabled: report_lines.append(" (no expectations found, skipping)") + sys.stderr.write(f"[DEBUG] Finished check_file (skip) for: {c_path.name}\n") + sys.stderr.flush() return True, 0, 0, "\n".join(report_lines) + "\n\n" def evaluate_pass(pass_name: str, analyzer_output: str): @@ -3079,16 +3087,22 @@ def evaluate_pass(pass_name: str, analyzer_output: str): total += smt_total passed += smt_passed + sys.stderr.write(f"[DEBUG] Finished check_file for: {c_path.name}\n") + sys.stderr.flush() return all_ok, total, passed, "\n".join(report_lines) + "\n\n" def _run_check_parallel(dispatch, fn): """Run a check function in a worker thread with output capture.""" + sys.stderr.write(f"[DEBUG] Starting check: {fn.__name__}\n") + sys.stderr.flush() dispatch.register_thread() try: ok = fn() finally: output = dispatch.unregister_thread() + sys.stderr.write(f"[DEBUG] Finished check: {fn.__name__}\n") + sys.stderr.flush() return ok, output From c092690832e7c770393527b5c0b28c5ff61a8f01 Mon Sep 17 00:00:00 2001 From: Arthurvroum Date: Fri, 17 Apr 2026 06:57:45 +0200 Subject: [PATCH 06/12] Revert "chore: inject telemetry in run_test.py to debug infinite loop" This reverts commit 70f2256a6eeea5cb3b1e90dbcfeda2c8a80a39d6. 
--- run_test.py | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/run_test.py b/run_test.py index 10f077c..be16554 100755 --- a/run_test.py +++ b/run_test.py @@ -54,11 +54,7 @@ def _get_file_hash(p: Path) -> str: return _FILE_HASH_CACHE[cache_key] try: - h_obj = hashlib.sha256() - with p.open('rb') as f: - for byte_block in iter(lambda: f.read(65536), b""): - h_obj.update(byte_block) - h = h_obj.hexdigest() + h = hashlib.sha256(p.read_bytes()).hexdigest() except OSError: h = "" @@ -2926,8 +2922,6 @@ def check_file(c_path: Path): """ Check that, for this file, all expectations are present in the analyzer output. """ - sys.stderr.write(f"[DEBUG] Started check_file for: {c_path.name}\n") - sys.stderr.flush() report_lines = [f"=== Testing {c_path} ==="] ( expectations, @@ -2943,8 +2937,6 @@ def check_file(c_path: Path): ) if not expectations and not negative_expectations and not strict_enabled: report_lines.append(" (no expectations found, skipping)") - sys.stderr.write(f"[DEBUG] Finished check_file (skip) for: {c_path.name}\n") - sys.stderr.flush() return True, 0, 0, "\n".join(report_lines) + "\n\n" def evaluate_pass(pass_name: str, analyzer_output: str): @@ -3087,22 +3079,16 @@ def evaluate_pass(pass_name: str, analyzer_output: str): total += smt_total passed += smt_passed - sys.stderr.write(f"[DEBUG] Finished check_file for: {c_path.name}\n") - sys.stderr.flush() return all_ok, total, passed, "\n".join(report_lines) + "\n\n" def _run_check_parallel(dispatch, fn): """Run a check function in a worker thread with output capture.""" - sys.stderr.write(f"[DEBUG] Starting check: {fn.__name__}\n") - sys.stderr.flush() dispatch.register_thread() try: ok = fn() finally: output = dispatch.unregister_thread() - sys.stderr.write(f"[DEBUG] Finished check: {fn.__name__}\n") - sys.stderr.flush() return ok, output From f959f4df03a66984fe0596c16003187d051a58c3 Mon Sep 17 00:00:00 2001 From: Arthurvroum Date: Fri, 17 Apr 2026 07:04:33 +0200 Subject: 
[PATCH 07/12] chore: re-trigger CI to verify pipeline speed and stability

From b88780b2964ad552deaf21ee1747ec8e6f051e4a Mon Sep 17 00:00:00 2001
From: Arthurvroum
Date: Tue, 21 Apr 2026 11:39:40 +0200
Subject: [PATCH 08/12] chore: retry CI build to bypass transient apt network error

From 1c4a7ee52030e70f25204033a8892ae01bb9094a Mon Sep 17 00:00:00 2001
From: Arthurvroum
Date: Tue, 21 Apr 2026 11:49:00 +0200
Subject: [PATCH 09/12] ci: remove redundant branch push trigger to fix cancellation statuses

---
 .github/workflows/ci.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 905bb07..8a406db 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -5,7 +5,7 @@ on:
   pull_request:
     branches: [main]
   push:
-    branches: [main, "70-investigation-excessive-ci-execution-time-on-ubuntu-runners"]
+    branches: [main]
   workflow_dispatch:
 
 concurrency:

From 5be1a7858b81677d2666adb6ae4365ec5d911c85 Mon Sep 17 00:00:00 2001
From: Hugo
Date: Wed, 22 Apr 2026 12:55:50 +0900
Subject: [PATCH 10/12] test(fail expected): try CI failure

---
 test/alloca/wrong-alloca.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
 create mode 100644 test/alloca/wrong-alloca.c

diff --git a/test/alloca/wrong-alloca.c b/test/alloca/wrong-alloca.c
new file mode 100644
index 0000000..41aefd4
--- /dev/null
+++ b/test/alloca/wrong-alloca.c
@@ -0,0 +1,19 @@
+#include <stdint.h>
+#include <stddef.h>
+#include <alloca.h>
+
+int foo(uint8_t small_size)
+{
+    size_t size_allocation = (size_t)small_size * 1024;
+
+    char* buff = (char*)alloca(size_allocation);
+
+    if (!buff)
+        goto error;
+
+    return 0;
+
+error:
+
+    return 1;
+}

From c0d9986079f1beb30dac84ca5204cabeb65c7165 Mon Sep 17 00:00:00 2001
From: Hugo
Date: Wed, 22 Apr 2026 13:01:48 +0900
Subject: [PATCH 11/12] test(wrong-test): patch the wrong test

---
 test/alloca/wrong-alloca.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/test/alloca/wrong-alloca.c b/test/alloca/wrong-alloca.c
index 41aefd4..e485429 100644
--- a/test/alloca/wrong-alloca.c
+++ b/test/alloca/wrong-alloca.c
@@ -17,3 +17,14 @@ int foo(uint8_t small_size)
 
     return 1;
 }
+
+// at line 9, column 25
+// [ !!Warn ] dynamic stack allocation detected for variable 'buff'
+// ↳ allocated type: i8
+// ↳ size of this allocation is not compile-time constant (VLA / variable alloca) and may lead to unbounded stack usage
+
+// at line 9, column 25
+// [ !!Warn ] user-controlled alloca size for variable 'buff'
+// ↳ allocation performed via alloca/VLA; stack usage grows with runtime value
+// ↳ size is unbounded at compile time
+// ↳ size depends on user-controlled input (function argument or non-local value)

From 538bfd3e788875e5baade667fe82f8f2794c66bb Mon Sep 17 00:00:00 2001
From: Hugo
Date: Wed, 22 Apr 2026 13:02:12 +0900
Subject: [PATCH 12/12] test(wrong-test): patch the wrong test

---
 test/alloca/wrong-alloca.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/alloca/wrong-alloca.c b/test/alloca/wrong-alloca.c
index e485429..cb2fb9a 100644
--- a/test/alloca/wrong-alloca.c
+++ b/test/alloca/wrong-alloca.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: Apache-2.0
 #include <stdint.h>
 #include <stddef.h>
 #include <alloca.h>
@@ -5,7 +6,6 @@
 int foo(uint8_t small_size)
 {
     size_t size_allocation = (size_t)small_size * 1024;
-
     char* buff = (char*)alloca(size_allocation);
 
     if (!buff)