From 7664762232a3e6fa12d56442062b550620a0dd8e Mon Sep 17 00:00:00 2001
From: Leigh <351529+leighmcculloch@users.noreply.github.com>
Date: Tue, 3 Feb 2026 00:24:07 +1000
Subject: [PATCH 1/2] add fetch-cache script for downloading CI cached images

---
 .scripts/fetch-cache | 416 +++++++++++++++++++++++++++++++++++++++++++
 Makefile             |  58 +++++-
 2 files changed, 473 insertions(+), 1 deletion(-)
 create mode 100755 .scripts/fetch-cache

diff --git a/.scripts/fetch-cache b/.scripts/fetch-cache
new file mode 100755
index 00000000..45327178
--- /dev/null
+++ b/.scripts/fetch-cache
@@ -0,0 +1,416 @@
+#!/usr/bin/env python3
+
+"""
+Fetches pre-built dependency images from GitHub Actions artifacts or cache.
+
+This script downloads cached Docker images for quickstart dependencies from
+the GitHub Actions artifacts. If artifacts are unavailable (expired, or never
+uploaded because the CI run hit its cache), it falls back to the GitHub Actions cache.
+
+After downloading, it loads the images into Docker with the correct tags
+expected by the Dockerfile.
+
+Primary source: Artifacts from the latest completed CI workflow on main branch
+Fallback source: GitHub Actions cache (requires gh-actions-cache extension)
+
+Usage:
+    .scripts/fetch-cache --tag latest --image-json .image.json
+
+Requirements:
+    - gh CLI authenticated with access to stellar/quickstart
+    - docker CLI available
+    - jq available
+    - (optional) gh-actions-cache extension for cache fallback
+"""
+
+import argparse
+import json
+import os
+import platform
+import re
+import subprocess
+import sys
+import tempfile
+from pathlib import Path
+
+
+def detect_arch():
+    """Detect the native architecture (amd64 or arm64)."""
+    machine = platform.machine().lower()
+    if machine in ("x86_64", "amd64"):
+        return "amd64"
+    elif machine in ("arm64", "aarch64"):
+        return "arm64"
+    else:
+        print(f"Warning: Unknown architecture '{machine}', defaulting to amd64", file=sys.stderr)
+        return "amd64"
+
+
+def run_cmd(cmd, capture=True, check=True, verbose=True):
+    """Run a command and return output."""
+    if verbose:
+        print(f"  Running: {' '.join(cmd)}", file=sys.stderr)
+    result = subprocess.run(
+        cmd,
+        capture_output=capture,
+        text=True,
+        check=check
+    )
+    return result.stdout.strip() if capture else None
+
+
+def run_cmd_quiet(cmd, check=True):
+    """Run a command quietly, only showing output on failure."""
+    result = subprocess.run(
+        cmd,
+        capture_output=True,
+        text=True,
+        check=False
+    )
+    if check and result.returncode != 0:
+        print(f"Command failed: {' '.join(cmd)}", file=sys.stderr)
+        print(f"stdout: {result.stdout}", file=sys.stderr)
+        print(f"stderr: {result.stderr}", file=sys.stderr)
+        raise subprocess.CalledProcessError(result.returncode, cmd)
+    return result
+
+
+def find_ci_runs_on_main(repo, limit=10):
+    """Find recent completed CI workflow runs on main branch."""
+    try:
+        output = run_cmd([
+            "gh", "run", "list",
+            "-R", repo,
+            "--workflow", "ci.yml",
+            "--branch", "main",
+            "--status", "success",
+            "--limit", str(limit),
+            "--json", "databaseId,event,createdAt,headSha",
+        ], check=False)
+
+        if output and output.strip():
+            return json.loads(output)
+    except (subprocess.CalledProcessError, json.JSONDecodeError) as e:
+        print(f"  Warning: Failed to list CI runs: {e}", file=sys.stderr)
+    return []
+
+
+def list_artifacts_for_run(repo, run_id):
+    """List all artifacts for a workflow run."""
+    all_artifacts = []
+    page = 1
+    per_page = 100
+
+    while True:
+        try:
+            output = run_cmd([
+                "gh", "api",
f"repos/{repo}/actions/runs/{run_id}/artifacts?per_page={per_page}&page={page}", + "--jq", ".artifacts" + ], check=False, verbose=False) + + if not output: + break + + artifacts = json.loads(output) + if not artifacts: + break + + all_artifacts.extend(artifacts) + + # If we got fewer than per_page, we've reached the end + if len(artifacts) < per_page: + break + + page += 1 + except (subprocess.CalledProcessError, json.JSONDecodeError) as e: + print(f" Warning: Failed to list artifacts (page {page}): {e}", file=sys.stderr) + break + + return all_artifacts + + +def download_artifact(repo, run_id, artifact_name, dest_dir): + """Download an artifact from a workflow run.""" + try: + run_cmd([ + "gh", "run", "download", str(run_id), + "-R", repo, + "-n", artifact_name, + "-D", dest_dir + ]) + return True + except subprocess.CalledProcessError as e: + print(f" Warning: Failed to download artifact {artifact_name}: {e}", file=sys.stderr) + return False + + +def find_cache_entry(repo, cache_prefix, dep_name, dep_id, arch): + """Find a cache entry for a specific dependency.""" + try: + output = run_cmd([ + "gh", "api", + f"repos/{repo}/actions/caches", + "--jq", f'.actions_caches[] | select(.ref == "refs/heads/main") | select(.key | contains("image-{dep_name}-{dep_id}-{arch}"))', + "--paginate" + ], check=False, verbose=False) + + if output: + # Return the first matching cache entry + for line in output.strip().split('\n'): + if line: + try: + return json.loads(line) + except json.JSONDecodeError: + continue + except subprocess.CalledProcessError: + pass + return None + + +def download_from_cache(repo, cache_key, dest_dir): + """ + Note: GitHub Actions caches cannot be downloaded outside of GitHub Actions. + This function exists for documentation purposes and always returns False. + The cache API only allows listing and deleting caches, not downloading. + """ + print(" Note: GitHub Actions caches cannot be downloaded outside of Actions workflows", file=sys.stderr) + print(" The cache exists but is only accessible within GitHub Actions runners", file=sys.stderr) + return False + + +def discover_cache_id(repo, cache_prefix, arch): + """ + Discover the highest cache_id that has cached images. + Returns the cache_id as a string, or None if not found. 
+    """
+    print("Discovering available cache IDs...", file=sys.stderr)
+
+    try:
+        output = run_cmd([
+            "gh", "api",
+            f"repos/{repo}/actions/caches",
+            "--jq", '.actions_caches[] | select(.ref == "refs/heads/main") | .key',
+            "--paginate"
+        ], check=False)
+
+        if not output:
+            return None
+
+        cache_keys = output.strip().split('\n')
+
+        # Extract cache_ids from keys matching our pattern
+        # Pattern: quickstart-{cache_id}-image-{name}-{dep_id}-{arch}.tar
+        pattern = re.compile(rf'^{re.escape(cache_prefix)}(\d+)-image-')
+        cache_ids = set()
+        for key in cache_keys:
+            match = pattern.match(key)
+            if match:
+                cache_ids.add(int(match.group(1)))
+
+        if cache_ids:
+            highest = max(cache_ids)
+            print(f"  Found cache IDs: {sorted(cache_ids)}, using highest: {highest}", file=sys.stderr)
+            return str(highest)
+
+    except subprocess.CalledProcessError:
+        pass
+
+    return None
+
+
+def load_image_json(image_json_path):
+    """Load the .image.json file and extract deps."""
+    with open(image_json_path, 'r') as f:
+        data = json.load(f)
+    return data
+
+
+def docker_load(tar_path, expected_tag):
+    """Load a Docker image from a tar file."""
+    print(f"  Loading Docker image from {tar_path}...", file=sys.stderr)
+    run_cmd(["docker", "load", "-i", tar_path])
+
+    # Verify the image was loaded
+    result = run_cmd_quiet(["docker", "images", "-q", expected_tag], check=False)
+    if result.stdout.strip():
+        print(f"  Verified: {expected_tag}", file=sys.stderr)
+        return True
+    else:
+        print(f"  Warning: Image {expected_tag} not found after load", file=sys.stderr)
+        return False
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Fetch pre-built dependency images from GitHub Actions artifacts or cache")
+    parser.add_argument("--tag", default="latest", help="Image tag from images.json (default: latest)")
+    parser.add_argument("--image-json", default=".image.json", help="Path to processed .image.json file")
+    parser.add_argument("--cache-id", default="", help="Cache ID to use (auto-detected if not provided)")
+    parser.add_argument("--cache-prefix", default="quickstart-", help="Cache key prefix")
+    parser.add_argument("--repo", default="stellar/quickstart", help="GitHub repository")
+    parser.add_argument("--arch", default="", help="Architecture (auto-detected if not provided)")
+    args = parser.parse_args()
+
+    # Detect architecture
+    arch = args.arch if args.arch else detect_arch()
+    print(f"Architecture: {arch}", file=sys.stderr)
+
+    # Load image configuration
+    if not os.path.exists(args.image_json):
+        print(f"Error: {args.image_json} not found. Run 'make .image.json TAG={args.tag}' first.", file=sys.stderr)
+        sys.exit(1)
+
+    image_data = load_image_json(args.image_json)
+    deps = image_data.get("deps", [])
+
+    if not deps:
+        print(f"Error: No deps found in {args.image_json}", file=sys.stderr)
+        sys.exit(1)
+
+    print(f"Found {len(deps)} dependencies to fetch:", file=sys.stderr)
+    for dep in deps:
+        print(f"  - {dep['name']}: {dep['repo']}@{dep.get('sha', dep.get('ref', 'unknown'))[:12]}... (id: {dep.get('id', 'unknown')[:12]}...)", file=sys.stderr)
+
+    # Determine cache_id for fallback
+    cache_id = args.cache_id
+    if not cache_id:
+        cache_id = discover_cache_id(args.repo, args.cache_prefix, arch)
+        if not cache_id:
+            print("  No cache_id discovered, defaulting to 19", file=sys.stderr)
+            cache_id = "19"
+
+    print(f"Using cache_id: {cache_id}", file=sys.stderr)
+
+    # Find recent CI runs on main branch
+    print(f"\nLooking for CI runs on main branch...", file=sys.stderr)
+    ci_runs = find_ci_runs_on_main(args.repo, limit=10)
+
+    if ci_runs:
+        print(f"  Found {len(ci_runs)} recent CI runs", file=sys.stderr)
+    else:
+        print(f"  No completed CI runs found on main branch", file=sys.stderr)
+
+    # Build a map of all available artifacts across all recent runs
+    print(f"\nBuilding artifact index from recent CI runs...", file=sys.stderr)
+    artifact_index = {}  # artifact_name -> (run_id, artifact_info)
+
+    for run in ci_runs:
+        run_id = run['databaseId']
+        artifacts = list_artifacts_for_run(args.repo, run_id)
+        for artifact in artifacts:
+            name = artifact['name']
+            if name.endswith('.tar') and not artifact.get('expired', False):
+                if name not in artifact_index:
+                    artifact_index[name] = (run_id, artifact)
+
+    print(f"  Found {len(artifact_index)} tar artifacts", file=sys.stderr)
+
+    # Try to download each dependency
+    success_count = 0
+    failed_deps = []
+
+    with tempfile.TemporaryDirectory() as tmpdir:
+        tmpdir = Path(tmpdir)
+
+        for dep in deps:
+            dep_name = dep["name"]
+            dep_id = dep.get("id")
+            dep_sha = dep.get("sha")
+
+            if not dep_id:
+                print(f"Error: dep '{dep_name}' missing 'id' field", file=sys.stderr)
+                failed_deps.append(dep_name)
+                continue
+
+            if not dep_sha:
+                print(f"Error: dep '{dep_name}' missing 'sha' field", file=sys.stderr)
+                failed_deps.append(dep_name)
+                continue
+
+            print(f"\nFetching {dep_name}...", file=sys.stderr)
+
+            # Expected filename and Docker tag
+            tar_artifact_name = f"image-{dep_name}-{dep_id}-{arch}.tar"
+            cache_key = f"{args.cache_prefix}{cache_id}-{tar_artifact_name}"
+            expected_tag = f"stellar-{dep_name}:{dep_sha}-{arch}"
+
+            downloaded = False
+            tar_path = tmpdir / tar_artifact_name
+
+            # Strategy 1: Try to download from artifacts (primary source)
+            if tar_artifact_name in artifact_index:
+                run_id, artifact_info = artifact_index[tar_artifact_name]
+                print(f"  Found artifact in CI run {run_id}", file=sys.stderr)
+
+                artifact_dir = tmpdir / f"artifact-{dep_name}"
+                artifact_dir.mkdir(exist_ok=True)
+
+                if download_artifact(args.repo, run_id, tar_artifact_name, str(artifact_dir)):
+                    # Find the downloaded tar file
+                    for f in artifact_dir.iterdir():
+                        if f.suffix == '.tar' or f.name.endswith('.tar') or f.name == 'image':
+                            tar_path = f
+                            downloaded = True
+                            print(f"  Downloaded from artifacts: {tar_artifact_name}", file=sys.stderr)
+                            break
+            else:
+                print(f"  Artifact not found in recent CI runs", file=sys.stderr)
+
+            # Strategy 2: Fall back to GitHub Actions cache
+            if not downloaded:
+                print(f"  Trying GitHub Actions cache fallback...", file=sys.stderr)
+
+                cache_entry = find_cache_entry(args.repo, args.cache_prefix, dep_name, dep_id, arch)
+                if cache_entry:
+                    cache_key = cache_entry.get('key', cache_key)
+                    print(f"  Found cache entry: {cache_key}", file=sys.stderr)
+
+                    cache_dir = tmpdir / f"cache-{dep_name}"
+                    cache_dir.mkdir(exist_ok=True)
+
+                    if download_from_cache(args.repo, cache_key, str(cache_dir)):
+                        # Find the downloaded tar file
+                        for root, dirs, files in os.walk(cache_dir):
+                            for f in files:
+                                if f.endswith('.tar'):
+                                    tar_path = Path(root) / f
+                                    downloaded = True
+                                    print(f"  Downloaded from cache: {cache_key}", file=sys.stderr)
+                                    break
+                            if downloaded:
+                                break
+                else:
+                    print(f"  No matching cache entry found", file=sys.stderr)
+
+            # Load the image if downloaded
+            if downloaded and tar_path.exists():
+                if docker_load(str(tar_path), expected_tag):
+                    success_count += 1
+                else:
+                    failed_deps.append(dep_name)
+            else:
+                print(f"  Failed to download {dep_name}", file=sys.stderr)
+                failed_deps.append(dep_name)
+
+    # Summary
+    print(f"\n{'='*60}", file=sys.stderr)
+    print(f"Summary: {success_count}/{len(deps)} dependencies loaded successfully", file=sys.stderr)
+
+    if failed_deps:
+        print(f"Failed: {', '.join(failed_deps)}", file=sys.stderr)
+        print(f"\nNote: Some images could not be fetched. This typically happens when:", file=sys.stderr)
+        print(f"  - Artifacts have expired (GitHub retains them for 7 days)", file=sys.stderr)
+        print(f"  - The CI only builds nightly images on schedule, not 'latest' tag images", file=sys.stderr)
+        print(f"  - Images are in GitHub Actions cache (only accessible within Actions runners)", file=sys.stderr)
+        print(f"\nTo get pre-built images, you can:", file=sys.stderr)
+        print(f"  1. Trigger a CI build by pushing to main branch", file=sys.stderr)
+        print(f"  2. Build from source: make build TAG={args.tag}", file=sys.stderr)
+        print(f"  3. Try a different tag that was recently built (e.g., nightly)", file=sys.stderr)
+        sys.exit(1)
+
+    print(f"\nAll dependencies loaded successfully! You can now run:", file=sys.stderr)
+    print(f"  make build-with-cache TAG={args.tag}", file=sys.stderr)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/Makefile b/Makefile
index 67598b0e..c1132b6b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,27 @@
-__PHONY__: run logs console build build-deps build-deps-xdr build-deps-core build-deps-horizon build-deps-friendbot build-deps-rpc build-deps-lab test
+__PHONY__: run logs console build build-with-cache build-deps build-deps-xdr build-deps-core build-deps-horizon build-deps-friendbot build-deps-rpc build-deps-lab test fetch-cache
 
 REVISION=$(shell git -c core.abbrev=no describe --always --exclude='*' --long --dirty)
 
 TAG?=latest
 
+# Cache settings for fetch-cache target
+CACHE_ID?=
+CACHE_PREFIX?=quickstart-
+CACHE_REPO?=stellar/quickstart
+
+# Detect native architecture for Docker images
+UNAME_M := $(shell uname -m)
+ifeq ($(UNAME_M),x86_64)
+  ARCH := amd64
+else ifeq ($(UNAME_M),amd64)
+  ARCH := amd64
+else ifeq ($(UNAME_M),arm64)
+  ARCH := arm64
+else ifeq ($(UNAME_M),aarch64)
+  ARCH := arm64
+else
+  ARCH := amd64
+endif
+
 # Process images.json through the images-with-extras script
 IMAGE_JSON=.image.json
 .image.json: images.json .scripts/images-with-extras
@@ -47,6 +66,20 @@ build: $(IMAGE_JSON)
 		--build-arg FRIENDBOT_REPO="$(FRIENDBOT_REPO)" --build-arg FRIENDBOT_REF="$(FRIENDBOT_SHA)" --build-arg FRIENDBOT_OPTIONS='$(FRIENDBOT_OPTIONS)' \
 		--build-arg LAB_REPO="$(LAB_REPO)" --build-arg LAB_REF=$(LAB_SHA)
 
+# Build using pre-fetched cached images.
+# Run 'make fetch-cache TAG=...' first to download the dependency images.
+# This is much faster than 'make build' as it skips compiling dependencies.
+build-with-cache: $(IMAGE_JSON)
+	docker build -t stellar/quickstart:$(TAG) -f Dockerfile . \
+		--build-arg REVISION=$(REVISION) \
+		--build-arg XDR_IMAGE=stellar-xdr:$(XDR_SHA)-$(ARCH) \
+		--build-arg CORE_IMAGE=stellar-core:$(CORE_SHA)-$(ARCH) \
+		--build-arg RPC_IMAGE=stellar-rpc:$(RPC_SHA)-$(ARCH) \
+		--build-arg GALEXIE_IMAGE=stellar-galexie:$(GALEXIE_SHA)-$(ARCH) \
+		--build-arg HORIZON_IMAGE=stellar-horizon:$(HORIZON_SHA)-$(ARCH) \
+		--build-arg FRIENDBOT_IMAGE=stellar-friendbot:$(FRIENDBOT_SHA)-$(ARCH) \
+		--build-arg LAB_IMAGE=stellar-lab:$(LAB_SHA)-$(ARCH)
+
 # Run the same tests that CI runs against a running quickstart container.
 # Build and run the container first with: make build run TAG=...
 # These mirror the tests run in the CI workflow (.github/workflows/internal-test.yml)
@@ -58,3 +91,26 @@ test:
 	go run tests/test_friendbot.go
 	go run tests/test_stellar_rpc_up.go
 	go run tests/test_stellar_rpc_healthy.go
+
+# Fetch pre-built dependency images from GitHub Actions cache or artifacts.
+# This downloads cached Docker images from the stellar/quickstart repository's
+# CI workflow, allowing faster local builds by skipping dependency compilation.
+#
+# Primary source: Artifacts from the latest completed CI workflow on main branch
+# Fallback source: GitHub Actions cache (only accessible in GitHub Actions)
+#
+# Usage:
+#   make fetch-cache                              # Fetch deps for TAG=latest
+#   make fetch-cache TAG=testing                  # Fetch deps for a specific tag
+#   make fetch-cache CACHE_ID=19                  # Use a specific cache ID
+#   make fetch-cache CACHE_REPO=myorg/quickstart  # Use a different repo
+#
+# After fetching, run: make build-with-cache TAG=...
+fetch-cache: $(IMAGE_JSON)
+	.scripts/fetch-cache \
+		--tag "$(TAG)" \
+		--image-json "$(IMAGE_JSON)" \
+		--cache-id "$(CACHE_ID)" \
+		--cache-prefix "$(CACHE_PREFIX)" \
+		--repo "$(CACHE_REPO)" \
+		--arch "$(ARCH)"
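
A minimal sketch of the naming convention this first patch relies on, using
hypothetical values for a dependency named "core" on amd64 (the real name,
id, and sha come from .image.json):

    dep = {"name": "core", "id": "0123abcd4567ef89", "sha": "89ef7654dcba3210"}
    arch = "amd64"

    # One tar artifact per dependency and architecture, uploaded by CI:
    tar_artifact_name = f"image-{dep['name']}-{dep['id']}-{arch}.tar"
    # -> "image-core-0123abcd4567ef89-amd64.tar"

    # Cache key: the artifact name prefixed with "{cache_prefix}{cache_id}-":
    cache_key = f"quickstart-19-{tar_artifact_name}"

    # Tag carried inside the tar, restored verbatim by `docker load`:
    expected_tag = f"stellar-{dep['name']}:{dep['sha']}-{arch}"
    # -> "stellar-core:89ef7654dcba3210-amd64"
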
From 5bc8a1a514846e482e59124b0bfa67ba4e24c622 Mon Sep 17 00:00:00 2001
From: Leigh <351529+leighmcculloch@users.noreply.github.com>
Date: Tue, 3 Feb 2026 00:45:47 +1000
Subject: [PATCH 2/2] simplify fetch-cache to use artifacts only, remove cache
 fallback

---
 .scripts/fetch-cache | 186 +++++++++-----------------------------------
 Makefile             |  39 +++++-------
 2 files changed, 46 insertions(+), 179 deletions(-)

diff --git a/.scripts/fetch-cache b/.scripts/fetch-cache
index 45327178..c95aa5b8 100755
--- a/.scripts/fetch-cache
+++ b/.scripts/fetch-cache
@@ -1,33 +1,24 @@
 #!/usr/bin/env python3
 
 """
-Fetches pre-built dependency images from GitHub Actions artifacts or cache.
+Fetches pre-built dependency images from GitHub Actions artifacts.
 
 This script downloads cached Docker images for quickstart dependencies from
-the GitHub Actions artifacts. If artifacts are unavailable (expired, or never
-uploaded because the CI run hit its cache), it falls back to the GitHub Actions cache.
-
-After downloading, it loads the images into Docker with the correct tags
-expected by the Dockerfile.
-
-Primary source: Artifacts from the latest completed CI workflow on main branch
-Fallback source: GitHub Actions cache (requires gh-actions-cache extension)
+the stellar/quickstart CI workflow artifacts, then loads them into Docker
+with the stage tags expected by the Dockerfile.
 
 Usage:
-    .scripts/fetch-cache --tag latest --image-json .image.json
+    .scripts/fetch-cache --tag nightly --image-json .image.json
 
 Requirements:
     - gh CLI authenticated with access to stellar/quickstart
     - docker CLI available
-    - jq available
-    - (optional) gh-actions-cache extension for cache fallback
 """
 
 import argparse
 import json
 import os
 import platform
-import re
 import subprocess
 import sys
 import tempfile
 from pathlib import Path
@@ -145,80 +136,6 @@ def download_artifact(repo, run_id, artifact_name, dest_dir):
         return False
 
 
-def find_cache_entry(repo, cache_prefix, dep_name, dep_id, arch):
-    """Find a cache entry for a specific dependency."""
-    try:
-        output = run_cmd([
-            "gh", "api",
-            f"repos/{repo}/actions/caches",
-            "--jq", f'.actions_caches[] | select(.ref == "refs/heads/main") | select(.key | contains("image-{dep_name}-{dep_id}-{arch}"))',
-            "--paginate"
-        ], check=False, verbose=False)
-
-        if output:
-            # Return the first matching cache entry
-            for line in output.strip().split('\n'):
-                if line:
-                    try:
-                        return json.loads(line)
-                    except json.JSONDecodeError:
-                        continue
-    except subprocess.CalledProcessError:
-        pass
-    return None
-
-
-def download_from_cache(repo, cache_key, dest_dir):
-    """
-    Note: GitHub Actions caches cannot be downloaded outside of GitHub Actions.
-    This function exists for documentation purposes and always returns False.
-    The cache API only allows listing and deleting caches, not downloading.
-    """
-    print("  Note: GitHub Actions caches cannot be downloaded outside of Actions workflows", file=sys.stderr)
-    print("  The cache exists but is only accessible within GitHub Actions runners", file=sys.stderr)
-    return False
-
-
-def discover_cache_id(repo, cache_prefix, arch):
-    """
-    Discover the highest cache_id that has cached images.
-    Returns the cache_id as a string, or None if not found.
-    """
-    print("Discovering available cache IDs...", file=sys.stderr)
-
-    try:
-        output = run_cmd([
-            "gh", "api",
-            f"repos/{repo}/actions/caches",
-            "--jq", '.actions_caches[] | select(.ref == "refs/heads/main") | .key',
-            "--paginate"
-        ], check=False)
-
-        if not output:
-            return None
-
-        cache_keys = output.strip().split('\n')
-
-        # Extract cache_ids from keys matching our pattern
-        # Pattern: quickstart-{cache_id}-image-{name}-{dep_id}-{arch}.tar
-        pattern = re.compile(rf'^{re.escape(cache_prefix)}(\d+)-image-')
-        cache_ids = set()
-        for key in cache_keys:
-            match = pattern.match(key)
-            if match:
-                cache_ids.add(int(match.group(1)))
-
-        if cache_ids:
-            highest = max(cache_ids)
-            print(f"  Found cache IDs: {sorted(cache_ids)}, using highest: {highest}", file=sys.stderr)
-            return str(highest)
-
-    except subprocess.CalledProcessError:
-        pass
-
-    return None
-
-
 def load_image_json(image_json_path):
     """Load the .image.json file and extract deps."""
     with open(image_json_path, 'r') as f:
@@ -226,27 +143,33 @@ def load_image_json(image_json_path):
     return data
 
 
-def docker_load(tar_path, expected_tag):
-    """Load a Docker image from a tar file."""
-    print(f"  Loading Docker image from {tar_path}...", file=sys.stderr)
-    run_cmd(["docker", "load", "-i", tar_path])
+def docker_load_and_tag(tar_path, source_tag, stage_tag):
+    """Load a Docker image from a tar file and tag it with the stage name."""
+    print(f"  Loading Docker image...", file=sys.stderr)
+    run_cmd(["docker", "load", "-i", tar_path], verbose=False)
 
-    # Verify the image was loaded
-    result = run_cmd_quiet(["docker", "images", "-q", expected_tag], check=False)
-    if result.stdout.strip():
-        print(f"  Verified: {expected_tag}", file=sys.stderr)
-        return True
-    else:
-        print(f"  Warning: Image {expected_tag} not found after load", file=sys.stderr)
+    # Verify the image was loaded with its original tag
+    check_result = run_cmd_quiet(["docker", "images", "-q", source_tag], check=False)
+    if not check_result.stdout.strip():
+        print(f"  Warning: Image {source_tag} not found after load", file=sys.stderr)
+        return False
+
+    # Tag the image with the stage name expected by Dockerfile
+    print(f"  Tagging as {stage_tag}...", file=sys.stderr)
+    try:
+        run_cmd(["docker", "tag", source_tag, stage_tag], verbose=False)
+    except subprocess.CalledProcessError:
+        print(f"  Warning: Failed to tag image as {stage_tag}", file=sys.stderr)
         return False
+
+    print(f"  Loaded: {stage_tag}", file=sys.stderr)
+    return True
 
 
 def main():
-    parser = argparse.ArgumentParser(description="Fetch pre-built dependency images from GitHub Actions artifacts or cache")
+    parser = argparse.ArgumentParser(description="Fetch pre-built dependency images from GitHub Actions artifacts")
     parser.add_argument("--tag", default="latest", help="Image tag from images.json (default: latest)")
     parser.add_argument("--image-json", default=".image.json", help="Path to processed .image.json file")
-    parser.add_argument("--cache-id", default="", help="Cache ID to use (auto-detected if not provided)")
-    parser.add_argument("--cache-prefix", default="quickstart-", help="Cache key prefix")
     parser.add_argument("--repo", default="stellar/quickstart", help="GitHub repository")
     parser.add_argument("--arch", default="", help="Architecture (auto-detected if not provided)")
     args = parser.parse_args()
@@ -269,17 +192,7 @@ def main():
 
     print(f"Found {len(deps)} dependencies to fetch:", file=sys.stderr)
     for dep in deps:
-        print(f"  - {dep['name']}: {dep['repo']}@{dep.get('sha', dep.get('ref', 'unknown'))[:12]}... (id: {dep.get('id', 'unknown')[:12]}...)", file=sys.stderr)
-
-    # Determine cache_id for fallback
-    cache_id = args.cache_id
-    if not cache_id:
-        cache_id = discover_cache_id(args.repo, args.cache_prefix, arch)
-        if not cache_id:
-            print("  No cache_id discovered, defaulting to 19", file=sys.stderr)
-            cache_id = "19"
-
-    print(f"Using cache_id: {cache_id}", file=sys.stderr)
+        print(f"  - {dep['name']}: {dep['repo']}@{dep.get('sha', dep.get('ref', 'unknown'))[:12]}...", file=sys.stderr)
 
     # Find recent CI runs on main branch
     print(f"\nLooking for CI runs on main branch...", file=sys.stderr)
@@ -289,6 +202,7 @@ def main():
         print(f"  Found {len(ci_runs)} recent CI runs", file=sys.stderr)
     else:
         print(f"  No completed CI runs found on main branch", file=sys.stderr)
+        sys.exit(1)
 
     # Build a map of all available artifacts across all recent runs
     print(f"\nBuilding artifact index from recent CI runs...", file=sys.stderr)
@@ -329,15 +243,15 @@ def main():
 
             print(f"\nFetching {dep_name}...", file=sys.stderr)
 
-            # Expected filename and Docker tag
+            # Expected filename and Docker tags
             tar_artifact_name = f"image-{dep_name}-{dep_id}-{arch}.tar"
-            cache_key = f"{args.cache_prefix}{cache_id}-{tar_artifact_name}"
-            expected_tag = f"stellar-{dep_name}:{dep_sha}-{arch}"
+            source_tag = f"stellar-{dep_name}:{dep_sha}-{arch}"
+            stage_tag = f"stellar-{dep_name}-stage"
 
             downloaded = False
             tar_path = tmpdir / tar_artifact_name
 
-            # Strategy 1: Try to download from artifacts (primary source)
+            # Try to download from artifacts
             if tar_artifact_name in artifact_index:
                 run_id, artifact_info = artifact_index[tar_artifact_name]
                 print(f"  Found artifact in CI run {run_id}", file=sys.stderr)
@@ -351,40 +265,14 @@ def main():
                         if f.suffix == '.tar' or f.name.endswith('.tar') or f.name == 'image':
                             tar_path = f
                             downloaded = True
-                            print(f"  Downloaded from artifacts: {tar_artifact_name}", file=sys.stderr)
+                            print(f"  Downloaded: {tar_artifact_name}", file=sys.stderr)
                             break
             else:
                 print(f"  Artifact not found in recent CI runs", file=sys.stderr)
 
-            # Strategy 2: Fall back to GitHub Actions cache
-            if not downloaded:
-                print(f"  Trying GitHub Actions cache fallback...", file=sys.stderr)
-
-                cache_entry = find_cache_entry(args.repo, args.cache_prefix, dep_name, dep_id, arch)
-                if cache_entry:
-                    cache_key = cache_entry.get('key', cache_key)
-                    print(f"  Found cache entry: {cache_key}", file=sys.stderr)
-
-                    cache_dir = tmpdir / f"cache-{dep_name}"
-                    cache_dir.mkdir(exist_ok=True)
-
-                    if download_from_cache(args.repo, cache_key, str(cache_dir)):
-                        # Find the downloaded tar file
-                        for root, dirs, files in os.walk(cache_dir):
-                            for f in files:
-                                if f.endswith('.tar'):
-                                    tar_path = Path(root) / f
-                                    downloaded = True
-                                    print(f"  Downloaded from cache: {cache_key}", file=sys.stderr)
-                                    break
-                            if downloaded:
-                                break
-                else:
-                    print(f"  No matching cache entry found", file=sys.stderr)
-
             # Load the image if downloaded
             if downloaded and tar_path.exists():
-                if docker_load(str(tar_path), expected_tag):
+                if docker_load_and_tag(str(tar_path), source_tag, stage_tag):
                     success_count += 1
                 else:
                     failed_deps.append(dep_name)
@@ -400,16 +288,14 @@ def main():
     if failed_deps:
         print(f"Failed: {', '.join(failed_deps)}", file=sys.stderr)
         print(f"\nNote: Some images could not be fetched. This typically happens when:", file=sys.stderr)
         print(f"  - Artifacts have expired (GitHub retains them for 7 days)", file=sys.stderr)
-        print(f"  - The CI only builds nightly images on schedule, not 'latest' tag images", file=sys.stderr)
-        print(f"  - Images are in GitHub Actions cache (only accessible within Actions runners)", file=sys.stderr)
+        print(f"  - The requested tag wasn't recently built in CI", file=sys.stderr)
         print(f"\nTo get pre-built images, you can:", file=sys.stderr)
-        print(f"  1. Trigger a CI build by pushing to main branch", file=sys.stderr)
-        print(f"  2. Build from source: make build TAG={args.tag}", file=sys.stderr)
-        print(f"  3. Try a different tag that was recently built (e.g., nightly)", file=sys.stderr)
+        print(f"  1. Build from source: make build TAG={args.tag}", file=sys.stderr)
+        print(f"  2. Try a different tag that was recently built (e.g., nightly)", file=sys.stderr)
         sys.exit(1)
 
     print(f"\nAll dependencies loaded successfully! You can now run:", file=sys.stderr)
-    print(f"  make build-with-cache TAG={args.tag}", file=sys.stderr)
+    print(f"  make build TAG={args.tag}", file=sys.stderr)
 
 
 if __name__ == "__main__":
diff --git a/Makefile b/Makefile
index c1132b6b..0b62f8b5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,13 +1,8 @@
-__PHONY__: run logs console build build-with-cache build-deps build-deps-xdr build-deps-core build-deps-horizon build-deps-friendbot build-deps-rpc build-deps-lab test fetch-cache
+__PHONY__: run logs console build build-deps build-deps-xdr build-deps-core build-deps-horizon build-deps-friendbot build-deps-rpc build-deps-lab test fetch-cache
 
 REVISION=$(shell git -c core.abbrev=no describe --always --exclude='*' --long --dirty)
 
 TAG?=latest
 
-# Cache settings for fetch-cache target
-CACHE_ID?=
-CACHE_PREFIX?=quickstart-
-CACHE_REPO?=stellar/quickstart
-
 # Detect native architecture for Docker images
 UNAME_M := $(shell uname -m)
 ifeq ($(UNAME_M),x86_64)
@@ -55,6 +50,10 @@ logs:
 console:
 	docker exec -it stellar /bin/bash
 
+# Build using pre-fetched cached images if available.
+# Run 'make fetch-cache TAG=...' first to download the dependency images.
+# If cached images exist, Docker will use them automatically.
+# Otherwise, dependencies will be built from source.
 build: $(IMAGE_JSON)
 	docker build -t stellar/quickstart:$(TAG) -f Dockerfile . \
 		--build-arg REVISION=$(REVISION) \
@@ -66,20 +65,6 @@ build: $(IMAGE_JSON)
 		--build-arg FRIENDBOT_REPO="$(FRIENDBOT_REPO)" --build-arg FRIENDBOT_REF="$(FRIENDBOT_SHA)" --build-arg FRIENDBOT_OPTIONS='$(FRIENDBOT_OPTIONS)' \
 		--build-arg LAB_REPO="$(LAB_REPO)" --build-arg LAB_REF=$(LAB_SHA)
 
-# Build using pre-fetched cached images.
-# Run 'make fetch-cache TAG=...' first to download the dependency images.
-# This is much faster than 'make build' as it skips compiling dependencies.
-build-with-cache: $(IMAGE_JSON)
-	docker build -t stellar/quickstart:$(TAG) -f Dockerfile . \
-		--build-arg REVISION=$(REVISION) \
-		--build-arg XDR_IMAGE=stellar-xdr:$(XDR_SHA)-$(ARCH) \
-		--build-arg CORE_IMAGE=stellar-core:$(CORE_SHA)-$(ARCH) \
-		--build-arg RPC_IMAGE=stellar-rpc:$(RPC_SHA)-$(ARCH) \
-		--build-arg GALEXIE_IMAGE=stellar-galexie:$(GALEXIE_SHA)-$(ARCH) \
-		--build-arg HORIZON_IMAGE=stellar-horizon:$(HORIZON_SHA)-$(ARCH) \
-		--build-arg FRIENDBOT_IMAGE=stellar-friendbot:$(FRIENDBOT_SHA)-$(ARCH) \
-		--build-arg LAB_IMAGE=stellar-lab:$(LAB_SHA)-$(ARCH)
-
 # Run the same tests that CI runs against a running quickstart container.
 # Build and run the container first with: make build run TAG=...
 # These mirror the tests run in the CI workflow (.github/workflows/internal-test.yml)
@@ -92,25 +77,21 @@ test:
 	go run tests/test_stellar_rpc_up.go
 	go run tests/test_stellar_rpc_healthy.go
 
-# Fetch pre-built dependency images from GitHub Actions cache or artifacts.
+# Fetch pre-built dependency images from GitHub Actions artifacts.
 # This downloads cached Docker images from the stellar/quickstart repository's
 # CI workflow, allowing faster local builds by skipping dependency compilation.
 #
-# Primary source: Artifacts from the latest completed CI workflow on main branch
-# Fallback source: GitHub Actions cache (only accessible in GitHub Actions)
+# The images are tagged with the stage names expected by the Dockerfile, so
+# running 'make build' after this will automatically use the cached images.
 #
 # Usage:
 #   make fetch-cache                              # Fetch deps for TAG=latest
 #   make fetch-cache TAG=testing                  # Fetch deps for a specific tag
-#   make fetch-cache CACHE_ID=19                  # Use a specific cache ID
-#   make fetch-cache CACHE_REPO=myorg/quickstart  # Use a different repo
+#   make fetch-cache TAG=nightly                  # Fetch nightly build deps
 #
-# After fetching, run: make build-with-cache TAG=...
+# After fetching, run: make build TAG=...
 fetch-cache: $(IMAGE_JSON)
 	.scripts/fetch-cache \
 		--tag "$(TAG)" \
 		--image-json "$(IMAGE_JSON)" \
-		--cache-id "$(CACHE_ID)" \
-		--cache-prefix "$(CACHE_PREFIX)" \
-		--repo "$(CACHE_REPO)" \
		--arch "$(ARCH)"
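
A minimal end-to-end sketch of the flow the series leaves in place, assuming
`gh` is authenticated against stellar/quickstart and the Dockerfile's stages
are set up to FROM the stellar-*-stage tags (the Dockerfile itself is not
shown in these patches):

    # download dependency images from recent main-branch CI artifacts
    # and tag each one as stellar-<dep>-stage
    make fetch-cache TAG=latest

    # build quickstart; the pre-tagged stage images skip dependency compilation
    make build TAG=latest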