Compare commits

..

No commits in common. "main" and "multipass_fix1" have entirely different histories.

3 changed files with 27 additions and 610 deletions

View File

@ -1,62 +0,0 @@
#cloud-config
hostname: rpios-like-arm64
manage_etc_hosts: true
write_files:
  # Root-owned helper script; executed once by runcmd at first boot.
  - path: /usr/local/sbin/setup-raspi-repo-and-sys-mods-arm64.sh
    permissions: "0755"
    content: |
      #!/usr/bin/env bash
      set -euo pipefail
      # arm64-only: this RaspiOS-like repo setup is meaningless elsewhere.
      arch="$(dpkg --print-architecture)"
      if [[ "$arch" != "arm64" ]]; then
        echo "arm64-only; detected '$arch' -> skipping" >&2
        exit 0
      fi
      export DEBIAN_FRONTEND=noninteractive
      apt-get update
      apt-get install -y --no-install-recommends ca-certificates curl gnupg util-linux
      install -d -m 0755 /usr/share/keyrings /etc/apt/preferences.d
      # Fetch + dearmor the Raspberry Pi archive signing key (idempotent).
      if [[ ! -s /usr/share/keyrings/raspberrypi-archive-keyring.gpg ]]; then
        curl -fsSL https://archive.raspberrypi.org/debian/raspberrypi.gpg.key \
          | gpg --dearmor -o /usr/share/keyrings/raspberrypi-archive-keyring.gpg
        chmod 0644 /usr/share/keyrings/raspberrypi-archive-keyring.gpg
      fi
      # (Optional apt pin, kept disabled.)
      #cat >/etc/apt/preferences.d/raspi.pref <<'EOF'
      #Package: *
      #Pin: origin "archive.raspberrypi.org"
      #Pin-Priority: 100
      #EOF
      # deb822-style source for the Raspberry Pi repo (idempotent).
      # NOTE: heredoc body sits at the YAML literal-block base indent so that
      # the written file and the EOF terminator have no leading whitespace.
      if [[ ! -f /etc/apt/sources.list.d/raspi.sources ]]; then
        cat >/etc/apt/sources.list.d/raspi.sources <<'EOF'
      Types: deb
      URIs: https://archive.raspberrypi.org/debian/
      Suites: trixie
      Components: main
      Signed-By: /usr/share/keyrings/raspberrypi-archive-keyring.gpg
      EOF
      fi
      apt-get update
      # Add custom /etc/rpi-issue for "rpios-like"
      if [[ ! -s /etc/rpi-issue ]]; then
        {
          echo "Raspberry Pi reference trixie-latest $(date -R)"
          echo "Emulated using debian-13-genericcloud"
        } >/etc/rpi-issue
      fi
      echo "---- /etc/rpi-issue ----"
      cat /etc/rpi-issue || true
      echo "------------------------"
runcmd:
  - [ bash, -lc, "/usr/local/sbin/setup-raspi-repo-and-sys-mods-arm64.sh" ]

View File

@ -7,6 +7,7 @@
# --clean Stop+delete+purge target VMs (by BASE-<number>, regardless of COUNT)
# Distro selection:
# --debian-13 Debian 13 only (sets IMAGE=$DEBIAN13_IMAGE_URL and BASE=deb13)
# --debian-13 Debian 13 only (sets IMAGE=$DEBIAN13_IMAGE_URL and BASE=deb13)
# --both-distros Run Ubuntu + Debian 13 in parallel: COUNT=N => 2N VMs (default order: interleaved)
# --first-ubuntu (with --both-distros) order: all Ubuntu first, then all Debian
# --first-debian (with --both-distros) order: all Debian first, then all Ubuntu
@ -15,11 +16,6 @@
# --pr 4122 (repeatable) add PR numbers passed to install.txt
# --run-pr 4122 same as --pr but also forces --run (alias: --test-pr)
#
# Cloud-init:
# --cloud-init FILE Apply cloud-init FILE to all VMs (Ubuntu + Debian)
# --cloud-init-rpios Apply RaspberryPiOS-style cloud-init only to Debian side
# (alias of --debian-13, but sets Debian BASE=rpios-d13)
#
# Env vars:
# IIAB_PR="4122 4191" Space-separated PRs
# (compat) IIAB_INSTALL_ARGS If set, used as fallback for IIAB_PR
@ -37,25 +33,14 @@
set -euo pipefail
DPKG_ARCH="$(dpkg --print-architecture)"
# Resolve paths relative to this script (so cloud-init files work from any cwd)
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
# Debian 13 (Trixie) official cloud image (qcow2). Multipass can launch from URL/file:// on Linux.
# Source: Debian cloud images live under cloud.debian.org/images/cloud/ ('genericcloud' includes cloud-init).
DEBIAN13_IMAGE_URL="${DEBIAN13_IMAGE_URL:-https://cloud.debian.org/images/cloud/trixie/latest/debian-13-genericcloud-${DPKG_ARCH}.qcow2}"
DEBIAN13_IMAGE_URL="${DEBIAN13_IMAGE_URL:-https://cloud.debian.org/images/cloud/trixie/latest/debian-13-genericcloud-amd64.qcow2}"
IMAGE="${IMAGE:-24.04}"
BASE="${BASE:-ubu2404}"
# cloud-init controls
CLOUD_INIT_FILE="${CLOUD_INIT_FILE:-}"
RPIOS_CLOUD_INIT_FILE="${RPIOS_CLOUD_INIT_FILE:-$SCRIPT_DIR/cloud-init-rpios-like-arm64.yaml}"
DEB_CLOUD_INIT_FILE="${DEB_CLOUD_INIT_FILE:-}"
RPIOS_MODE=0
COUNT="${COUNT:-1}"
[ "$DPKG_ARCH" = "arm64" ] && CPUS="${CPUS:-2}" # SBC don't have spare CPUs.
[ "$DPKG_ARCH" = "amd64" ] && CPUS="${CPUS:-3}"
CPUS="${CPUS:-3}"
MEM="${MEM:-4G}"
DISK="${DISK:-20G}"
@ -67,9 +52,7 @@ LOCAL_VARS_URL="${LOCAL_VARS_URL:-https://raw.githubusercontent.com/iiab/iiab/re
WAIT_TRIES="${WAIT_TRIES:-60}" # used ONLY for the first auto-resume
WAIT_SLEEP="${WAIT_SLEEP:-5}"
STAGGER="${STAGGER:-30}"
RESUME_TRIES="${RESUME_TRIES:-3}"
RESUME_RETRY_SLEEP="${RESUME_RETRY_SLEEP:-8}"
STAGGER="${STAGGER:-20}"
ACTION="run"
modules=()
@ -104,16 +87,12 @@ Image shortcuts:
--first-ubuntu With --both-distros: run all Ubuntu first, then Debian
--first-debian With --both-distros: run all Debian first, then Ubuntu
Cloud-init:
--cloud-init FILE Apply FILE to all VMs
--cloud-init-rpios Apply rpios cloud-init only to Debian side; BASE=rpios-d13 (Debian)
PR options:
--pr N Add PR number (repeatable)
--run-pr N Add PR number and force --run
Env:
IMAGE BASE COUNT CPUS MEM DISK IIAB_PR IIAB_FAST LOCAL_VARS_URL WAIT_TRIES WAIT_SLEEP STAGGER RESUME_TRIES RESUME_RETRY_SLEEP
IMAGE BASE COUNT CPUS MEM DISK IIAB_PR IIAB_FAST LOCAL_VARS_URL WAIT_TRIES WAIT_SLEEP STAGGER
EOF
}
@ -128,25 +107,12 @@ while [[ $# -gt 0 ]]; do
--first-ubuntu) FIRST_UBUNTU=1; shift ;;
--first-debian) FIRST_DEBIAN=1; shift ;;
--cloud-init)
[[ $# -lt 2 ]] && { echo "[ERROR] --cloud-init needs a file path"; exit 2; }
CLOUD_INIT_FILE="$2"; shift 2 ;;
--debian-13)
DEBIAN13_ONLY=1
IMAGE="$DEBIAN13_IMAGE_URL"
BASE="deb13"
shift ;;
--cloud-init-rpios)
# Debian-only rpios flavor: also implies Debian 13 selection
DEBIAN13_ONLY=1
RPIOS_MODE=1
IMAGE="$DEBIAN13_IMAGE_URL"
BASE="rpios-d13"
DEB_CLOUD_INIT_FILE="$RPIOS_CLOUD_INIT_FILE"
shift ;;
--module)
[[ $# -lt 2 ]] && { echo "[ERROR] --module needs a value"; exit 2; }
modules+=("$2"); shift 2 ;;
@ -169,7 +135,7 @@ while [[ $# -gt 0 ]]; do
esac
done
# Incoherency checks (fail fast)
# ---- Incoherency checks (fail fast) ----
if [[ "$FIRST_UBUNTU" == "1" && "$FIRST_DEBIAN" == "1" ]]; then
echo "[ERROR] Incoherent options: --first-ubuntu and --first-debian cannot be used together."
exit 2
@ -179,31 +145,7 @@ if [[ ( "$FIRST_UBUNTU" == "1" || "$FIRST_DEBIAN" == "1" ) && "$BOTH_DISTROS" !=
echo "[ERROR] --first-ubuntu/--first-debian requires --both-distros."
exit 2
fi
if ! [[ "${COUNT}" =~ ^[0-9]+$ ]] || (( COUNT < 1 )); then
echo "[ERROR] COUNT must be an integer >= 1 (got: '${COUNT}')" >&2
exit 2
fi
# ---
# Fail-fast validation for cloud-init files
if [[ -n "${CLOUD_INIT_FILE:-}" && ! -f "$CLOUD_INIT_FILE" ]]; then
echo "[ERROR] --cloud-init file not found on host: $CLOUD_INIT_FILE" >&2
exit 2
fi
if [[ -n "${DEB_CLOUD_INIT_FILE:-}" && ! -f "$DEB_CLOUD_INIT_FILE" ]]; then
echo "[ERROR] Debian cloud-init file not found on host: $DEB_CLOUD_INIT_FILE" >&2
exit 2
fi
# RPiOS flavor is meant for arm64 (rpios-like). Fail fast to avoid surprising runs.
if [[ "$RPIOS_MODE" == "1" && "$DPKG_ARCH" != "arm64" ]]; then
echo "[ERROR] --cloud-init-rpios is intended for arm64 hosts (detected: $DPKG_ARCH)." >&2
echo " If you really want to run it on non-arm64, pass a normal --cloud-init FILE instead." >&2
exit 2
fi
# ---
# ----
# Default module
if [[ "${#modules[@]}" -eq 0 ]]; then
@ -223,7 +165,6 @@ if [[ "$BOTH_DISTROS" == "1" ]]; then
UBU_BASE="$UBU_BASE_ORIG"
DEB_IMAGE="$DEBIAN13_IMAGE_URL"
DEB_BASE="deb13"
[[ "$RPIOS_MODE" == "1" ]] && DEB_BASE="rpios-d13"
else
UBU_IMAGE="$IMAGE"
UBU_BASE="$BASE"
@ -269,13 +210,12 @@ wait_all() {
# Escape BASE for regex usage
re_escape() { printf '%s' "$1" | sed -e 's/[].[^$*+?(){}|\\]/\\&/g'; }
declare -A VM_IMAGE VM_CLOUD_INIT
declare -A VM_IMAGE
names=()
build_vm_lists() {
names=()
VM_IMAGE=()
VM_CLOUD_INIT=()
if [[ "$BOTH_DISTROS" == "1" ]]; then
if [[ "$FIRST_UBUNTU" == "1" ]]; then
@ -284,13 +224,11 @@ build_vm_lists() {
local u="${UBU_BASE}-${n}"
names+=("$u")
VM_IMAGE["$u"]="$UBU_IMAGE"
VM_CLOUD_INIT["$u"]="$CLOUD_INIT_FILE"
done
for n in $(seq 0 $((COUNT-1))); do
local d="${DEB_BASE}-${n}"
names+=("$d")
VM_IMAGE["$d"]="$DEB_IMAGE"
VM_CLOUD_INIT["$d"]="${DEB_CLOUD_INIT_FILE:-$CLOUD_INIT_FILE}"
done
elif [[ "$FIRST_DEBIAN" == "1" ]]; then
@ -299,13 +237,11 @@ build_vm_lists() {
local d="${DEB_BASE}-${n}"
names+=("$d")
VM_IMAGE["$d"]="$DEB_IMAGE"
VM_CLOUD_INIT["$d"]="${DEB_CLOUD_INIT_FILE:-$CLOUD_INIT_FILE}"
done
for n in $(seq 0 $((COUNT-1))); do
local u="${UBU_BASE}-${n}"
names+=("$u")
VM_IMAGE["$u"]="$UBU_IMAGE"
VM_CLOUD_INIT["$u"]="$CLOUD_INIT_FILE"
done
else
@ -316,8 +252,6 @@ build_vm_lists() {
names+=("$u" "$d")
VM_IMAGE["$u"]="$UBU_IMAGE"
VM_IMAGE["$d"]="$DEB_IMAGE"
VM_CLOUD_INIT["$u"]="$CLOUD_INIT_FILE"
VM_CLOUD_INIT["$d"]="${DEB_CLOUD_INIT_FILE:-$CLOUD_INIT_FILE}"
done
fi
else
@ -325,11 +259,6 @@ build_vm_lists() {
local vm="${BASE}-${n}"
names+=("$vm")
VM_IMAGE["$vm"]="$IMAGE"
if [[ "$DEBIAN13_ONLY" == "1" ]]; then
VM_CLOUD_INIT["$vm"]="${DEB_CLOUD_INIT_FILE:-$CLOUD_INIT_FILE}"
else
VM_CLOUD_INIT["$vm"]="$CLOUD_INIT_FILE"
fi
done
fi
}
@ -387,20 +316,14 @@ cleanup_vms() {
launch_one() {
local vm="$1"
local img="${VM_IMAGE[$vm]:-}"
[[ -n "$img" ]] || { echo "[ERROR] No image mapping for VM '$vm'"; return 2; }
# Cloud-init file to apply (already validated earlier in fail-fast checks)
local ci="${VM_CLOUD_INIT[$vm]:-}"
local -a ci_args=()
[[ -n "${ci:-}" ]] && ci_args=(--cloud-init "$ci")
[[ -z "$img" ]] && { echo "[ERROR] No image mapping for VM '$vm'"; return 2; }
if vm_exists "$vm"; then
echo "[INFO] VM already exists: $vm"
return 0
fi
echo "[INFO] Launching $vm ..."
multipass launch "$img" -n "$vm" -c "$CPUS" -m "$MEM" -d "$DISK" "${ci_args[@]}" >/dev/null
multipass launch "$img" -n "$vm" -c "$CPUS" -m "$MEM" -d "$DISK" >/dev/null
}
run_install_txt() {
@ -410,17 +333,6 @@ run_install_txt() {
log="$LOGROOT/${vm}.install.${t}.log"
rc="$LOGROOT/${vm}.install.${t}.rc"
echo "[INFO] Waiting for VM readiness before install: $vm"
if ! wait_for_vm "$vm"; then
{
echo "[INFO] Waiting for VM readiness before install: $vm"
echo "[ERROR] VM did not become ready in time (install phase): $vm"
} >"$log"
echo "88" >"$rc"
set_latest_links "$vm" "install" "$log" "$rc"
return 88
fi
echo "[INFO] install.txt in $vm (log $(basename "$log")) ..."
local modules_str pr_str
@ -507,7 +419,7 @@ run_install_txt() {
resume_iiab() {
local vm="$1"
local do_long_wait="$2"
local do_long_wait="$2" # 1 => wait_for_vm (only for first auto-resume), 0 => no long wait
local t log rc
t="$(stamp)"
log="$LOGROOT/${vm}.resume.${t}.log"
@ -516,7 +428,7 @@ resume_iiab() {
echo "[INFO] resume (iiab -f) in $vm (log $(basename "$log")) ..."
set +e
(
{
multipass start "$vm" >/dev/null 2>&1 || true
if [[ "$do_long_wait" == "1" ]]; then
@ -527,56 +439,18 @@ resume_iiab() {
fi
fi
# Retry on rc=255 (SSH connection dropped; often reboot/network restart during apt/upgrade)
# Also neutralize apt-listchanges/pagers to avoid "Waiting for data..." stalls.
r=1
for attempt in $(seq 1 "$RESUME_TRIES"); do
echo "[INFO] Resume attempt ${attempt}/${RESUME_TRIES} on $vm"
multipass exec "$vm" -- bash -lc '
set -euo pipefail
echo "--- resume: sudo /usr/sbin/iiab -f ---"
if ! sudo test -x /usr/sbin/iiab; then
echo "[ERROR] /usr/sbin/iiab not found/executable; install likely not finished."
exit 89
fi
# Avoid interactive/pager behaviour during apt actions that IIAB may trigger.
# (apt-listchanges commonly causes: "Waiting for data... (interrupt to abort)")
sudo env \
DEBIAN_FRONTEND=noninteractive \
APT_LISTCHANGES_FRONTEND=none \
NEEDRESTART_MODE=a \
TERM=dumb \
PAGER=cat \
/usr/sbin/iiab -f
echo "--- resume done ---"
'
r=$?
if [[ "$r" -eq 0 ]]; then
break
multipass exec "$vm" -- bash -lc '
set -euo pipefail
echo "--- resume: sudo iiab -f ---"
if command -v iiab >/dev/null 2>&1; then
sudo iiab -f
else
echo "[ERROR] iiab command not found; install likely not finished."
exit 89
fi
if [[ "$r" -eq 255 ]]; then
echo "[WARN] multipass exec rc=255 (connection dropped; likely reboot). Waiting for VM and retrying..."
if ! wait_for_vm "$vm"; then
echo "[ERROR] VM did not become ready after reconnect wait: $vm"
r=88
break
fi
sleep "$RESUME_RETRY_SLEEP"
continue
fi
# Any other non-zero: don't loop forever.
break
done
exit "$r"
) >"$log" 2>&1
echo "--- resume done ---"
'
} >"$log" 2>&1
echo "$?" >"$rc"
set -e
@ -593,7 +467,9 @@ summary() {
[[ -f "$LOGROOT/latest.${vm}.install.rc" ]] && ir="$(cat "$LOGROOT/latest.${vm}.install.rc" 2>/dev/null || echo n/a)"
[[ -f "$LOGROOT/latest.${vm}.resume.rc" ]] && rr="$(cat "$LOGROOT/latest.${vm}.resume.rc" 2>/dev/null || echo n/a)"
printf "%-12s %-8s %-8s %s\n" "$vm" "$ir" "$rr" "latest.${vm}.install.log / latest.${vm}.resume.log"
printf "%-12s %-8s %-8s %s\n" \
"$vm" "$ir" "$rr" \
"latest.${vm}.install.log / latest.${vm}.resume.log"
done
echo
@ -642,7 +518,7 @@ pipeline_parallel_stagger() {
run_install_txt "$vm"
# Only Ubuntu tends to reboot during install; Debian often doesn't.
local waitflag=1
if [[ "$vm" == deb13-* ]]; then
if [[ "${VM_IMAGE[$vm]}" == "$DEBIAN13_IMAGE_URL" ]]; then
waitflag=0
fi
resume_iiab "$vm" "$waitflag"

View File

@ -1,397 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# -----------------------------------------------------------------------------
# cotg_curl_stats.ansiblelike.sh
#
# Stress-test helper that mimics the Ansible playbook logic:
# - "uri" to fetch the APK index with retries/delay/until
# - optional fallback "uri" to a WP JSON endpoint (best-effort)
# - (optional) "get_url"-like APK download with retries, without *.apk.1 files
#
# Positional args (kept compatible with your original script):
# 1: N (default: 300)
# 2: SLEEP_SEC (default: 0.5)
#
# Environment variables:
# UA=ansible-httpget
# RETRIES=5
# DELAY=5
# JSON_URL=... # like code_fetch_apk_url_json (optional)
# DOWNLOAD_BASE=... # like code_download_url (optional, used if DO_DOWNLOAD=1)
# DO_DOWNLOAD=0|1 # if 1, download the latest armv8a APK each loop
# DOWNLOAD_DIR=... # where to store downloaded APKs (default: tmpdir/apk)
# DOWNLOAD_TIMEOUT=60 # curl max-time
# CURL_HTTP=--http1.1 # force HTTP version (Ansible's urllib is typically HTTP/1.1)
# CURL_EXTRA_ARGS="" # extra curl args (space-separated)
# -----------------------------------------------------------------------------
# ---- Configuration: positional args, then env-var overrides ----
URL="https://www.appdevforall.org/codeonthego/"
N="${1:-500}"          # number of iterations
SLEEP_SEC="${2:-0.5}"  # pause between iterations (seconds)
UA="${UA:-ansible-httpget}"  # User-Agent string sent to the server
RETRIES="${RETRIES:-5}"
DELAY="${DELAY:-5}"
JSON_URL="${JSON_URL:-https://www.appdevforall.org/wp-json/wp/v2/pages/2223}"
DOWNLOAD_BASE="${DOWNLOAD_BASE:-}"
DO_DOWNLOAD="${DO_DOWNLOAD:-0}"
DOWNLOAD_TIMEOUT="${DOWNLOAD_TIMEOUT:-60}"  # curl --max-time, seconds
CURL_HTTP="${CURL_HTTP:---http1.1}"         # HTTP-version flag passed to curl
CURL_EXTRA_ARGS="${CURL_EXTRA_ARGS:-}"      # extra curl args, space-separated
# Patterns (defaults aligned to the playbook vars)
APK_RE="${APK_RE:-CodeOnTheGo-.*?armv8a\\.apk}"
CF_RE='(cloudflare|cf-chl|just a moment|attention required)'
# "uri" logic in your playbook treats these as terminal statuses for the index fetch
ALLOWED_INDEX_STATUS=(200 403 404)
# Scratch area; auto-removed on any exit path via the EXIT trap.
tmpdir="$(mktemp -d)"
trap 'rm -rf "$tmpdir"' EXIT
DOWNLOAD_DIR="${DOWNLOAD_DIR:-$tmpdir/apk}"
mkdir -p "$DOWNLOAD_DIR"
# Stats
declare -A status_count=()      # index HTTP status -> count ("undefined" when 0)
declare -A apk_count=()         # apk_found / apk_missing -> count
declare -A cf_count=()          # cf_like / no_cf -> count
declare -A blocked_count=()     # 0/1 -> count
declare -A retry_hist_index=()  # retries used -> count (index fetch)
declare -A retry_hist_json=()   # retries used -> count (JSON fallback)
declare -A retry_hist_apk=()    # retries used -> count (APK download)
min_size=""
max_size=""
sum_size=0
# Fold one observed body size (bytes) into the min/max/sum globals.
note_size() {
  local n="$1"
  if [[ -z "$min_size" ]] || (( n < min_size )); then
    min_size="$n"
  fi
  if [[ -z "$max_size" ]] || (( n > max_size )); then
    max_size="$n"
  fi
  sum_size=$(( sum_size + n ))
}
# Return 0 iff the first argument equals one of the remaining arguments.
in_list() {
  local needle="$1" item
  shift
  for item in "$@"; do
    if [[ "$item" == "$needle" ]]; then
      return 0
    fi
  done
  return 1
}
# curl -> files; prints the final HTTP status code on stdout.
# Prints 0 ("status undefined", like Ansible) when curl fails hard or no
# HTTP status line was captured. Header/body files are written regardless.
#   $1 url  $2 body file  $3 header file  $4 optional Accept header value
curl_fetch() {
  local url="$1" body="$2" hdr="$3" accept_header="${4:-}"
  local rc status
  local -a extra=()
  # Allow tuning curl behavior to get closer to what Ansible's urllib does.
  # (Example: force HTTP/1.1 instead of opportunistic HTTP/2.)
  # FIX: only append when non-empty — an empty "" argv element makes curl
  # treat it as a (bad) URL and fail every request.
  [[ -n "$CURL_HTTP" ]] && extra+=("$CURL_HTTP")
  if [[ -n "$CURL_EXTRA_ARGS" ]]; then
    # shellcheck disable=SC2206  # intentional word-splitting of user args
    extra+=( $CURL_EXTRA_ARGS )
  fi
  : >"$hdr"
  : >"$body"
  # Disable -e locally to collect rc + still return 0 status on failures
  set +e
  if [[ -n "$accept_header" ]]; then
    curl -sS -L -A "$UA" -m "$DOWNLOAD_TIMEOUT" "${extra[@]}" -H "Accept: $accept_header" -D "$hdr" -o "$body" "$url"
  else
    curl -sS -L -A "$UA" -m "$DOWNLOAD_TIMEOUT" "${extra[@]}" -D "$hdr" -o "$body" "$url"
  fi
  rc=$?
  set -e
  # status = last HTTP status in the redirect chain
  status="$(awk '/^HTTP\//{code=$2} END{print code+0}' "$hdr" 2>/dev/null)"
  # If curl failed hard, treat as "status undefined"
  if [[ $rc -ne 0 || -z "$status" || "$status" -eq 0 ]]; then
    echo 0
    return 0
  fi
  echo "$status"
}
# Mimic the playbook's index "uri" task:
#   retries: RETRIES   delay: DELAY
#   until: status is defined and status in ALLOWED_INDEX_STATUS
#   failed_when: status undefined or not in the allowed list
# Prints "<status> <attempts>"; attempts = number of tries actually used.
fetch_index_like_ansible() {
  local url="$1" body="$2" hdr="$3"
  local try status
  for (( try = 1; try <= RETRIES; try++ )); do
    status="$(curl_fetch "$url" "$body" "$hdr")"
    # Terminal condition: a defined status that is in the allowed list.
    if [[ "$status" -ne 0 ]] && in_list "$status" "${ALLOWED_INDEX_STATUS[@]}"; then
      echo "$status $try"
      return 0
    fi
    [[ $try -lt $RETRIES ]] && sleep "$DELAY"
  done
  # Retries exhausted; status may be 0 or a non-allowed code.
  echo "$status $RETRIES"
}
# Mimic the playbook's JSON fallback "uri" task:
#   retries: RETRIES   delay: DELAY
#   until: status is defined   failed_when: false (best-effort)
# Prints "<status> <attempts>", status 0 if no HTTP status was ever seen.
fetch_json_best_effort() {
  local url="$1" body="$2" hdr="$3"
  local try status
  for (( try = 1; try <= RETRIES; try++ )); do
    status="$(curl_fetch "$url" "$body" "$hdr" 'application/json')"
    # Any defined HTTP status ends the retry loop (best-effort semantics).
    if [[ "$status" -ne 0 ]]; then
      echo "$status $try"
      return 0
    fi
    [[ $try -lt $RETRIES ]] && sleep "$DELAY"
  done
  echo "0 $RETRIES"
}
# Extract content.rendered from a WP JSON response (similar to Ansible's
# parsed json). Prints the rendered HTML to stdout, or nothing on failure.
# FIX: the embedded Python heredoc lost its indentation (try/with bodies at
# column 0 raise IndentationError); restored to valid Python.
extract_wp_rendered() {
  local json_file="$1"
  if ! command -v python3 >/dev/null 2>&1; then
    # Very rough fallback (not JSON-safe); prefer python3.
    sed -n 's/.*"rendered"[[:space:]]*:[[:space:]]*"\(.*\)".*/\1/p' "$json_file" | head -n 1
    return
  fi
  python3 - "$json_file" <<'PY'
import json, sys
p = sys.argv[1]
try:
    with open(p, 'r', encoding='utf-8', errors='replace') as f:
        data = json.load(f)
    rendered = data.get('content', {}).get('rendered', '')
    if isinstance(rendered, str):
        sys.stdout.write(rendered)
except Exception:
    pass
PY
}
# get_url-like download with retries/delay, Ansible-style:
#   until: succeeded (HTTP 200 and a non-empty file)
# Downloads to a temp ".part" file then atomically renames to dest, so the
# final name is exactly dest and no *.apk.1 duplicates appear.
# Prints: "<ok(0|1)> <attempts> <http_status>"
download_apk_like_ansible() {
  local url="$1" dest="$2"
  local try status part hdr
  hdr="$tmpdir/apk.hdr"
  for (( try = 1; try <= RETRIES; try++ )); do
    part="${dest}.part.$$"
    status="$(curl_fetch "$url" "$part" "$hdr")"
    if [[ "$status" -eq 200 && -s "$part" ]]; then
      mv -f "$part" "$dest"
      echo "1 $try $status"
      return 0
    fi
    rm -f "$part"
    [[ $try -lt $RETRIES ]] && sleep "$DELAY"
  done
  echo "0 $RETRIES ${status:-0}"
}
# ---- Banner: show the effective configuration ----
echo "URL: $URL"
echo "Iterations: $N Sleep: ${SLEEP_SEC}s UA: $UA"
echo "RETRIES: $RETRIES DELAY: ${DELAY}s"
[[ -n "$JSON_URL" ]] && echo "JSON_URL: $JSON_URL"
[[ -n "$DOWNLOAD_BASE" ]] && echo "DOWNLOAD_BASE: $DOWNLOAD_BASE"
[[ "$DO_DOWNLOAD" == "1" ]] && echo "DO_DOWNLOAD: 1 (dir: $DOWNLOAD_DIR)"
echo
# ---- Main loop: one Ansible-like fetch cycle per iteration ----
for ((i=1; i<=N; i++)); do
# Per-iteration scratch files (under tmpdir; removed by the EXIT trap).
body="$tmpdir/body.$i.html"
hdr="$tmpdir/hdr.$i.txt"
json_body="$tmpdir/json.$i.json"
json_hdr="$tmpdir/jsonhdr.$i.txt"
rendered_body="$tmpdir/rendered.$i.html"
# 1) Fetch index like Ansible 'uri' task
# (process substitution keeps this loop in the current shell, so the
# stats arrays updated below persist after the loop)
read -r idx_status idx_attempts < <(fetch_index_like_ansible "$URL" "$body" "$hdr")
idx_retries_used=$((idx_attempts - 1))
# classify status for stats
if [[ "$idx_status" -eq 0 ]]; then
status_key="undefined" # like "status is undefined"
else
status_key="$idx_status"
fi
status_count["$status_key"]=$(( ${status_count["$status_key"]:-0} + 1 ))
retry_hist_index["$idx_retries_used"]=$(( ${retry_hist_index["$idx_retries_used"]:-0} + 1 ))
size="$(wc -c < "$body" | tr -d ' ')"
note_size "$size"
# 2) Detect whether APK links exist
has_apk=0
if grep -Eqo "$APK_RE" "$body"; then
has_apk=1
fi
# 3) Optional JSON fallback if no APK link found
json_status=0
json_attempts=0
if [[ "$has_apk" -eq 0 && -n "$JSON_URL" ]]; then
read -r json_status json_attempts < <(fetch_json_best_effort "$JSON_URL" "$json_body" "$json_hdr")
json_retries_used=$((json_attempts - 1))
retry_hist_json["$json_retries_used"]=$(( ${retry_hist_json["$json_retries_used"]:-0} + 1 ))
# If 200 and we can extract rendered HTML, replace the body (mimic set_fact update)
if [[ "$json_status" -eq 200 ]]; then
extracted="$(extract_wp_rendered "$json_body" || true)"
if [[ -n "$extracted" ]]; then
printf '%s' "$extracted" > "$rendered_body"
body_to_parse="$rendered_body"
else
body_to_parse="$body"
fi
else
body_to_parse="$body"
fi
# re-check APK links after fallback
if grep -Eqo "$APK_RE" "$body_to_parse"; then
has_apk=1
fi
else
body_to_parse="$body"
fi
# 4) Mimic code_blocked_by_cdn condition:
# blocked if 403 OR (apk_missing even after fallback)
blocked=0
if [[ "$idx_status" -eq 403 || "$has_apk" -eq 0 ]]; then
blocked=1
fi
blocked_count["$blocked"]=$(( ${blocked_count["$blocked"]:-0} + 1 ))
# Additional signal (not in Ansible, but useful)
if grep -Eqi "$CF_RE" "$body_to_parse"; then
cf_count["cf_like"]=$(( ${cf_count["cf_like"]:-0} + 1 ))
else
cf_count["no_cf"]=$(( ${cf_count["no_cf"]:-0} + 1 ))
fi
if [[ "$has_apk" -eq 1 ]]; then
apk_count["apk_found"]=$(( ${apk_count["apk_found"]:-0} + 1 ))
else
apk_count["apk_missing"]=$(( ${apk_count["apk_missing"]:-0} + 1 ))
fi
# 5) Optional APK download like Ansible get_url
# (skipped when blocked, mirroring the playbook's conditional task)
dl_ok=0
dl_attempts=0
dl_status=0
if [[ "$DO_DOWNLOAD" == "1" && "$blocked" -eq 0 ]]; then
# Match playbook behavior: Jinja2 `sort` is lexicographic.
apk_name="$(grep -Eo "$APK_RE" "$body_to_parse" | LC_ALL=C sort | tail -n 1 || true)"
if [[ -n "$apk_name" ]]; then
# Match Ansible playbook behavior: download comes from a separate base domain.
# If DOWNLOAD_BASE is not set, fall back to the index URL's directory.
if [[ -n "$DOWNLOAD_BASE" ]]; then
apk_url="${DOWNLOAD_BASE%/}/$apk_name"
else
apk_url="${URL%/}/$apk_name"
fi
apk_dest="$DOWNLOAD_DIR/$apk_name"
read -r dl_ok dl_attempts dl_status < <(download_apk_like_ansible "$apk_url" "$apk_dest")
dl_retries_used=$((dl_attempts - 1))
retry_hist_apk["$dl_retries_used"]=$(( ${retry_hist_apk["$dl_retries_used"]:-0} + 1 ))
fi
fi
# progress line (carriage return, overwritten each iteration)
printf "[%03d/%03d] idx=%s(attempts=%s) apk=%s blocked=%s json=%s(attempts=%s)\r" \
"$i" "$N" \
"${idx_status:-0}" "$idx_attempts" \
"$([[ "$has_apk" -eq 1 ]] && echo Y || echo N)" \
"$blocked" \
"${json_status:-0}" "${json_attempts:-0}"
sleep "$SLEEP_SEC"
done
# ---- Summary: dump the accumulated counters ----
echo -e "\n\n==== SUMMARY (Ansible-like) ===="
echo "Index status counts (0 means status undefined):"
# Piping the loop into sort orders rows by count, descending.
for k in "${!status_count[@]}"; do
printf " %-10s %d\n" "$k" "${status_count[$k]}"
done | sort -k2,2nr
echo
echo "APK armv8a links:"
for k in "${!apk_count[@]}"; do
printf " %-12s %d\n" "$k" "${apk_count[$k]}"
done | sort -k2,2nr
echo
echo "Blocked (Ansible condition: idx==403 OR apk_missing):"
printf " blocked=1 %d\n" "${blocked_count[1]:-0}"
printf " blocked=0 %d\n" "${blocked_count[0]:-0}"
echo
echo "Retry histogram (retries used):"
echo " Index fetch (retries used -> count):"
for k in "${!retry_hist_index[@]}"; do
printf " %s -> %d\n" "$k" "${retry_hist_index[$k]}"
done | sort -n -k1,1
if [[ -n "$JSON_URL" ]]; then
echo " JSON fallback (retries used -> count):"
for k in "${!retry_hist_json[@]}"; do
printf " %s -> %d\n" "$k" "${retry_hist_json[$k]}"
done | sort -n -k1,1
fi
if [[ "$DO_DOWNLOAD" == "1" ]]; then
echo " APK download (retries used -> count):"
for k in "${!retry_hist_apk[@]}"; do
printf " %s -> %d\n" "$k" "${retry_hist_apk[$k]}"
done | sort -n -k1,1
fi
echo
# Integer average (truncated); N >= 1 per the defaulted positional arg.
avg_size=$((sum_size / N))
echo "Body size bytes: min=$min_size max=$max_size avg=$avg_size"
echo
if [[ "$DO_DOWNLOAD" == "1" ]]; then
echo "Downloaded APKs are in: $DOWNLOAD_DIR"
fi
echo "Note: files are saved under $tmpdir and will be auto-removed at exit (trap)."