Skip to content

Commit

Permalink
Update: Add logic to download the last 10 runs and select two to compare
Browse files Browse the repository at this point in the history
Previously only the last 2 runs were downloaded, which meant the script would break if either of those runs had failed for some reason.

Now we download the last 10 runs and, from those, select the run that is currently in progress together with the most recent previous run whose status is success. This ensures the script can consistently find two valid runs to compare.
  • Loading branch information
sihammill committed Feb 17, 2025
1 parent 7d742f4 commit ff42d4e
Showing 1 changed file with 66 additions and 53 deletions.
119 changes: 66 additions & 53 deletions tf_compare.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,72 +55,85 @@ def get_workflow_runs():
headers['Authorization'] = f'Bearer {github_token}'
headers['Accept'] = 'application/vnd.github.v3+json'

response = requests.get(f"https://api.github.com/repos/{repos[1]}/actions/workflows/{workflow}/runs?per_page=2", headers=headers, stream=True)
response = requests.get(f"https://api.github.com/repos/{repos[1]}/actions/workflows/{workflow}/runs?per_page=10", headers=headers, stream=True)

if response.status_code == 200:
# Parse the JSON response
artifact_data = response.json()

workflow_runs = artifact_data.get("workflow_runs", [])
# Filter the runs for 'success' or 'in_progress' status (conclusion)
valid_runs = [run for run in artifact_data['workflow_runs'] if run['conclusion'] in ['success', 'in_progress']]

branch = workflow_runs[0]['head_branch']
# check that there is at least one run that is a success
if [run for run in valid_runs['workflow_runs'] if run['conclusion'] in ['success']]:

# download workflow run log for new run that is currently in progress
allowed_workflows = {'Beta', 'Monthly', 'Red'}
if workflow_runs[0]['display_title'] in allowed_workflows and workflow_runs[1]['display_title'] in allowed_workflows and branch == 'develop':

for index, run in enumerate(workflow_runs, start=1):
# add workflow run id to array
_artifactRunID.append(run['id'])
# Sort by the 'created' field in descending order to get the latest first
sorted_runs = sorted(valid_runs, key=lambda x: datetime.fromisoformat(x['created_at'].replace('Z', '+00:00')), reverse=True)

if index == 1:
url = f"https://api.github.com/repos/{repos[1]}/actions/runs/{run['id']}/logs"
workflow_runs = []

# Download logs
print(f"Downloading Log files for run: {run['id']}")
response = requests.get(url, headers=headers)
time.sleep(1)
if response.status_code == 200:
print(f"Log files downloaded for run: {run['id']}")
with open("logs.zip", "wb") as f:
f.write(response.content)
testing = unzip_log_files()

with open(testing[0], "r") as file:
content = file.read()
match = re.search(r"Runtime: Version\s*(.*)", content)
#return match.group(1) if match else None # Returns only the version part
global RTVersion
RTVersion = match.group(1)
print(f"Log files processed and Runtime version extracted: RT v{RTVersion}")

# remove log files
# Get all files in the directory
files = glob.glob(os.path.join(baseSaveLocation, "logs.zip"))

for file in files:
if os.path.isfile(file): # Ensure it's a file (not a folder)
# Add the latest 2 runs to an array
for run in sorted_runs[:2]:
workflow_runs.append(run)

branch = workflow_runs[0]['head_branch']

# download workflow run log for new run that is currently in progress
allowed_workflows = {'Beta', 'Monthly', 'Red'}
if workflow_runs[0]['display_title'] in allowed_workflows and workflow_runs[1]['display_title'] in allowed_workflows and branch == 'develop':

for index, run in enumerate(workflow_runs, start=1):
# add workflow run id to array
_artifactRunID.append(run['id'])

if index == 1:
url = f"https://api.github.com/repos/{repos[1]}/actions/runs/{run['id']}/logs"

# Download logs
print(f"Downloading Log files for run: {run['id']}")
response = requests.get(url, headers=headers)
time.sleep(1)
if response.status_code == 200:
print(f"Log files downloaded for run: {run['id']}")
with open("logs.zip", "wb") as f:
f.write(response.content)
testing = unzip_log_files()

with open(testing[0], "r") as file:
content = file.read()
match = re.search(r"Runtime: Version\s*(.*)", content)
#return match.group(1) if match else None # Returns only the version part
global RTVersion
RTVersion = match.group(1)
print(f"Log files processed and Runtime version extracted: RT v{RTVersion}")

# remove log files
# Get all files in the directory
files = glob.glob(os.path.join(baseSaveLocation, "logs.zip"))

for file in files:
if os.path.isfile(file): # Ensure it's a file (not a folder)
try:
os.remove(file)
print("Log files deleted.\n")
except Exception as e:
print(f"Error deleting Log Files {file}: {e}")

# Define the folder to be deleted
folder_to_delete = os.path.join(baseSaveLocation, "CI")
if os.path.exists(folder_to_delete) and os.path.isdir(folder_to_delete):
try:
os.remove(file)
print("Log files deleted.\n")
shutil.rmtree(folder_to_delete)
except Exception as e:
print(f"Error deleting Log Files {file}: {e}")

# Define the folder to be deleted
folder_to_delete = os.path.join(baseSaveLocation, "CI")
if os.path.exists(folder_to_delete) and os.path.isdir(folder_to_delete):
try:
shutil.rmtree(folder_to_delete)
except Exception as e:
print(f"Error deleting folder {folder_to_delete}: {e}")
else:
print("Failed to fetch logs:", response.text)
get_artifact_URL()
else:
print("Valid workflow not used, only Beta, Monthly or Red on the develop branch is accepted for the TF Compare script")
print(f"Error deleting folder {folder_to_delete}: {e}")
else:
print("Failed to fetch logs:", response.text)
get_artifact_URL()
else:
print("Valid workflow not used, only Beta, Monthly or Red on the develop branch is accepted for the TF Compare script")
print(f"No successful previous runs available in the last 10 downloaded")
else:
print(f"Failed to download artifact. HTTP Status: {response.status_code}")
sys.exit(response.text) # Print error details



Expand Down

0 comments on commit ff42d4e

Please sign in to comment.