Coverage for hiperta_stream/scripts/create_log_summary.py: 0%
28 statements
« prev ^ index » next coverage.py v7.4.3, created at 2024-07-16 10:16 +0000
1#!/usr/bin/env python
3# E. Garcia - Mar 2021
5import os
6import argparse
def read_main_log_file(log_file):
    """Read the jobids_hiperta_stream_$TIMESTAMP.txt file.

    Parameters
    ----------
    log_file : str
        Path to the ``jobids_hiperta_stream_$TIMESTAMP.txt`` file.

    Returns
    -------
    job_ids : str
        Comma-separated job ids read from the first line of the file.
    log_dir : str
        Path to the ``logs_RTA_$TIMESTAMP`` directory expected to sit
        next to *log_file* (not checked for existence here).
    timestamp : str
        Timestamp extracted from the file name.
    """
    with open(log_file, 'r') as f:
        job_ids = f.readline().strip('\n')

    # Extract the timestamp from the file *name* only. The previous
    # implementation split the whole path on '.', which broke whenever a
    # parent directory (or user name) contained a dot, e.g.
    # /home/user.name/jobids_hiperta_stream_TS.txt.
    stem = os.path.splitext(os.path.basename(log_file))[0]
    timestamp = stem.split('jobids_hiperta_stream_')[-1]

    log_dir = os.path.join(
        os.path.dirname(os.path.abspath(log_file)),
        f"logs_RTA_{timestamp}"
    )

    return job_ids, log_dir, timestamp
def parse_jobs_batched_by_rta(job_ids, log_dir, timestamp):
    """Find jobs batched by the jobs listening to the RTA process and streams.

    Writes ``summary_log_$TIMESTAMP.txt`` in the current working directory.
    For every job id in *job_ids*, the corresponding ``slurm-<jid>.out`` in
    *log_dir* is scanned: its first line is copied verbatim, and every
    ``slurm_exec`` line yields one summary line ``<batched_jobid> : <job_type>``.

    Parameters
    ----------
    job_ids : str
        Comma-separated slurm job ids (as read by `read_main_log_file`).
    log_dir : str
        Directory containing the ``slurm-<jid>.out`` files.
    timestamp : str
        Timestamp used to name the output summary file.
    """
    # 'w' instead of 'w+': the summary is only written, never read back.
    with open(f'summary_log_{timestamp}.txt', 'w') as fileout:
        for jid in job_ids.split(','):
            with open(os.path.join(log_dir, f'slurm-{jid}.out'), 'r') as job_log:
                list_job_log = job_log.readlines()
            for i, line in enumerate(list_job_log):
                if i == 0:
                    fileout.write(line)
                if line.startswith('slurm_exec'):
                    # The batched job id is the second-to-last token,
                    # with a trailing ')' stripped (slurm_exec output format
                    # — assumed from the original parsing; verify against a
                    # real log if the format changes).
                    batched_jobid = line.split()[-2].replace(')', '')
                    # Guard: the original indexed i+1 unconditionally and
                    # raised IndexError when 'slurm_exec' was the last line.
                    if i + 1 < len(list_job_log):
                        job_type = list_job_log[i + 1].split('-J ')[-1].split(' --wrap')[0]
                    else:
                        job_type = 'unknown'
                    fileout.write(f"{batched_jobid} : {job_type}\n")
def main():
    """Command-line entry point: build the summary log for one RTA run.

    Reads the ``jobids_hiperta_stream_$TIMESTAMP.txt`` file given with
    ``-i/--input_file`` and writes ``summary_log_$TIMESTAMP.txt`` in the
    current working directory.
    """
    parser = argparse.ArgumentParser(description="Create summary log with all the run and batched hiperta_stream "
                                                 "jobs.")

    # required=True replaces default=None: omitting -i previously crashed
    # later with `TypeError` in open(None); argparse now reports a clean
    # "argument required" error instead.
    parser.add_argument('--input_file', '-i', action='store', type=str,
                        dest='input_file',
                        help='TEMPORAL IMPLEMENTATION. Path to jobids_hiperta_stream_$TIMESTAMP.txt file',
                        required=True
                        )

    args = parser.parse_args()

    jobids, logs_dir, timestamp = read_main_log_file(args.input_file)

    parse_jobs_batched_by_rta(jobids, logs_dir, timestamp)


if __name__ == '__main__':
    main()