|
| 1 | +from __future__ import annotations |
| 2 | + |
| 3 | +import csv |
| 4 | +import pprint |
| 5 | +import re |
| 6 | +from collections import defaultdict |
| 7 | + |
| 8 | + |
| 9 | +def testing_n_threads(f_in, data): |
| 10 | + threads = re.match(r"Testing (?P<n_threads>.*) threads", f_in.readline()).group("n_threads") |
| 11 | + seconds = re.match( |
| 12 | + r"All threads completed after (?P<n_seconds>.*) seconds", f_in.readline() |
| 13 | + ).group("n_seconds") |
| 14 | + tries = re.match(r"Total number of retry attempts: (?P<n_tries>.*)", f_in.readline()).group( |
| 15 | + "n_tries" |
| 16 | + ) |
| 17 | + data[f"{threads}_sec"].append(float(seconds)) |
| 18 | + data[f"{threads}_try"].append(int(tries)) |
| 19 | + return data |
| 20 | + |
| 21 | + |
def read_table(f_in, data):
    """Parse a pipe-delimited statistics table from *f_in* into *data*.

    The first row is treated as a header and discarded.  Rows containing a
    literal ``"threads "`` cell are skipped (repeated header lines), as are
    rows that are empty after stripping.  Each remaining row is expected to
    hold a thread count followed by five statistics (avg, p50, p90, p99,
    p100), which are appended to ``data[f"{threads}_{stat}"]``.

    Returns the same *data* mapping, for chaining.
    """
    # '|' is the column separator in the benchmark output tables.
    rows = csv.reader(f_in, delimiter="|")
    next(rows)  # discard the header row

    for raw_row in rows:
        # Repeated header lines carry a raw (unstripped) "threads " cell.
        if "threads " in raw_row:
            continue
        cells = [cell.strip() for cell in raw_row]
        if not cells:
            continue
        thread_count = int(cells[0])
        # Unpacking the generator raises ValueError unless exactly five
        # statistics are present, matching strict row-width expectations.
        avg, p50, p90, p99, p100 = (float(cell) for cell in cells[1:])
        data[f"{thread_count}_avg"].append(avg)
        data[f"{thread_count}_p50"].append(p50)
        data[f"{thread_count}_p90"].append(p90)
        data[f"{thread_count}_p99"].append(p99)
        data[f"{thread_count}_p100"].append(p100)
    return data
| 43 | + |
| 44 | + |
# Location of the benchmark output files and the run names to compare.
path = "/Users/iris.ho/Github/backpressure/final"
files = ["main", "local_original_1.5", "local_original_2", "local_server_algo"]
# Thread counts exercised by each benchmark run, in file order.
THREADS = [1, 2, 4, 8, 16, 32, 64, 128, 256]
# Statistic labels emitted by read_table(), keyed as f"{threads}_{stat}".
STATS = ("avg", "p50", "p90", "p99", "p100")

print_data = {}
for f in files:
    data = defaultdict(list)
    with open(f"{path}/{f}.txt") as f_in:
        # Each file begins with one three-line "Testing N threads" stanza
        # per thread count, each followed by two filler lines, and ends
        # with a pipe-delimited summary table.
        for _ in THREADS:
            data = testing_n_threads(f_in, data)
            f_in.readline()
            f_in.readline()
        data = read_table(f_in, data)
    # Reshape into per-statistic lists ordered by thread count.
    print_data[f] = {
        stat: [data[f"{thread}_{stat}"] for thread in THREADS] for stat in STATS
    }
print(print_data)  # noqa: T201
0 commit comments