
Commit

Add datasets for one experiment
steckhelena committed Jul 12, 2022
1 parent 6c7b9f1 commit a3cdec5
Showing 11 changed files with 18,024 additions and 31 deletions.
624 changes: 624 additions & 0 deletions RazaDatasets/4g/B_2017.12.17_14.16.19.csv

Large diffs are not rendered by default.

869 changes: 869 additions & 0 deletions RazaDatasets/4g/B_2018.01.27_13.58.28.csv

Large diffs are not rendered by default.

808 changes: 808 additions & 0 deletions RazaDatasets/4g/B_2018.02.12_16.14.01.csv

Large diffs are not rendered by default.

2,144 changes: 2,144 additions & 0 deletions RazaDatasets/5g/Static/B_2019.12.16_13.40.04.csv

Large diffs are not rendered by default.

3,323 changes: 3,323 additions & 0 deletions RazaDatasets/5g/Static/B_2020.01.16_10.43.34.csv

Large diffs are not rendered by default.

3,149 changes: 3,149 additions & 0 deletions RazaDatasets/5g/Static/B_2020.02.13_13.57.29.csv

Large diffs are not rendered by default.

1,014 changes: 1,014 additions & 0 deletions RazaDatasets/5g/Static/B_2020.02.14_13.21.26.csv

Large diffs are not rendered by default.

5,992 changes: 5,992 additions & 0 deletions RazaDatasets/5g/Static/B_2020.02.27_18.39.27.csv

Large diffs are not rendered by default.

112 changes: 98 additions & 14 deletions lab.py
@@ -2,6 +2,7 @@
 import os
 import pathlib
 import subprocess
+import time
 from collections import OrderedDict
 from multiprocessing import Process
 from time import sleep
@@ -17,7 +18,7 @@
 
 class Experiment(TypedDict):
     id: int
-    mode: Literal["5g"]
+    mode: str
     mobility: NormalizedDataset
     server_type: Union[Literal["asgi"], Literal["wsgi"]]
     adaptation_algorithm: Union[
@@ -26,6 +27,7 @@ class Experiment(TypedDict):
     server_protocol: Union[Literal["quic"], Literal["tcp"]]
     godash_config_path: str
     godash_bin_path: str
+    mpd_path: str
 
 
 class ExperimentResult(TypedDict):
@@ -143,6 +145,10 @@ def get_pcap_output_file_name(experiment: Experiment, client: Host) -> str:
     return os.path.join(get_experiment_folder_name(experiment), f"{client.intf()}.pcap")
 
 
+def get_experiment_result_file_name(experiment: ExperimentResult) -> str:
+    return os.path.join(experiment["experiment_root_path"], "result.json")
+
+
 # server settings


@@ -366,17 +372,95 @@ def run_experiment(experiment: Experiment) -> ExperimentResult:
 if __name__ == "__main__":
     setLogLevel("info")
 
-    normalized_datasets = get_normalized_datasets()
-
-    experiment: Experiment = {
-        "mobility": normalized_datasets[0],
-        "server_type": "wsgi",
-        "server_protocol": "tcp",
-        "mode": "5g",
-        "id": 2,
-        "adaptation_algorithm": "bba",
-        "godash_config_path": "/home/raza/Downloads/goDASHbed/config/configure.json",
-        "godash_bin_path": "/home/raza/Downloads/goDASH/godash/godash",
-    }
+    mpd_paths = [
+        "4K_non_copyright_dataset/10_sec/x264/bbb/DASH_Files/full/bbb_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/10_sec/x264/bbb/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/10_sec/x264/bbb/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/10_sec/x264/sintel/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/10_sec/x264/sintel/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/10_sec/x264/sintel/DASH_Files/full/sintel_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/10_sec/x264/tearsofsteel/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/10_sec/x264/tearsofsteel/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/10_sec/x264/tearsofsteel/DASH_Files/full/tearsofsteel_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/2_sec/x264/bbb/DASH_Files/full/bbb_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/2_sec/x264/bbb/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/2_sec/x264/bbb/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/2_sec/x264/sintel/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/2_sec/x264/sintel/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/2_sec/x264/sintel/DASH_Files/full/sintel_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/2_sec/x264/tearsofsteel/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/2_sec/x264/tearsofsteel/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/2_sec/x264/tearsofsteel/DASH_Files/full/tearsofsteel_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/4_sec/x264/bbb/DASH_Files/full/bbb_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/4_sec/x264/bbb/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/4_sec/x264/bbb/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/4_sec/x264/sintel/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/4_sec/x264/sintel/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/4_sec/x264/sintel/DASH_Files/full/sintel_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/4_sec/x264/tearsofsteel/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/4_sec/x264/tearsofsteel/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/4_sec/x264/tearsofsteel/DASH_Files/full/tearsofsteel_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/6_sec/x264/bbb/DASH_Files/full/bbb_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/6_sec/x264/bbb/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/6_sec/x264/bbb/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/6_sec/x264/sintel/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/6_sec/x264/sintel/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/6_sec/x264/sintel/DASH_Files/full/sintel_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/6_sec/x264/tearsofsteel/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/6_sec/x264/tearsofsteel/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/6_sec/x264/tearsofsteel/DASH_Files/full/tearsofsteel_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/8_sec/x264/bbb/DASH_Files/full/bbb_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/8_sec/x264/bbb/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/8_sec/x264/bbb/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/8_sec/x264/sintel/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/8_sec/x264/sintel/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/8_sec/x264/sintel/DASH_Files/full/sintel_enc_x264_dash.mpd",
+        "4K_non_copyright_dataset/8_sec/x264/tearsofsteel/DASH_Files/full/dash_audio.mpd",
+        "4K_non_copyright_dataset/8_sec/x264/tearsofsteel/DASH_Files/full/dash_video_audio.mpd",
+        "4K_non_copyright_dataset/8_sec/x264/tearsofsteel/DASH_Files/full/tearsofsteel_enc_x264_dash.mpd",
+    ]
+
+    datasets = [
+        "RazaDatasets/4g/B_2017.12.17_14.16.19.csv",
+        "RazaDatasets/4g/B_2018.01.27_13.58.28.csv",
+        "RazaDatasets/4g/B_2018.02.12_16.14.01.csv",
+        "RazaDatasets/5g/Static/B_2019.12.16_13.40.04.csv",
+        "RazaDatasets/5g/Static/B_2020.01.16_10.43.34.csv",
+        "RazaDatasets/5g/Static/B_2020.02.13_13.57.29.csv",
+        "RazaDatasets/5g/Static/B_2020.02.14_13.21.26.csv",
+        "RazaDatasets/5g/Static/B_2020.02.27_18.39.27.csv",
+    ]
+
-    print(run_experiment(experiment))
+    dataset_modes = ["4g", "4g", "4g", "5g", "5g", "5g", "5g", "5g"]
+
+    server_types = ["wsgi", "asgi"]
+    server_protocols = ["tcp", "quic"]
+    adaptation_algorithms = ["bba", "conventional", "elastic", "logistic"]  # only "bba" is used below
+
+    normalized_datasets = get_normalized_datasets(datasets)
+
+    for dataset, mode in zip(normalized_datasets, dataset_modes):
+        for server_type in server_types:
+            for server_protocol in server_protocols:
+                for mpd_path in mpd_paths:
+                    print("Starting experiment for:")
+                    print(f"Dataset: {dataset['name']}")
+                    print(f"Server type: {server_type}")
+                    print(f"Server protocol: {server_protocol}")
+                    print(f"Server mpd: {mpd_path}")
+
+                    experiment: Experiment = {
+                        "mobility": dataset,
+                        "server_type": server_type,
+                        "server_protocol": server_protocol,
+                        "mode": mode,
+                        "id": int(time.time()),
+                        "adaptation_algorithm": "bba",
+                        "godash_config_path": "/home/raza/Downloads/goDASHbed/config/configure.json",
+                        "godash_bin_path": "/home/raza/Downloads/goDASH/godash/godash",
+                        "mpd_path": mpd_path,
+                    }  # type: ignore
+
+                    experiment_result = run_experiment(experiment)
+
+                    # open in write mode so json.dump can serialize the result to disk
+                    with open(get_experiment_result_file_name(experiment_result), "w") as f:
+                        json.dump(experiment_result, f)
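Editor's note: the loop above persists each run as a result.json inside its experiment folder. A minimal sketch of how those files could be gathered afterwards for analysis; the experiments/ root directory and the load_results helper are assumptions, not part of this commit:

import json
import pathlib

def load_results(root: str = "experiments") -> list:
    # Collect every result.json written by the experiment loop above.
    results = []
    for result_file in pathlib.Path(root).rglob("result.json"):
        with result_file.open() as f:
            results.append(json.load(f))
    return results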
17 changes: 3 additions & 14 deletions normalize_datasets.py
@@ -5,8 +5,6 @@
 import pandas as pd
 from pandas.core.frame import DataFrame
 
-from datasets5G import datasets5G
-
 
 class Limits(TypedDict):
     upload_kbps: float
@@ -18,20 +16,17 @@ class NormalizedDataset(TypedDict):
     name: str
     data: List[Limits]
     total_duration: int
-    platform: str
-    mobility: str
-    case: str
     dataset: str
 
 
 # disable pandas warnings for chaining assignment
 pd.options.mode.chained_assignment = None
 
 
-def get_normalized_datasets() -> List[NormalizedDataset]:
+def get_normalized_datasets(datasets: List[str]) -> List[NormalizedDataset]:
     normalized_datasets = []
 
-    for filename in datasets5G:
+    for filename in datasets:
         # Normalize dataset using pandas
         csv_data: DataFrame = pd.read_csv(filename)  # type: ignore

@@ -86,20 +81,14 @@ def get_normalized_datasets() -> List[NormalizedDataset]:
         # Normalize name removing first path
         parts = pathlib.Path(filename).parts
         normalized_name = "-".join(parts[1:]).strip(".csv")
-        platform = parts[1]
-        mobility = parts[2]
-        case = parts[3] if len(parts) > 4 else parts[1]
-        dataset = parts[4] if len(parts) > 4 else parts[3]
+        dataset = parts[-1]
 
         # append normalized results
         normalized_datasets.append(
            {
                 "name": normalized_name,
                 "data": filtered_data.to_dict("records"),
                 "total_duration": total_duration,
-                "platform": platform,
-                "mobility": mobility,
-                "case": case,
                 "dataset": dataset,
             }
         )
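Editor's note: for reference, a quick sketch of what the simplified naming logic above produces; the example path is taken from the datasets list added to lab.py in this commit:

import pathlib

filename = "RazaDatasets/4g/B_2017.12.17_14.16.19.csv"
parts = pathlib.Path(filename).parts                 # ("RazaDatasets", "4g", "B_2017.12.17_14.16.19.csv")
normalized_name = "-".join(parts[1:]).strip(".csv")  # "4g-B_2017.12.17_14.16.19"
dataset = parts[-1]                                  # "B_2017.12.17_14.16.19.csv"
# note: str.strip removes a *set of characters*, not a suffix; these
# filenames happen to survive it, but removesuffix(".csv") would be safer.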
3 changes: 0 additions & 3 deletions process_results.py
@@ -138,7 +138,6 @@ def process_pcap(experiment_result: ExperimentResult):
     std_less = 0
 
     if dc_gt100_05 > 2:
-        print(dc_gt100_05)
         tpTime_05_GT100 = d_packets_gt_100_time[-1] - d_packets_gt_100_time[0]
         AvgTime_List_GT100 = [
             d_packets_gt_100_time[i + 1] - d_packets_gt_100_time[i]
@@ -205,8 +204,6 @@ def process_pcap(experiment_result: ExperimentResult):
             index=False,
             header=True,
         )
-    else:
-        print("Skipped: ", end)
 
     merged_columns = pd.merge(godash_result, df, on="Arr_time", how="left")
     merged_columns.to_csv(
