Nie możesz wybrać więcej niż 25 tematów. Tematy muszą się zaczynać od litery lub cyfry, mogą zawierać myślniki ('-') i mogą mieć do 35 znaków.

210 lines
6.8KB

  1. #!/usr/bin/env python3
  2. import multiprocessing
  3. import os
  4. import pickle
  5. from argparse import ArgumentParser
  6. from math import ceil
  7. from time import sleep
  8. import matplotlib
  9. import pandas as pd
  10. import matplotlib.pyplot as plt
  11. from mpl_toolkits import axisartist
  12. from mpl_toolkits.axes_grid1 import host_subplot
  13. def csv_to_dataframe(csv_list, folder, dummy):
  14. global n
  15. global frame_list
  16. transmission_df = None
  17. for csv in csv_list:
  18. tmp_df = pd.read_csv(
  19. "{}{}".format(folder, csv),
  20. dtype=dict(is_retranmission=bool, is_dup_ack=bool),
  21. )
  22. tmp_df["datetime"] = pd.to_datetime(tmp_df["datetime"]) - pd.Timedelta(hours=1)
  23. tmp_df = tmp_df.set_index("datetime")
  24. tmp_df.index = pd.to_datetime(tmp_df.index)
  25. if transmission_df is None:
  26. transmission_df = tmp_df
  27. else:
  28. transmission_df = pd.concat([transmission_df, tmp_df])
  29. n.value += 1
  30. frame_list.append(transmission_df)
  31. from itertools import islice
  32. def chunk(it, size):
  33. it = iter(it)
  34. return iter(lambda: tuple(islice(it, size)), ())
  35. def plot_cdf(dataframe, column_name, axis=None):
  36. stats_df = dataframe \
  37. .groupby(column_name) \
  38. [column_name] \
  39. .agg("count") \
  40. .pipe(pd.DataFrame) \
  41. .rename(columns={column_name: "frequency"})
  42. # PDF
  43. stats_df["PDF"] = stats_df["frequency"] / sum(stats_df["frequency"])
  44. # CDF
  45. stats_df["CDF"] = stats_df["PDF"].cumsum()
  46. stats_df = stats_df.reset_index()
  47. if axis:
  48. stats_df.plot(x=column_name, y=["CDF"], grid=True, ax=axis)
  49. else:
  50. stats_df.plot(x=column_name, y=["CDF"], grid=True)
if __name__ == "__main__":
    # Compare two measurement runs: load the pcap-derived csv files of each
    # folder in parallel worker processes, compute goodput, then plot sRTT
    # and goodput CDFs of both runs into PDF files.
    parser = ArgumentParser()
    # NOTE(review): --serial1/--serial2 are parsed but never used — the
    # serial-merge section below is commented out; confirm before removing.
    parser.add_argument("--serial1", required=True, help="Serial csv file1.")
    parser.add_argument("--serial2", required=True, help="Serial csv file2.")
    parser.add_argument("--folder1", required=True, help="PCAP csv folder1.")
    parser.add_argument("--folder2", required=True, help="PCAP csv folder2.")
    # NOTE(review): with the default None the savefig paths below become
    # "None<name>_cdf_compare_plot.pdf" — presumably --save should be
    # required or defaulted to ""; verify intended usage.
    parser.add_argument("--save", default=None, help="Location to save pdf file.")
    parser.add_argument(
        "-c",
        "--cores",
        default=1,
        type=int,
        help="Number of cores for multiprocessing.",
    )
    parser.add_argument(
        "-i",
        "--interval",
        default=2,
        type=int,
        help="Time interval for rolling window.",
    )
    args = parser.parse_args()
    transmission_df_list = list()
    # Process each run folder independently; one dict per run is collected
    # in transmission_df_list for the comparison plots afterwards.
    for f in [args.folder1, args.folder2]:
        # Shared state for the worker processes: n counts processed files,
        # frame_list collects one combined DataFrame per worker.
        manager = multiprocessing.Manager()
        n = manager.Value("i", 0)
        frame_list = manager.list()
        jobs = []
        # load all pcap csv into one dataframe
        pcap_csv_list = list()
        for filename in os.listdir(f):
            if filename.endswith(".csv") and "tcp" in filename:
                pcap_csv_list.append(filename)
        # Split the file list into one chunk per core; each chunk becomes
        # one worker process running csv_to_dataframe.
        parts = chunk(pcap_csv_list, ceil(len(pcap_csv_list) / args.cores))
        print("Start processing with {} jobs.".format(args.cores))
        for p in parts:
            process = multiprocessing.Process(target=csv_to_dataframe, args=(p, f, "dummy"))
            jobs.append(process)
        for j in jobs:
            j.start()
        print("Started all jobs.")
        # Ensure all the processes have finished
        # Poll once per second instead of join() so a text spinner with a
        # progress percentage can be redrawn on the same line (\r).
        finished_job_counter = 0
        working = ["|", "/", "-", "\\", "|", "/", "-", "\\"]
        w = 0
        while len(jobs) != finished_job_counter:
            sleep(1)
            print(
                "\r\t{}{}{}\t Running {} jobs ({} finished). Processed {} out of {} pcap csv files. ({}%) ".format(
                    working[w],
                    working[w],
                    working[w],
                    len(jobs),
                    finished_job_counter,
                    n.value,
                    len(pcap_csv_list),
                    round((n.value / len(pcap_csv_list)) * 100, 2),
                ),
                end="",
            )
            # Recount finished workers from scratch each poll.
            finished_job_counter = 0
            for j in jobs:
                if not j.is_alive():
                    finished_job_counter += 1
            # Advance the spinner frame, wrapping at the end of `working`.
            if (w + 1) % len(working) == 0:
                w = 0
            else:
                w += 1
        print("\r\nSorting table...")
        # Merge the per-worker frames and order rows chronologically.
        transmission_df = pd.concat(frame_list)
        frame_list = None
        transmission_df = transmission_df.sort_index()
        print("Calculate goodput...")
        # srtt arrives in microseconds; convert to seconds.
        transmission_df["srtt"] = transmission_df["srtt"].apply(lambda x: x / 10 ** 6)
        # key for columns and level for index
        # Sum payload bytes per fixed time bucket of `interval` seconds...
        transmission_df["goodput"] = transmission_df["payload_size"].groupby(pd.Grouper(level="datetime", freq="{}s".format(args.interval))).transform("sum")
        # ...then convert bytes/interval to megabits per second.
        transmission_df["goodput"] = transmission_df["goodput"].apply(
            lambda x: ((x * 8) / args.interval) / 10**6
        )
        # Same conversion over a rolling (sliding) window of the same width.
        transmission_df["goodput_rolling"] = transmission_df["payload_size"].rolling("{}s".format(args.interval)).sum()
        transmission_df["goodput_rolling"] = transmission_df["goodput_rolling"].apply(
            lambda x: ((x * 8) / args.interval) / 10 ** 6
        )
        # set meta values
        # Taken from the first row — assumes the whole run used a single
        # congestion-control algorithm and direction.
        cc_algo = transmission_df["congestion_control"].iloc[0]
        cc_algo = cc_algo.upper()
        transmission_direction = transmission_df["direction"].iloc[0]
        # read serial csv
        # NOTE(review): serial merge disabled; kept for reference.
        #serial_df = pd.read_csv(args.serial_file)
        #serial_df["datetime"] = pd.to_datetime(serial_df["datetime"]) - pd.Timedelta(hours=1)
        #serial_df = serial_df.set_index("datetime")
        #serial_df.index = pd.to_datetime(serial_df.index)
        #serial_df.sort_index()
        #transmission_df = pd.merge_asof(
        #    transmission_df,
        #    serial_df,
        #    tolerance=pd.Timedelta("1s"),
        #    right_index=True,
        #    left_index=True,
        #)
        transmission_df_list.append(dict(
            df=transmission_df,
            cc_algo=cc_algo,
            transmission_direction=transmission_direction
        ))
    # Plot sRTT CDF
    # Second curve is drawn onto the first plot's axes via plt.gca().
    plot_cdf(transmission_df_list[0]["df"], "srtt")
    plot_cdf(transmission_df_list[1]["df"], "srtt", axis=plt.gca())
    plt.xscale("log")
    plt.xlabel("sRTT [s]")
    plt.ylabel("CDF")
    plt.legend([transmission_df_list[0]["cc_algo"], transmission_df_list[1]["cc_algo"]])
    # NOTE(review): title uses the direction of the *second* run only
    # (loop variable survives the for loop) — confirm both runs share it.
    plt.title("{}".format(transmission_direction))
    plt.savefig("{}{}_cdf_compare_plot.pdf".format(args.save, "srtt"))
    plt.clf()
    # Plot goodput CDF
    plot_cdf(transmission_df_list[0]["df"], "goodput")
    plot_cdf(transmission_df_list[1]["df"], "goodput", axis=plt.gca())
    plt.xlabel("goodput [mbps]")
    plt.ylabel("CDF")
    plt.legend([transmission_df_list[0]["cc_algo"], transmission_df_list[1]["cc_algo"]])
    plt.title("{}".format(transmission_direction))
    plt.savefig("{}{}_cdf_compare_plot.pdf".format(args.save, "goodput"))