TG-PlatformPlus/UserScripts/report.py

157 lines
7.1 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters!

This file contains ambiguous Unicode characters that may be confused with others in your current locale. If your use case is intentional and legitimate, you can safely ignore this warning. Use the Escape button to highlight these characters.

from datetime import datetime
import os, csv, json
import pandas as pd
import numpy as np
class JsonEncoder(json.JSONEncoder):
    """JSON encoder that can serialize common NumPy types.

    NumPy scalars become native Python scalars and ndarrays become
    (nested) lists; everything else defers to the base encoder.
    """

    def default(self, obj):
        # NumPy scalar -> native Python scalar.
        if isinstance(obj, (np.integer, np.floating, np.bool_)):
            return obj.item()
        # NumPy array -> (nested) list.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Unknown type: let the base class raise TypeError.
        return super().default(obj)
def fillData(baseName, data, workbook):
    """Fill "{baseName.key}" placeholders in a workbook from a nested dict.

    Walks `data` recursively. For each leaf (value that is not a dict, or is
    empty) it searches every worksheet for the placeholder token and either
    writes a list vertically starting at the found cell, or text-replaces a
    scalar value.

    baseName -- dotted prefix used to build the placeholder token
    data     -- nested dict of replacement values; anything else raises
    workbook -- spreadsheet object; `.sheets` / `.used_range.api` look like an
                xlwings Book over the Excel COM API — TODO confirm
    Returns the concatenation of the placeholder tokens that were processed.
    Raises Exception when `data` is not a dict.
    """
    info = ''
    if isinstance(data, dict):
        for key, value in data.items():
            # Leaf: not a dict, or an empty value (empty dict included).
            if (not isinstance(value, dict)) or (not value):
                # NOTE(review): `log` is not defined in this file — presumably a
                # project-level helper; verify it is in scope at runtime.
                log(f"{{{baseName}.{key}}}".replace('{.', '{'))
                info += f"{{{baseName}.{key}}}"
                for worksheet in workbook.sheets:
                    # The .replace('{.', '{') collapses the leading dot when
                    # baseName is empty, so the token reads "{key}".
                    result = worksheet.used_range.api.find(f"{{{baseName}.{key.replace('{.','{')}}}".replace('{.', '{'))
                    if result:
                        if isinstance(value, list):
                            # Lists are written downward from the found cell:
                            # address looks like "$B$3", so split('$') gives
                            # ['', column, row]; extend by len(value)-1 rows.
                            address = f"{result.address}".split('$')
                            worksheet.range(f"{result.address}:${address[1]}${int(address[2])+len(value)-1}").options(transpose=True).value = value
                        else:
                            # Scalar: plain text replacement of the token.
                            worksheet.used_range.api.Replace(f"{{{baseName}.{key}}}", f"{value}")
            else:
                # Non-empty dict: descend one level with an extended dotted
                # prefix, then restore the prefix after the recursive call.
                baseName += f".{key}"
                fillData(baseName, value, workbook)
                baseName = baseName[0:baseName.rfind('.')]
    else:
        info += f"{data}"
        raise Exception(f"Error in fillData(): Type of data is not dict.")
    return info
# Summary: convert the CSV file produced by the dataLogging command into a dict
# Parameters:
# csvFilePath: full path of the input CSV file
# testCaseNameFildName: field in the CSV used as the test-case name
# nanDataProcessFlag: how NaN values contained in the data table are handled
# Details:
# 1. Group the test data by test-case name
# 2. Process NaN data according to nanDataProcessFlag
# =0: drop columns that are entirely NaN, then drop rows containing NaN
# =1: drop columns that are entirely NaN
def csvLoggingFileToDict(csvFilePath, testCaseNameFildName='', nanDataProcessFlag=0):
    """Convert the CSV produced by the dataLogging command into a dict.

    The file may contain several segments, each introduced by a header row
    whose first cell starts with 'DateTime'; all segments are concatenated
    into a single DataFrame before conversion.

    csvFilePath          -- full path of the input CSV file
    testCaseNameFildName -- optional column used to split rows per test case;
                            when given, the result nests one level deeper
                            under each test-case name
    nanDataProcessFlag   -- 0: drop all-NaN columns, then rows containing NaN
                            1: drop all-NaN columns only
    Returns {'testData': ...} where the value is either a column dict
    ({column: [values]}) or a {testCaseName: column dict} mapping.
    Raises Exception on any failure (missing file, parse error) with the
    function name prepended for context.
    """
    try:
        if not os.path.exists(csvFilePath):
            raise Exception(f"{csvFilePath} file doesn't exist.")
        tempTitle = []
        tempData = []
        dfData = pd.DataFrame({})
        with open(csvFilePath, "r", encoding='utf-8') as f:
            lines = list(csv.reader(f))
        for line in lines:
            if line[0].startswith('DateTime'):
                # New header row: flush the rows collected for the previous segment.
                if len(tempData) > 0:
                    tempDataFrame = pd.DataFrame(dict(zip(tempTitle, list(map(list, zip(*tempData))))))
                    dfData = pd.concat([dfData, tempDataFrame])
                    tempData = []
                tempTitle = line
            else:
                tempData.append(line)
        # Flush the final segment (an empty tempData yields an empty frame).
        tempDataFrame = pd.DataFrame(dict(zip(tempTitle, list(map(list, zip(*tempData))))))
        dfData = pd.concat([dfData, tempDataFrame])
        jsonData = {}
        # Split the data per test case when a name column was requested.
        if testCaseNameFildName != '':
            testcaseList = list(dfData[testCaseNameFildName].unique())
            if len(testcaseList) > 0:
                # BUG FIX: the nested dict must exist before item assignment —
                # the original raised KeyError on jsonData['testData'][item].
                jsonData['testData'] = {}
                for item in testcaseList:
                    temp = dfData[dfData[testCaseNameFildName] == item]
                    if nanDataProcessFlag == 0:
                        # Reassign instead of inplace=True on a filtered slice
                        # (avoids pandas SettingWithCopyWarning / silent no-ops).
                        temp = temp.dropna(how='all', axis=1).dropna(axis=0)
                        jsonData['testData'][f"{item}"] = temp.to_dict(orient='list')
                    elif nanDataProcessFlag == 1:
                        temp = temp.dropna(how='all', axis=1)
                        jsonData['testData'][f"{item}"] = temp.to_dict(orient='list')
                    else:
                        # Any other flag value: fall back to the whole table.
                        temp = dfData.dropna(how='all', axis=1).dropna(axis=0)
                        jsonData['testData'] = temp.to_dict(orient='list')
        else:
            temp = dfData.dropna(how='all', axis=1)
            if nanDataProcessFlag == 0:
                temp = temp.dropna(axis=0)
            jsonData['testData'] = temp.to_dict(orient='list')
        return jsonData
    except Exception as e:
        raise Exception(f"Error in csvLoggingFileToDict(): {str(e)}")
# Summary: convert the CSV file produced by a memory read into a dict entry
# Parameters:
# csvFilePath: full path of the input CSV file
# dictData: dict that the parsed partition data is added to and returned
# partNum: partition number the memory file corresponds to
def csvFlashFileToDict(csvFilePath, dictData, partNum):
    """Parse a flash-dump CSV and store it in dictData under "Part{partNum}".

    The file may contain several segments, each introduced by a header row
    whose first cell starts with 'DateTime'. All segments are concatenated
    and converted with to_dict(orient='list').

    Returns dictData (mutated in place). Raises Exception on any failure,
    with the function name prepended for context.
    """
    info = ''
    try:
        if not os.path.exists(csvFilePath):
            raise Exception(f"{csvFilePath} file doesn't exist.")
        frames = pd.DataFrame({})
        header = []
        rows = []
        with open(csvFilePath, "r", encoding='utf-8') as fh:
            for record in csv.reader(fh):
                if record[0].startswith('DateTime'):
                    # Header row starts a new segment: flush the previous one.
                    if rows:
                        columns = dict(zip(header, [list(col) for col in zip(*rows)]))
                        frames = pd.concat([frames, pd.DataFrame(columns)])
                        rows = []
                    header = record
                else:
                    rows.append(record)
        # Flush the last segment (empty rows yield an empty frame).
        columns = dict(zip(header, [list(col) for col in zip(*rows)]))
        frames = pd.concat([frames, pd.DataFrame(columns)])
        dictData[f"Part{partNum}"] = frames.to_dict(orient='list')
        return dictData
    except Exception as e:
        raise Exception(f"Error in csvFlashFileToDict(): {str(e)}")
# Summary: rename fields in a dict according to a mapping configuration
# Parameters:
# dictData: dict whose keys are renamed
# repalceConfigDict: replacement configuration dict; uses key 'MappingTableOfFiledName'
def replaceFieldInDict(dictData, repalceConfigDict):
    """Rename keys of dictData per repalceConfigDict['MappingTableOfFiledName'].

    Keys found in the mapping table are replaced by their mapped name
    (stringified); all other keys are kept unchanged. Renamed entries come
    first in the result. Raises Exception when either argument is not a dict,
    or when the mapping key is missing, with the function name prepended.
    """
    info = ''
    try:
        if not isinstance(repalceConfigDict, dict) or not isinstance(dictData, dict):
            raise Exception(f"repalceConfigDict or dictData is not dict.")
        mapping = repalceConfigDict['MappingTableOfFiledName']
        renamed = {}
        untouched = {}
        for field, value in dictData.items():
            if field in mapping:
                # f-string keeps the original behavior: mapped names are
                # always stored as strings.
                renamed[f"{mapping[field]}"] = value
            else:
                untouched[field] = value
        return {**renamed, **untouched}
    except Exception as e:
        raise Exception(f"Error in replaceFieldInDict(): {str(e)}")