#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
测试部位温度记录表生成脚本
- 忽略传入的 experimentProcess自行构造固定结构的数据
- InfluxDB 查询每个测试部位在各时间点的瞬时温度值
- 输出格式与应用中的 scriptTable 占位符兼容
- 默认把 {scriptTable1} 放在测试部位所在的单元格
环境变量
TABLE_TOKEN 目标占位符默认 scriptTable1
TABLE_START_ROW 写入起始行偏移默认 0
TABLE_START_COL 写入起始列偏移默认 0
TABLE_TIME_SLOTS 逗号分隔的时间刻度默认 "0.5h,1h,1.5h,2h,2.5h,3h,3.5h"
TABLE_MOTOR_SPEED 电机转速标签默认 "980RPM"
EXPERIMENT_START 实验开始时间ISO 8601 格式 2024-01-01T10:00:00Z
EXPERIMENT_END 实验结束时间ISO 8601 格式
INFLUX_URL InfluxDB URL
INFLUX_ORG InfluxDB 组织
INFLUX_TOKEN InfluxDB 访问令牌
INFLUX_BUCKET InfluxDB bucket 名称
INFLUX_MEASUREMENT InfluxDB measurement 名称
"""
from __future__ import annotations

import json
import logging
import os
import sys
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List, Optional
# Module-wide logger; handlers/level are configured lazily in main().
LOGGER = logging.getLogger(__name__)
def _mask_secret(value: Optional[str]) -> str:
if value is None:
return "<unset>"
value = value.strip()
if not value:
return "<empty>"
if len(value) <= 4:
return "****"
return f"{value[:4]}****"
def _log_environment_variables() -> None:
    """Log every configuration variable this script reads, masking secrets."""
    keys = (
        "TABLE_TOKEN",
        "TABLE_START_ROW",
        "TABLE_START_COL",
        "TABLE_TIME_SLOTS",
        "TABLE_MOTOR_SPEED",
        "TABLE_LOG_LEVEL",
        "TABLE_LOG_FILE",
        "EXPERIMENT_START",
        "EXPERIMENT_END",
        "INFLUX_URL",
        "INFLUX_ORG",
        "INFLUX_TOKEN",
        "INFLUX_BUCKET",
        "INFLUX_MEASUREMENT",
    )
    for name in keys:
        raw = os.environ.get(name)
        # Anything that looks like a credential is masked before logging.
        if name.endswith(("TOKEN", "PASSWORD")):
            shown = _mask_secret(raw)
        else:
            shown = raw if raw is not None else "<unset>"
        LOGGER.info("ENV %s = %s", name, shown)
def _read_all_stdin() -> str:
try:
if sys.stdin and not sys.stdin.closed and not sys.stdin.isatty():
return sys.stdin.read()
except Exception:
pass
return ""
def _load_payload() -> Dict[str, Any]:
    """Load the experiment JSON from stdin, argv[1] (file path or literal), or env.

    Falls back to an empty object.  Raises ValueError when the parsed JSON
    is not a dict; json.JSONDecodeError propagates for malformed input.
    """
    raw = _read_all_stdin().strip()
    if not raw and len(sys.argv) > 1:
        candidate = sys.argv[1]
        # argv[1] may be a path to a JSON file or the JSON text itself.
        if os.path.isfile(candidate):
            with open(candidate, "r", encoding="utf-8") as handle:
                raw = handle.read()
        else:
            raw = candidate
    raw = raw or os.environ.get("EXPERIMENT_JSON", "").strip() or "{}"
    payload = json.loads(raw)
    if not isinstance(payload, dict):
        raise ValueError("experiment JSON must be a dict")
    return payload
def _time_slots() -> List[str]:
raw = os.environ.get("TABLE_TIME_SLOTS", "").strip()
if not raw:
# 根据图片时间刻度是0.5h, 1h, 1.5h, 2h, 2.5h, 3h, 3.5h7列
return ["0.5h", "1h", "1.5h", "2h", "2.5h", "3h", "3.5h"]
slots = [slot.strip() for slot in raw.split(",")]
return [slot for slot in slots if slot]
def _default_sections() -> List[Dict[str, Any]]:
# name -> rows underneathentries
# 每个 entry 对应一个测试部位,需要映射到 InfluxDB 的 field 或 tag
return [
{"name": "主轴承", "entries": [
{"label": "#1", "field": "主轴承#1", "filters": {"data_type": "LSDAQ"}, "result_key": "主轴承#1"},
{"label": "#2", "field": "主轴承#2", "filters": {"data_type": "LSDAQ"}, "result_key": "主轴承#2"},
{"label": "#3", "field": "主轴承#3", "filters": {"data_type": "LSDAQ"}, "result_key": "主轴承#3"},
{"label": "#4", "field": "主轴承#4", "filters": {"data_type": "LSDAQ"}, "result_key": "主轴承#4"},
{"label": "#5", "field": "主轴承#5", "filters": {"data_type": "LSDAQ"}, "result_key": "主轴承#5"},
{"label": "#6", "field": "主轴承#6", "filters": {"data_type": "LSDAQ"}, "result_key": "主轴承#6"},
]},
{"name": "十字头", "entries": [
{"label": "#1", "field": "十字头#1", "filters": {"data_type": "LSDAQ"}, "result_key": "十字头#1"},
{"label": "#2", "field": "十字头#2", "filters": {"data_type": "LSDAQ"}, "result_key": "十字头#2"},
{"label": "#3", "field": "十字头#3", "filters": {"data_type": "LSDAQ"}, "result_key": "十字头#3"},
{"label": "#4", "field": "十字头#4", "filters": {"data_type": "LSDAQ"}, "result_key": "十字头#4"},
{"label": "#5", "field": "十字头#5", "filters": {"data_type": "LSDAQ"}, "result_key": "十字头#5"},
]},
{"name": "减速箱小轴承", "entries": [
{"label": "#1输入法兰端", "field": "减速箱小轴承1", "filters": {"data_type": "LSDAQ"}, "result_key": "减速箱小轴承#1"},
{"label": "#2", "field": "减速箱小轴承#2", "filters": {"data_type": "LSDAQ"}, "result_key": "减速箱小轴承#2"},
]},
{"name": "减速箱大轴承", "entries": [
{"label": "#3大端盖端", "field": "减速箱大轴承#3", "filters": {"data_type": "LSDAQ"}, "result_key": "减速箱大轴承#3"},
{"label": "#4", "field": "减速箱大轴承#4", "filters": {"data_type": "LSDAQ"}, "result_key": "减速箱大轴承#4"},
]},
{"name": "润滑油温", "entries": [
{"label": "", "field": "mean", "filters": {"data_type": "润滑油温"}, "result_key": "润滑油温"},
]},
{"name": "润滑油压", "entries": [
{"label": "(Psi)", "field": "mean", "filters": {"data_type": "润滑油压"}, "result_key": "润滑油压"},
]},
]
def _parse_time_slot(slot_str: str) -> float:
"""解析时间刻度字符串(如 '0.5h', '1h')为小时数"""
slot_str = slot_str.strip().lower()
if slot_str.endswith('h'):
try:
return float(slot_str[:-1])
except ValueError:
pass
return 0.0
def _query_influxdb(
    field_name: str,
    start_time: datetime,
    end_time: datetime,
    influx_url: str,
    influx_org: str,
    influx_token: str,
    influx_bucket: str,
    influx_measurement: str,
    filters: Optional[Dict[str, str]] = None,
) -> Optional[float]:
    """Query InfluxDB for the instantaneous value of *field_name* near a target time.

    The target time point is ``end_time``; ``start_time`` is kept for interface
    compatibility but is not used by the lookup (callers pass the same value
    for both).  The query scans a +/-5 minute window around the target and
    returns the latest sample in it, or None when the client library is
    unavailable, the query fails, or no data is found.
    """
    try:
        from influxdb_client import InfluxDBClient
        import pandas as pd
        import warnings
        from influxdb_client.client.warnings import MissingPivotFunction
    except ImportError:
        LOGGER.warning("InfluxDB client not available, skip query for field=%s", field_name)
        return None
    client = None  # defined before the try so the finally block is always safe
    try:
        client = InfluxDBClient(url=influx_url, org=influx_org, token=influx_token)
        query_api = client.query_api()
        # Optional tag filters appended to the Flux pipeline.
        tag_filters = ""
        if filters:
            for key, value in filters.items():
                tag_filters += f'\n |> filter(fn: (r) => r["{key}"] == "{value}")'
        LOGGER.debug(
            "Querying field=%s measurement=%s target_time=%s filters=%s",
            field_name,
            influx_measurement,
            end_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
            filters or {},
        )
        # Look for the sample closest to the target time: scan a small window
        # (+/-5 minutes) around it and keep the last point found.
        target_time = end_time
        window_minutes = 5
        query_start = target_time - timedelta(minutes=window_minutes)
        query_end = target_time + timedelta(minutes=window_minutes)
        query_start_rfc = query_start.strftime('%Y-%m-%dT%H:%M:%SZ')
        query_end_rfc = query_end.strftime('%Y-%m-%dT%H:%M:%SZ')
        flux = f'''
from(bucket: "{influx_bucket}")
 |> range(start: {query_start_rfc}, stop: {query_end_rfc})
 |> filter(fn: (r) => r["_measurement"] == "{influx_measurement}")
 |> filter(fn: (r) => r["_field"] == "{field_name}")
 |> filter(fn: (r) => true){tag_filters}
 |> sort(columns: ["_time"])
 |> last()
 |> yield(name: "instantaneous")
'''.strip()
        with warnings.catch_warnings():
            # query_data_frame() warns when no pivot() is used; irrelevant here.
            warnings.simplefilter("ignore", MissingPivotFunction)
            frames = query_api.query_data_frame(flux)
        if isinstance(frames, list):
            df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
        else:
            df = frames
        if df.empty or '_value' not in df.columns:
            LOGGER.debug("No instantaneous value found for field=%s", field_name)
            return None
        # The pipeline already sorted and applied last(); row 0 is the result.
        instant_value = df['_value'].iloc[0]
        if pd.isna(instant_value):
            LOGGER.debug("Instantaneous value is NaN for field=%s", field_name)
            return None
        value = float(instant_value)
        # Log the actual sample timestamp when available, for traceability.
        if '_time' in df.columns:
            actual_time = df['_time'].iloc[0]
            LOGGER.debug("Field=%s instantaneous_value=%.3f actual_time=%s", field_name, value, actual_time)
        else:
            LOGGER.debug("Field=%s instantaneous_value=%.3f", field_name, value)
        return value
    except Exception as e:
        LOGGER.error("Error querying InfluxDB for field=%s: %s", field_name, e)
        return None
    finally:
        if client is not None:
            try:
                client.close()
            except Exception:
                pass
def _get_default_temperature(field_name: str, filters: Optional[Dict[str, Any]] = None) -> float:
"""为每个测试部位返回默认温度值(用于测试或演示)"""
data_type = ""
if filters:
data_type = str(filters.get("data_type", ""))
defaults: Dict[tuple, float] = {
("主轴承#1", ""): 25.5,
("主轴承#2", ""): 26.0,
("主轴承#3", ""): 25.8,
("主轴承#4", ""): 26.2,
("主轴承#5", ""): 25.9,
("主轴承#6", ""): 26.1,
("十字头#1", ""): 28.0,
("十字头#2", ""): 28.2,
("十字头#3", ""): 27.8,
("十字头#4", ""): 28.1,
("十字头#5", ""): 27.9,
("减速箱小轴承#1", ""): 30.0,
("减速箱小轴承#2", ""): 30.2,
("减速箱大轴承#3", ""): 29.5,
("减速箱大轴承#4", ""): 29.8,
("mean", "润滑油温"): 35.0,
("mean", "润滑油压"): 50.0,
}
return defaults.get((field_name, data_type), 25.0) # 默认25.0度
def _build_cells(
time_slots: List[str],
sections: List[Dict[str, Any]],
motor_speed: str,
start_time: Optional[datetime] = None,
end_time: Optional[datetime] = None,
temperature_data: Optional[Dict[str, Dict[str, float]]] = None,
use_defaults: bool = False,
) -> List[Dict[str, Any]]:
cells: List[Dict[str, Any]] = []
def add_cell(row: int, col: int, value: str = "", rowspan: int = 1, colspan: int = 1) -> None:
payload: Dict[str, Any] = {"row": row, "col": col, "value": value}
if rowspan > 1:
payload["rowspan"] = rowspan
if colspan > 1:
payload["colspan"] = colspan
cells.append(payload)
# 模板左侧标题列已经去除,这里仅生成纯数据区,从 (0,0) 开始填入数值。
# current_row 对应模板中的实际数据行索引。
current_row = 0
for section in sections:
entries = section.get("entries") or []
if not entries:
continue
# 每个测试部位子项对应模板中的一行
for entry in entries:
# 支持新格式(带 field 映射)和旧格式(纯字符串)
if isinstance(entry, dict):
field_name = entry.get("field", "")
entry_filters = entry.get("filters")
entry_key = entry.get("result_key") or field_name
else:
field_name = ""
entry_filters = None
entry_key = ""
# 仅输出数值列:列索引直接对应时间段
# 强制填充所有列,优先使用查询数据,否则使用默认值
if field_name:
# 根据配置决定是否启用默认值
default_base_value: Optional[float] = None
if use_defaults:
default_base_value = _get_default_temperature(field_name, entry_filters)
target_key = entry_key or field_name
# 遍历所有时间段列,确保每一列都有数据
for col_idx, slot in enumerate(time_slots):
value = None
# 优先使用查询到的数据
if temperature_data:
slot_data = temperature_data.get(target_key, {})
if slot_data:
slot_key = f"{col_idx}_{slot}"
value = slot_data.get(slot_key)
if value is None and use_defaults and default_base_value is not None:
# 使用基础默认值 + 时间段偏移每个时间段增加0.1度)
time_offset = col_idx * 0.1
value = default_base_value + time_offset
if value is None:
value_str = ""
else:
# 格式化为字符串保留1位小数
value_str = f"{value:.1f}"
add_cell(current_row, col_idx, value_str)
else:
# 如果没有字段名,填充空字符串
for col_idx in range(len(time_slots)):
add_cell(current_row, col_idx, "")
current_row += 1
return cells
def _load_temperature_data(
    time_slots: List[str],
    sections: List[Dict[str, Any]],
    start_time: Optional[datetime],
    end_time: Optional[datetime],
) -> Dict[str, Dict[str, float]]:
    """Query InfluxDB for every test point's instantaneous temperature per slot.

    Returns {result_key: {"<idx>_<slot>": value}}.  Returns {} when the time
    window or any Influx connection setting is missing, or when no entry has
    a field mapping.
    """
    if not start_time or not end_time:
        LOGGER.info("Skip data query: missing start/end (%s, %s)", start_time, end_time)
        return {}
    influx_url = os.environ.get("INFLUX_URL", "").strip()
    influx_org = os.environ.get("INFLUX_ORG", "").strip()
    influx_token = os.environ.get("INFLUX_TOKEN", "").strip()
    influx_bucket = os.environ.get("INFLUX_BUCKET", "PCM").strip()
    influx_measurement = os.environ.get("INFLUX_MEASUREMENT", "PCM_Measurement").strip()
    if not all([influx_url, influx_org, influx_token, influx_bucket, influx_measurement]):
        LOGGER.warning(
            "Skip data query: missing Influx config url=%s bucket=%s measurement=%s",
            influx_url or "<empty>",
            influx_bucket or "<empty>",
            influx_measurement or "<empty>",
        )
        return {}
    # Total experiment duration in hours.
    total_duration = (end_time - start_time).total_seconds() / 3600.0
    LOGGER.info(
        "Fetch instantaneous temperature data window=%s%s total_hours=%.3f time_points=%s",
        start_time.isoformat(),
        end_time.isoformat(),
        total_duration,
        ",".join(time_slots),
    )
    # Collect every (field, entry) pair that needs querying.
    # (annotation uses PEP 585 builtins; the original referenced typing.Tuple
    # without importing it)
    query_targets: list[tuple[str, Dict[str, Any]]] = []
    for section in sections:
        entries = section.get("entries") or []
        for entry in entries:
            if isinstance(entry, dict):
                field_name = entry.get("field", "")
                if field_name:
                    query_targets.append((field_name, entry))
    if not query_targets:
        return {}
    # Slots are cumulative offsets (0.5h, 1h, 1.5h, ...): each slot samples
    # the instantaneous value at that point after the experiment start.
    temperature_data: Dict[str, Dict[str, float]] = {}
    slot_durations = [_parse_time_slot(slot) for slot in time_slots]
    positive_slot_durations = [hours for hours in slot_durations if hours > 0]
    max_slot_hours = max(positive_slot_durations) if positive_slot_durations else 0.0
    # When the experiment is shorter than the largest slot, anchor windows to
    # the start instead of clamping them to the experiment end.
    force_start_based_window = bool(positive_slot_durations) and (total_duration + 1e-9 < max_slot_hours)
    if force_start_based_window:
        LOGGER.debug(
            "Total duration %.3fh shorter than max slot %.3fh, fall back to start-based windows",
            total_duration,
            max_slot_hours,
        )
    prev_slot_end = 0.0
    for idx, slot_str in enumerate(time_slots):
        slot_hours = slot_durations[idx] if idx < len(slot_durations) else _parse_time_slot(slot_str)
        if slot_hours <= 0:
            LOGGER.debug("Skip slot index=%d label=%s because parsed hours<=0", idx, slot_str)
            continue
        if force_start_based_window:
            slot_start_offset = prev_slot_end
            slot_end_offset = slot_hours
        else:
            # Clamp the window to the experiment's actual duration.
            slot_end_offset = min(slot_hours, total_duration)
            slot_start_offset = min(prev_slot_end, total_duration)
        if slot_end_offset <= slot_start_offset:
            LOGGER.debug(
                "Skip slot index=%d label=%s because end_offset<=start_offset (%.3f<=%.3f)",
                idx,
                slot_str,
                slot_end_offset,
                slot_start_offset,
            )
            prev_slot_end = max(prev_slot_end, slot_end_offset)
            continue
        slot_start = start_time + timedelta(hours=slot_start_offset)
        slot_end = start_time + timedelta(hours=slot_end_offset)
        if not force_start_based_window:
            # Never run past the overall end time.
            if slot_start >= end_time:
                LOGGER.debug(
                    "Skip slot index=%d label=%s start>=end (%s>=%s)",
                    idx,
                    slot_str,
                    slot_start.isoformat(),
                    end_time.isoformat(),
                )
                continue
            if slot_end > end_time:
                slot_end = end_time
        LOGGER.debug(
            "Slot index=%d label=%s offsets=%.3f%.3f actual=%s%s",
            idx,
            slot_str,
            slot_start_offset,
            slot_end_offset,
            slot_start.isoformat(),
            slot_end.isoformat(),
        )
        if force_start_based_window:
            prev_slot_end = max(prev_slot_end, slot_hours)
        else:
            prev_slot_end = max(prev_slot_end, slot_end_offset)
        # Query each field's instantaneous value at this slot's cumulative
        # time point (slot_end, e.g. start+0.5h, start+1h, ...).
        target_time_point = slot_end
        for field_name, entry in query_targets:
            result_key = entry.get("result_key") or field_name
            if not result_key:
                result_key = field_name
            entry_filters = entry.get("filters") if isinstance(entry, dict) else None
            if result_key not in temperature_data:
                temperature_data[result_key] = {}
            # Index + label keeps the key unique even with duplicate labels.
            slot_key = f"{idx}_{slot_str}"
            # Both datetime args are the target point: _query_influxdb ignores
            # its start argument and searches a small window around end_time.
            value = _query_influxdb(
                field_name,
                target_time_point,  # start arg (unused by the lookup)
                target_time_point,  # end arg = the target time point
                influx_url,
                influx_org,
                influx_token,
                influx_bucket,
                influx_measurement,
                filters=entry_filters if entry_filters else None,
            )
            if value is not None:
                temperature_data[result_key][slot_key] = value
                LOGGER.debug(
                    "Field=%s result_key=%s slot=%s value=%.3f",
                    field_name,
                    result_key,
                    slot_key,
                    value,
                )
            else:
                LOGGER.debug(
                    "Field=%s result_key=%s slot=%s yielded no data",
                    field_name,
                    result_key,
                    slot_key,
                )
        # NOTE(review): this unconditional assignment can lower prev_slot_end
        # below the max() updates above when slots are not strictly increasing
        # -- confirm slots are always monotonic, or remove this line.
        prev_slot_end = slot_end_offset
    return temperature_data
def _parse_experiment_time(raw: str, env_name: str) -> Optional[datetime]:
    """Parse an ISO-8601 UTC timestamp such as ``2024-01-01T10:00:00Z``.

    Returns a timezone-aware UTC datetime, or None when *raw* is empty or
    matches no supported format (a warning is printed to stderr).  A bare
    'Z' suffix is interpreted as UTC -- not local time -- so the query
    window and the displayed times stay consistent.
    """
    if not raw:
        return None
    for fmt in ("%Y-%m-%dT%H:%M:%S%z", "%Y-%m-%dT%H:%M:%SZ"):
        try:
            parsed = datetime.strptime(raw, fmt)
        except ValueError:
            continue
        if parsed.tzinfo is None:
            # The literal 'Z' format yields a naive value; it means UTC.
            parsed = parsed.replace(tzinfo=timezone.utc)
        return parsed.astimezone(timezone.utc)
    print(f"Warning: Failed to parse {env_name} '{raw}'", file=sys.stderr)
    return None


def build_temperature_table(_: Dict[str, Any]) -> Dict[str, Any]:
    """Build the scriptTable spec for the temperature record table.

    The incoming experiment payload is ignored; the table structure and its
    data sources are fixed (see _default_sections).  Returns a dict with
    token/startRow/startCol/cells compatible with the scriptTable placeholder.

    Fixes over the previous version: EXPERIMENT_START is parsed once through
    _parse_experiment_time (the old code re-parsed it with strptime and
    crashed when the variable was unset), and the ambient-temperature query
    is skipped instead of erroring when the time window is missing.
    """
    token = os.environ.get("TABLE_TOKEN", "scriptTable1")
    row_offset = int(os.environ.get("TABLE_START_ROW", "0") or 0)
    col_offset = int(os.environ.get("TABLE_START_COL", "0") or 0)
    motor_speed = os.environ.get("TABLE_MOTOR_SPEED", "980RPM")
    # Experiment window from the environment (UTC; either may be absent).
    start_str = os.environ.get("EXPERIMENT_START", "").strip()
    end_str = os.environ.get("EXPERIMENT_END", "").strip()
    start_time = _parse_experiment_time(start_str, "EXPERIMENT_START")
    end_time = _parse_experiment_time(end_str, "EXPERIMENT_END")
    time_slots = _time_slots()
    sections = _default_sections()
    # Query the instantaneous temperatures for every section/time slot.
    temperature_data = _load_temperature_data(time_slots, sections, start_time, end_time)
    # Defaults stay disabled so cells without query results remain blank.
    use_defaults = False
    cells = _build_cells(
        time_slots,
        sections,
        motor_speed,
        start_time,
        end_time,
        temperature_data,
        use_defaults=use_defaults,
    )
    # Data rows start at template row 4; rows 0-1 hold the header cells below.
    for cell in cells:
        cell["row"] += 4
    # Start/end time header cells (local wall time).  The displayed end time
    # is start + 3.5h, the span of the default time slots, not EXPERIMENT_END
    # -- NOTE(review): confirm this is intended with custom TABLE_TIME_SLOTS.
    if start_time is not None:
        local_start = start_time.astimezone(tz=None)
        local_end = local_start + timedelta(hours=3.5)
        cells.append({"row": 1, "col": 1, "value": local_start.strftime("%Y-%m-%d %H:%M:%S")})
        cells.append({"row": 1, "col": 3, "value": local_end.strftime("%Y-%m-%d %H:%M:%S")})
    # Ambient temperature header cell; kept as "" when unavailable so the
    # Word COM layer never receives a None value.
    influx_url = os.environ.get("INFLUX_URL", "").strip()
    influx_org = os.environ.get("INFLUX_ORG", "").strip()
    influx_token = os.environ.get("INFLUX_TOKEN", "").strip()
    influx_bucket = os.environ.get("INFLUX_BUCKET", "PCM").strip()
    influx_measurement = os.environ.get("INFLUX_MEASUREMENT", "PCM_Measurement").strip()
    value: Optional[float] = None
    if start_time is not None and end_time is not None:
        value = _query_influxdb(
            "环境温度",
            start_time,
            end_time,
            influx_url,
            influx_org,
            influx_token,
            influx_bucket,
            influx_measurement,
            filters={"data_type": "LSDAQ"},
        )
    cells.append({"row": 0, "col": 1, "value": f"{value:.1f}" if value is not None else ""})
    return {
        "token": token,
        "startRow": row_offset,
        "startCol": col_offset,
        "cells": cells,
    }
def main() -> int:
    """Entry point: configure logging, build the table spec, print it as JSON.

    Returns 0 on success; on any failure prints "error: ..." to stderr and
    returns 1.
    """
    try:
        # Logging setup is best-effort: a failure here must not stop the run.
        try:
            if not logging.getLogger().handlers:
                level_name = os.environ.get("TABLE_LOG_LEVEL", "DEBUG").strip() or "DEBUG"
                level = getattr(logging, level_name.upper(), logging.DEBUG)
                log_path = os.path.abspath(
                    os.environ.get("TABLE_LOG_FILE", "test.log").strip() or "test.log"
                )
                logging.basicConfig(
                    level=level,
                    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
                    handlers=[
                        logging.FileHandler(log_path, encoding="utf-8"),
                        logging.StreamHandler(sys.stderr),
                    ],
                )
                LOGGER.info("Logging initialized -> file=%s level=%s", log_path, logging.getLevelName(level))
                _log_environment_variables()
                # Keep stdout UTF-8 so the JSON payload survives Windows consoles.
                sys.stdout.reconfigure(encoding="utf-8")  # type: ignore[attr-defined]
        except Exception:
            pass
        payload = _load_payload()
        spec = build_temperature_table(payload)
        print(json.dumps({"tables": [spec]}, ensure_ascii=False))
        return 0
    except Exception as exc:
        print(f"error: {exc}", file=sys.stderr)
        return 1
if __name__ == "__main__":
    # Run as a script: propagate main()'s return code as the process exit status.
    sys.exit(main())