main
COT001\DEV 2026-04-09 09:22:01 +08:00
parent acca3cfaf7
commit a111c54582
7 changed files with 4892 additions and 125 deletions

File diff suppressed because one or more lines are too long

View File

@ -98,40 +98,50 @@ def _get_influx_config() -> Dict[str, str]:
def _parse_experiment_times() -> tuple[Optional[datetime], Optional[datetime]]:
"""解析实验时间"""
"""解析实验时间前端传入本地时间转换为UTC用于InfluxDB查询"""
from datetime import timezone, timedelta
start_str = os.environ.get("EXPERIMENT_START", "").strip()
end_str = os.environ.get("EXPERIMENT_END", "").strip()
# LOGGER.debug("原始时间字符串: START=%s, END=%s", start_str, end_str)
start_time: Optional[datetime] = None
end_time: Optional[datetime] = None
if start_str:
try:
for fmt in ["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%dT%H:%M:%S%z"]:
for fmt in ["%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S.%f"]:
try:
start_time = datetime.strptime(start_str, fmt)
if start_time.tzinfo is not None:
# 转换为本地时间并去除时区信息
start_time = start_time.astimezone(tz=None).replace(tzinfo=None)
# 本地时间-8小时=UTC
start_time = start_time - timedelta(hours=8)
start_time = start_time.replace(tzinfo=timezone.utc)
# LOGGER.debug("解析START: 本地=%s → UTC=%s", start_str, start_time)
break
except ValueError:
continue
if start_time is None:
LOGGER.warning("无法解析EXPERIMENT_START: %s", start_str)
except Exception as e:
print(f"Warning: Failed to parse EXPERIMENT_START '{start_str}': {e}", file=sys.stderr)
LOGGER.error("解析EXPERIMENT_START失败 '%s': %s", start_str, e)
if end_str:
try:
for fmt in ["%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%dT%H:%M:%S%z"]:
for fmt in ["%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S.%f"]:
try:
end_time = datetime.strptime(end_str, fmt)
if end_time.tzinfo is not None:
# 转换为本地时间并去除时区信息
end_time = end_time.astimezone(tz=None).replace(tzinfo=None)
# 本地时间-8小时=UTC
end_time = end_time - timedelta(hours=8)
end_time = end_time.replace(tzinfo=timezone.utc)
# LOGGER.debug("解析END: 本地=%s → UTC=%s", end_str, end_time)
break
except ValueError:
continue
if end_time is None:
LOGGER.warning("无法解析EXPERIMENT_END: %s", end_str)
except Exception as e:
print(f"Warning: Failed to parse EXPERIMENT_END '{end_str}': {e}", file=sys.stderr)
LOGGER.error("解析EXPERIMENT_END失败 '%s': %s", end_str, e)
return start_time, end_time
@ -160,8 +170,8 @@ def _parse_time_slot(slot_str: str) -> float:
def _time_slots() -> List[str]:
raw = os.environ.get("TABLE_TIME_SLOTS", "").strip()
if not raw:
# 根据图片时间刻度是0.5h, 1h, 1.5h, 2h, 2.5h, 3h, 3.5h7列
return ["0.5h", "1h", "1.5h", "2h", "2.5h", "3h", "3.5h"]
# 时间刻度0.5h, 1h, 1.5h, 2h, 2.5h, 3h, 3.4h7列
return ["0.5h", "1h", "1.5h", "2h", "2.5h", "3h", "3.4h"]
slots = [slot.strip() for slot in raw.split(",")]
return [slot for slot in slots if slot]
@ -175,15 +185,11 @@ def _default_sections() -> List[Dict[str, Any]]:
{"label": "#2", "field": "主轴承#2", "filters": {"data_type": "LSDAQ"}, "result_key": "主轴承#2"},
{"label": "#3", "field": "主轴承#3", "filters": {"data_type": "LSDAQ"}, "result_key": "主轴承#3"},
{"label": "#4", "field": "主轴承#4", "filters": {"data_type": "LSDAQ"}, "result_key": "主轴承#4"},
{"label": "#5", "field": "主轴承#5", "filters": {"data_type": "LSDAQ"}, "result_key": "主轴承#5"},
{"label": "#6", "field": "主轴承#6", "filters": {"data_type": "LSDAQ"}, "result_key": "主轴承#6"},
]},
{"name": "十字头", "entries": [
{"label": "#1", "field": "十字头#1", "filters": {"data_type": "LSDAQ"}, "result_key": "十字头#1"},
{"label": "#2", "field": "十字头#2", "filters": {"data_type": "LSDAQ"}, "result_key": "十字头#2"},
{"label": "#3", "field": "十字头#3", "filters": {"data_type": "LSDAQ"}, "result_key": "十字头#3"},
{"label": "#4", "field": "十字头#4", "filters": {"data_type": "LSDAQ"}, "result_key": "十字头#4"},
{"label": "#5", "field": "十字头#5", "filters": {"data_type": "LSDAQ"}, "result_key": "十字头#5"},
]},
{"name": "减速箱小轴承", "entries": [
{"label": "#1输入法兰端", "field": "减速箱小轴承1", "filters": {"data_type": "LSDAQ"}, "result_key": "减速箱小轴承#1"},
@ -216,18 +222,20 @@ def _query_load_status_timeline(
import pandas as pd
import warnings
from influxdb_client.client.warnings import MissingPivotFunction
except ImportError:
LOGGER.warning("InfluxDB client not available, skip load_status timeline query")
except ImportError as e:
LOGGER.error("InfluxDB客户端导入失败: %s,请安装: pip install influxdb-client pandas", e)
return []
try:
client = InfluxDBClient(url=influx_url, org=influx_org, token=influx_token)
query_api = client.query_api()
# 确保使用UTC时间格式查询
start_rfc = start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
end_rfc = end_time.strftime('%Y-%m-%dT%H:%M:%SZ')
LOGGER.debug("查询load_status时间范围: %s%s", start_rfc, end_rfc)
# 查询load_status字段的所有数据点在Breaker数据类型中
flux = f'''
from(bucket: "{influx_bucket}")
|> range(start: {start_rfc}, stop: {end_rfc})
@ -253,21 +261,20 @@ from(bucket: "{influx_bucket}")
LOGGER.warning("No load_status timeline data found")
return []
# 转换为时间线数据,确保时区一致性
# 转换为时间线数据保持UTC时区
from datetime import timezone
timeline = []
for _, row in df.iterrows():
time_obj = pd.to_datetime(row['_time'])
# 转换为本地时间去除时区信息与start_time/end_time保持一致
if hasattr(time_obj, 'tz') and time_obj.tz is not None:
# 对于pandas Timestamp先转换为本地时区再转为Python datetime
time_obj = time_obj.tz_convert(None).to_pydatetime()
elif hasattr(time_obj, 'to_pydatetime'):
# 转换为Python datetime对象
time_obj = time_obj.to_pydatetime()
# 确保转换为UTC时区的datetime对象
if hasattr(time_obj, 'tz_localize'):
if time_obj.tz is None:
time_obj = time_obj.tz_localize(timezone.utc)
else:
time_obj = time_obj.tz_convert(timezone.utc)
# 确保没有时区信息
if hasattr(time_obj, 'tzinfo') and time_obj.tzinfo is not None:
time_obj = time_obj.replace(tzinfo=None)
if hasattr(time_obj, 'to_pydatetime'):
time_obj = time_obj.to_pydatetime()
timeline.append({
'time': time_obj,
@ -352,11 +359,11 @@ def _calculate_effective_time_points(
LOGGER.info("Effective running periods: %d periods, total %.3f hours",
len(effective_periods), total_effective_hours)
for period in effective_periods:
LOGGER.debug("Effective period: %s%s (%.3f hours)",
period['start'].strftime('%H:%M:%S'),
period['end'].strftime('%H:%M:%S'),
period['duration_hours'])
# for period in effective_periods:
# LOGGER.debug("Effective period: %s → %s (%.3f hours)",
# period['start'].strftime('%H:%M:%S'),
# period['end'].strftime('%H:%M:%S'),
# period['duration_hours'])
# 3. 计算每个时间槽对应的真实时间点
effective_time_points = {}
@ -368,24 +375,10 @@ def _calculate_effective_time_points(
effective_time_points[slot_str] = None
continue
# 如果目标时间 >= 总有效时间(允许小的浮点误差),使用最后一个有效时间段的结束时间
# 这样可以处理边界情况:实验正好运行了目标时长,但由于浮点精度可能略小于目标值
tolerance = 0.01 # 允许 0.01 小时的容差
if target_effective_hours >= total_effective_hours - tolerance:
if effective_periods:
# 使用最后一个有效时间段的结束时间
last_period = effective_periods[-1]
target_time_point = last_period['end']
effective_time_points[slot_str] = target_time_point
LOGGER.info("Slot %s: effective %.3fh >= total %.3fh, using last period end time %s",
slot_str, target_effective_hours, total_effective_hours,
target_time_point.strftime('%H:%M:%S'))
else:
# 如果没有有效时间段,使用实验结束时间
effective_time_points[slot_str] = end_time
LOGGER.info("Slot %s: effective %.3fh >= total %.3fh, using experiment end time %s",
slot_str, target_effective_hours, total_effective_hours,
end_time.strftime('%H:%M:%S') if end_time else "N/A")
if target_effective_hours > total_effective_hours:
# LOGGER.warning("Target effective time %.3fh exceeds total effective time %.3fh for slot %s",
# target_effective_hours, total_effective_hours, slot_str)
effective_time_points[slot_str] = None
continue
# 在有效时间段中查找累计运行target_effective_hours小时的时间点
@ -439,8 +432,11 @@ def _query_influxdb_range_with_load_status(
client = InfluxDBClient(url=influx_url, org=influx_org, token=influx_token)
query_api = client.query_api()
# 确保使用UTC时间格式
start_rfc = start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
end_rfc = end_time.strftime('%Y-%m-%dT%H:%M:%SZ')
LOGGER.debug("查询字段 %s 时间范围: %s%s", field_name, start_rfc, end_rfc)
# 构建过滤条件
tag_filters = ""
@ -518,102 +514,151 @@ def _query_influxdb_with_load_status(
influx_measurement: str,
filters: Optional[Dict[str, str]] = None,
) -> Optional[float]:
"""查询 InfluxDB 获取指定字段在指定时间点的瞬时值(仅当 load_status = 1 时)"""
"""查询 InfluxDB 获取指定字段在指定时间点的瞬时值(仅当 load_status = 1 时)
逻辑
1. ±window 内同时查询温度数据和 load_status 数据
2. 对每个温度数据点查找其最近的前一个 load_status 读数判断是否为 1
3. 仅保留 load_status=1 期间的温度数据点
4. 在有效数据点中选取最接近 target_time 的瞬时值
5. 如果当前窗口无有效数据逐步扩大窗口重试102030min
"""
try:
from influxdb_client import InfluxDBClient
import pandas as pd
import numpy as np
import warnings
from influxdb_client.client.warnings import MissingPivotFunction
except ImportError:
LOGGER.warning("InfluxDB client not available, skip query for field=%s", field_name)
return None
client = None
try:
client = InfluxDBClient(url=influx_url, org=influx_org, token=influx_token)
query_api = client.query_api()
LOGGER.debug(
"Querying field=%s measurement=%s target_time=%s filters=%s (with load_status=1)",
"查询字段=%s 目标时间=%s (UTC) 过滤器=%s",
field_name,
influx_measurement,
target_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
filters or {},
)
# 查询逻辑:查询目标时间点之前(包含目标时间点)的数据,获取最接近目标时间点的瞬时值
# 使用实验开始时间作为查询起点,目标时间点作为查询终点,确保获取该时间点的瞬时数值
# 需要从实验开始时间查询,因为有效时间点是基于累计运行时间计算的
# 获取实验开始时间(需要从环境变量或传入参数获取)
# 为了简化,我们使用一个合理的时间窗口:从目标时间点往前推足够长的时间
# 但为了精确,我们应该查询到目标时间点为止,取最后一条
window_minutes = 60 # 往前查询60分钟确保能覆盖到数据
query_start = target_time - timedelta(minutes=window_minutes)
# 查询终点设置为目标时间点,确保获取的是该时间点或之前的数据
query_end = target_time
query_start_rfc = query_start.strftime('%Y-%m-%dT%H:%M:%SZ')
query_end_rfc = query_end.strftime('%Y-%m-%dT%H:%M:%SZ')
# 构建过滤条件
# 构建 tag 过滤条件
tag_filters = ""
if filters:
for key, value in filters.items():
tag_filters += f'\n |> filter(fn: (r) => r["{key}"] == "{value}")'
# 查询温度数据:查询到目标时间点为止,取最后一条(最接近目标时间点的瞬时值)
flux = f'''
# 逐步扩大窗口查找 load_status=1 的有效数据
for window_minutes in [10, 20, 30]:
query_start = target_time - timedelta(minutes=window_minutes)
query_end = target_time + timedelta(minutes=window_minutes)
start_rfc = query_start.strftime('%Y-%m-%dT%H:%M:%SZ')
end_rfc = query_end.strftime('%Y-%m-%dT%H:%M:%SZ')
LOGGER.debug("查询窗口 ±%dmin: %s%s", window_minutes, start_rfc, end_rfc)
# 查询温度数据(全部点位,按时间排序)
temp_flux = f'''
from(bucket: "{influx_bucket}")
|> range(start: {query_start_rfc}, stop: {query_end_rfc})
|> range(start: {start_rfc}, stop: {end_rfc})
|> filter(fn: (r) => r["_measurement"] == "{influx_measurement}")
|> filter(fn: (r) => r["_field"] == "{field_name}"){tag_filters}
|> sort(columns: ["_time"])
|> last()
|> yield(name: "instantaneous_at_effective_time")
'''.strip()
LOGGER.debug("Flux查询语句:\n%s", flux)
# 查询同窗口内的 load_status 时间线
status_flux = f'''
from(bucket: "{influx_bucket}")
|> range(start: {start_rfc}, stop: {end_rfc})
|> filter(fn: (r) => r["_measurement"] == "{influx_measurement}")
|> filter(fn: (r) => r["data_type"] == "Breaker")
|> filter(fn: (r) => r["_field"] == "load_status")
|> sort(columns: ["_time"])
'''.strip()
with warnings.catch_warnings():
warnings.simplefilter("ignore", MissingPivotFunction)
frames = query_api.query_data_frame(flux)
if isinstance(frames, list):
df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
else:
df = frames
LOGGER.debug("温度Flux:\n%s", temp_flux)
LOGGER.debug("状态Flux:\n%s", status_flux)
# 获取瞬时值(最近的一个有效数据点)
if df.empty or '_value' not in df.columns:
LOGGER.debug("No valid data found for field=%s at effective time point", field_name)
return None
# 取第一行的值因为查询已经排序并取了last()
instant_value = df['_value'].iloc[0]
if pd.isna(instant_value):
LOGGER.debug("Instantaneous value is NaN for field=%s", field_name)
return None
with warnings.catch_warnings():
warnings.simplefilter("ignore", MissingPivotFunction)
temp_frames = query_api.query_data_frame(temp_flux)
status_frames = query_api.query_data_frame(status_flux)
value = float(instant_value)
# 如果有时间信息,记录实际的数据时间点
if '_time' in df.columns:
actual_time = df['_time'].iloc[0]
LOGGER.debug("Field=%s instantaneous_value=%.3f actual_time=%s (at effective time)",
field_name, value, actual_time)
else:
LOGGER.debug("Field=%s instantaneous_value=%.3f (at effective time)", field_name, value)
return value
# 合并结果
if isinstance(temp_frames, list):
temp_df = pd.concat(temp_frames, ignore_index=True) if temp_frames else pd.DataFrame()
else:
temp_df = temp_frames
if isinstance(status_frames, list):
status_df = pd.concat(status_frames, ignore_index=True) if status_frames else pd.DataFrame()
else:
status_df = status_frames
if temp_df.empty or '_value' not in temp_df.columns or '_time' not in temp_df.columns:
LOGGER.debug("±%dmin 窗口无温度数据 field=%s", window_minutes, field_name)
continue
if status_df.empty or '_value' not in status_df.columns or '_time' not in status_df.columns:
LOGGER.debug("±%dmin 窗口无 load_status 数据 field=%s", window_minutes, field_name)
continue
# 构建 load_status 时间线转换为int64纳秒时间戳避免类型问题
status_times = pd.to_datetime(status_df['_time']).values.astype('datetime64[ns]').astype(np.int64)
status_values = status_df['_value'].values.astype(float)
# 对每个温度数据点,用最近的前一个 load_status 判断是否有效
temp_df = temp_df.copy()
temp_df['_time_ns'] = pd.to_datetime(temp_df['_time']).values.astype('datetime64[ns]').astype(np.int64)
valid_mask = []
for t_ns in temp_df['_time_ns']:
# 找 <= t_ns 的最后一个 load_status 读数
prior_idx = np.searchsorted(status_times, t_ns, side='right') - 1
if prior_idx >= 0:
valid_mask.append(status_values[prior_idx] == 1.0)
else:
# 没有更早的读数,用最近的一个
nearest_idx = np.argmin(np.abs(status_times - t_ns))
valid_mask.append(status_values[nearest_idx] == 1.0)
valid_df = temp_df[valid_mask]
if valid_df.empty:
LOGGER.debug("±%dmin 窗口内无 load_status=1 的温度数据 field=%s", window_minutes, field_name)
continue
# 在有效点中选取最接近 target_time 的瞬时值
target_ns = np.datetime64(target_time, 'ns').astype(np.int64)
diffs = np.abs(valid_df['_time_ns'].values - target_ns)
closest_idx = np.argmin(diffs)
instant_value = valid_df.iloc[closest_idx]['_value']
if pd.isna(instant_value):
LOGGER.debug("最近有效点值为 NaN field=%s", field_name)
continue
value = float(instant_value)
actual_time = valid_df.iloc[closest_idx]['_time']
LOGGER.debug(
"Field=%s value=%.3f actual_time=%s (load_status=1, ±%dmin窗口, 有效点%d/%d)",
field_name, value, actual_time, window_minutes,
len(valid_df), len(temp_df),
)
return value
LOGGER.warning("扩窗到±30min仍无 load_status=1 数据 field=%s", field_name)
return None
except Exception as e:
LOGGER.error("Error querying InfluxDB for field=%s: %s", field_name, e)
return None
finally:
try:
client.close()
except Exception:
pass
if client:
try:
client.close()
except Exception:
pass
def _load_temperature_data_with_load_status(
@ -675,8 +720,12 @@ def _load_temperature_data_with_load_status(
target_time_point = effective_time_points.get(slot_str)
if target_time_point is None:
LOGGER.warning("No effective time point calculated for slot %s, skipping", slot_str)
continue
LOGGER.warning("No effective time point for slot %s, using simple offset", slot_str)
slot_hours = _parse_time_slot(slot_str)
target_time_point = start_time + timedelta(hours=slot_hours)
if target_time_point > end_time:
LOGGER.warning("Time point %s exceeds end time, skipping", slot_str)
continue
LOGGER.debug("Processing slot %s at effective time point %s",
slot_str, target_time_point.strftime('%Y-%m-%d %H:%M:%S'))
@ -845,10 +894,15 @@ def build_temperature_table_with_load_status(_: Dict[str, Any]) -> Dict[str, Any
start_str = os.environ.get("EXPERIMENT_START", "").strip()
if start_str and start_time:
try:
# 使用与原始脚本相同的时间处理逻辑
utc_aware_dt = datetime.strptime(start_str, "%Y-%m-%dT%H:%M:%S%z")
local_dt1 = utc_aware_dt.astimezone(tz=None)
local_dt2 = utc_aware_dt.astimezone(tz=None) + timedelta(hours=3.5)
# 尝试带时区和不带时区两种格式
try:
utc_aware_dt = datetime.strptime(start_str, "%Y-%m-%dT%H:%M:%S%z")
local_dt1 = utc_aware_dt.astimezone(tz=None)
except ValueError:
# 不带时区,直接解析为本地时间
local_dt1 = datetime.strptime(start_str, "%Y-%m-%dT%H:%M:%S")
local_dt2 = local_dt1 + timedelta(hours=3.5)
start_time_value = local_dt1.strftime("%Y-%m-%d %H:%M:%S")
end_time_value = local_dt2.strftime("%Y-%m-%d %H:%M:%S")
cells.append({"row": start_time_row, "col": start_time_value_col, "value": start_time_value})
@ -878,7 +932,7 @@ def build_temperature_table_with_load_status(_: Dict[str, Any]) -> Dict[str, Any
)
# 确保value不是None避免Word COM操作异常与原始脚本一致
if value is not None:
cells.append({"row": 0, "col": 1, "value": f"{value:.1f}"})
cells.append({"row": 0, "col": 1, "value": f"{value:.1f}"})
else:
cells.append({"row": 0, "col": 1, "value": ""})

View File

@ -0,0 +1,300 @@
# 任务配置
task:
period: 210 # 单位分钟
control_reg_addr: 1200
# gps 配置
gps:
port: /dev/ttyLP4
baudrate: 9600
timeout: 50
# breaker断路器配置
breaker:
indicator:
port: /dev/ttyUSB_LIGHT
baudrate: 9600
timeout: 50
port: /dev/ttyUSB_BREAKER
baudrate: 9600
timeout: 10
task_start_threshold: 5000 #int, 判定任务开始的有功功率阈值/w
task_stop_threshold: 3000 #int, 判定任务结束的有功功率阈值/w
duration: 3 #int电流持续时间/s
OVV: 300 #int, 过压值/V
UVV: 150 #int欠压值/V
OCV: 10000 #int, 过流值/0.01A
LCV: 90 #int漏电值/1mA
OTV: 150 #int过温值/℃
OPV: 13000 #int过载有功功率/W
OVT: 4000 #int过压动作时间/0.1s
UVT: 4000 #int欠压动作时间/0.1s
LCT: 4000 #int漏电动作时间/0.1s
OCT: 4000 #int过流动作时间/0.1s
OPT: 4000 #int有功过载动作时间/0.1s
OTT: 4000 #int过温动作时间/0.1s
# lsdaq 配置
lsdaq:
# Modbus-RTU 配置
port: /dev/ttyLP3 # 串口号如COM3或'/dev/ttyLP3'
baudrate: 115200 # 波特率
timeout: 1 # 超时时间(秒)
mode: 0 # 工作模式0-work或1-calib
# 配置采集通道传感器类型
# 用1位标识传感器类型16通道16位组成1个uint16数据。CH1->CH16
# 0: PT100; 1: 4-20mA电流型传感器;
sensor_type: '0000 0000 0000 0000'
sensor_Tmp_CalibParam:
# PT100传感器对应的K值和T值
CH1: {K2: 0, K: 0.0311314349267159, B: -536.209396150856}
CH2: {K2: 0, K: 0.0311931732683759, B: -538.060652944714}
CH3: {K2: 0, K: 0.0310081286336704, B: -533.809292492742}
CH4: {K2: 0, K: 0.0312335701465264, B: -538.760137686226}
CH5: {K2: 0, K: 0.0311077027362215, B: -536.798383628092}
CH6: {K2: 0, K: 0.0311166612575919, B: -536.877851004226}
CH7: {K2: 0, K: 0.0309995873314733, B: -534.326957548871}
CH8: {K2: 0, K: 0.031204654529397, B: -539.09245272611}
CH9: {K2: 0, K: 0.0311948194176528, B: -538.034587271267}
CH10: {K2: 0, K: 0.030996596669504, B: -534.701789148401}
CH11: {K2: 0, K: 0.0310055444481469, B: -535.504834782268}
CH12: {K2: 0, K: 0.0310551132865325, B: -536.24377288418}
CH13: {K2: 0, K: 0.0310239427307571, B: -535.807161638956}
CH14: {K2: 0, K: 0.0313454593571509, B: -541.451349369065}
CH15: {K2: 0, K: 0.0313738619404098, B: -541.684813710032}
CH16: {K2: 0, K: 0.0311081891835453, B: -536.64715117882}
sensor_Cur_CalibParam:
# 4~20mA传感器对应的K值和T值
CH1: {K2: 0.0, K: 1.0, B: 0.0}
CH2: {K2: 0.0, K: 1.0, B: 0.0}
CH3: {K2: 0.0, K: 1.0, B: 0.0}
CH4: {K2: 0.0, K: 1.0, B: 0.0}
CH5: {K2: 0.0, K: 1.0, B: 0.0}
CH6: {K2: 0.0, K: 1.0, B: 0.0}
CH7: {K2: 0.0, K: 1.0, B: 0.0}
CH8: {K2: 0.0, K: 1.0, B: 0.0}
CH9: {K2: 0.0, K: 1.0, B: 0.0}
CH10: {K2: 0.0, K: 1.0, B: 0.0}
CH11: {K2: 0.0, K: 1.0, B: 0.0}
CH12: {K2: 0.0, K: 1.0, B: 0.0}
CH13: {K2: 0.0, K: 1.0, B: 0.0}
CH14: {K2: 0.0, K: 1.0, B: 0.0}
CH15: {K2: 0.0, K: 1.0, B: 0.0}
CH16: {K2: 0.0, K: 1.0, B: 0.0}
sensor_Pres_CalibParam:
# mA->PSI转换对应的K值和T值
CH1: {K2: 0.0, K: 1.0, B: 0.0}
CH2: {K2: 0.0, K: 1.0, B: 0.0}
CH3: {K2: 0.0, K: 1.0, B: 0.0}
CH4: {K2: 0.0, K: 1.0, B: 0.0}
CH5: {K2: 0.0, K: 1.0, B: 0.0}
CH6: {K2: 0.0, K: 1.0, B: 0.0}
CH7: {K2: 0.0, K: 1.0, B: 0.0}
CH8: {K2: 0.0, K: 1.0, B: 0.0}
CH9: {K2: 0.0, K: 1.0, B: 0.0}
CH10: {K2: 0.0, K: 1.0, B: 0.0}
CH11: {K2: 0.0, K: 1.0, B: 0.0}
CH12: {K2: 0.0, K: 1.0, B: 0.0}
CH13: {K2: 0.0, K: 1.0, B: 0.0}
CH14: {K2: 0.0, K: 1.0, B: 0.0}
CH15: {K2: 0.0, K: 1.0, B: 0.0}
CH16: {K2: 0.0, K: 1.0, B: 0.0}
# 报警参数设置
warning_param:
# 是否启用报警
enable: '1111 0111 1111 1111' #1-启用0-禁用
CH1: {lower: -20.0, upper: 110.0}
CH2: {lower: -20.0, upper: 110.0}
CH3: {lower: -20.0, upper: 110.0}
CH4: {lower: -20.0, upper: 110.0}
CH5: {lower: -20.0, upper: 110.0}
CH6: {lower: -20.0, upper: 110.0}
CH7: {lower: -20.0, upper: 110.0}
CH8: {lower: -20.0, upper: 110.0}
CH9: {lower: -20.0, upper: 110.0}
CH10: {lower: -20.0, upper: 110.0}
CH11: {lower: -20.0, upper: 110.0}
CH12: {lower: -20.0, upper: 110.0}
CH13: {lower: -20.0, upper: 110.0}
CH14: {lower: -20.0, upper: 110.0}
CH15: {lower: -20.0, upper: 110.0}
CH16: {lower: -20.0, upper: 50.0}
# 通道别名设置
alias:
CH1: '主轴承#1'
CH2: '主轴承#2'
CH3: '主轴承#3'
CH4: '主轴承#4'
CH5: '主轴承#5'
CH6: '主轴承#6'
CH7: '十字头#1'
CH8: '十字头#2'
CH9: '十字头#3'
CH10: '十字头#4'
CH11: '十字头#5'
CH12: '减速箱小轴承1'
CH13: '减速箱小轴承#2'
CH14: '减速箱大轴承#3'
CH15: '减速箱大轴承#4'
CH16: '环境温度'
# hsdaq 配置
hsdaq:
host: 192.168.0.2
port: 8080
local_host: 192.168.0.3
local_port: 8080
timeout: 50 # 单位ms
channels: 16
sample_time: 8 # 单位ms
sample_period: 1000 # 单位ms
one_sample_time: 100 # 单位us
# 配置高频采集通道传感器类型
# 用2位标识传感器类型16通道32位组成1个uint32数据。CH1->CH16
# 00: NPN或PNP型开关量; 01: 电压型传感器; 10: 4-20mA电流型传感器; 11: 振动传感器
sensor_type: '1010 1011 1011 1011 1011 1010 1010 0100'
frame_size_max: 1464 # 最大包长
file_size: 32000000 # 最大文件大小
file_type: 0 # 0-csv或1-bin
output_dir: data # 文件保存目录
min_free_gb: 1 # 最小剩余磁盘空间单位GB
# 选择保存数据的通道1-保存数据, 0-不保存CH1->CH16
save_flag: '0000 0000 0000 0000'
daq_board_no: '2504210002'
feature_type: "rms"
min_vol_cur_phy_value: 0.0
max_vol_cur_phy_value: 160.0
vol_cur_phy_scale: 1
mode: 0 # 工作模式0-'work'或1-'calib'
# 4~20mA传感器对应的K值和T值
sensor_Cur_CalibParam:
CH1: {K2: 0.0, K: 0.00258263, B: 0.001601482}
CH2: {K2: 0.0, K: 0.002572228, B: 0.000180365}
CH3: {K2: 0.0, K: 0.002577854, B: 0.003481423}
CH4: {K2: 0.0, K: 0.002574779, B: 0.001034803}
CH5: {K2: 0.0, K: 0.002563052, B: 0.000328752}
CH6: {K2: 0.0, K: 0.002573372, B: -5.67175e-06}
CH7: {K2: 0.0, K: 0.002580244, B: 0.001414032}
CH8: {K2: 0.0, K: 0.002578148, B: 0.001619703}
CH9: {K2: 0.0, K: 0.002582191, B: 0.000300086}
CH10: {K2: 0.0, K: 0.002572029, B: 0.000148142}
CH11: {K2: 0.0, K: 0.002576609, B: 0.001021399}
# CH12: {K2: 0.0, K: 0.001928556, B: 0.003335270}
CH12: {K2: 0.0, K: 0.01903484772, B: -78.80708088}
CH13: {K2: 0.0, K: 1.0, B: 0.0}
# CH14: {K2: 0.0, K: 0.001923312, B: -0.001293004}
CH14: {K2: 0.0, K: 0.0435167228946, B: -90.5329674119268}
CH15: {K2: 0.0, K: 0.002580323, B: 0.000544915}
CH16: {K2: 0.0, K: 0.002573487, B: 0.000873064}
# 电压传感器对应的K值和T值
sensor_Vol_CalibParam:
CH1: {K2: 0.0, K: 1.0, B: 0.0}
CH2: {K2: 0.0, K: 1.0, B: 0.0}
CH3: {K2: 0.0, K: 1.0, B: 0.0}
CH4: {K2: 0.0, K: 1.0, B: 0.0}
CH5: {K2: 0.0, K: 1.0, B: 0.0}
CH6: {K2: 0.0, K: 1.0, B: 0.0}
CH7: {K2: 0.0, K: 1.0, B: 0.0}
CH8: {K2: 0.0, K: 1.0, B: 0.0}
CH9: {K2: 0.0, K: 1.0, B: 0.0}
CH10: {K2: 0.0, K: 1.0, B: 0.0}
CH11: {K2: 0.0, K: 1.0, B: 0.0}
CH12: {K2: 0.0, K: 1.0, B: 0.0}
CH13: {K2: 0.0, K: 1.0, B: 0.0}
CH14: {K2: 0.0, K: 1.0, B: 0.0}
CH15: {K2: 0.0, K: 1.0, B: 0.0}
CH16: {K2: 0.0, K: 1.0, B: 0.0}
# 振动传感器对应的K值和T值
sensor_Vib_CalibParam:
CH1: {K2: 0.0, K: 0.000980181688598713, B: 0.784199472182921}
CH2: {K2: 0.0, K: 0.000979536991333191, B: 0.758179588312897}
CH3: {K2: 0.0, K: 0.000980321826962675, B: 0.747037511177572}
CH4: {K2: 0.0, K: 0.000980792974240141, B: 0.757538218907948}
CH5: {K2: 0.0, K: 0.000980973262504023, B: 0.806926311144011}
CH6: {K2: 0.0, K: 0.000982175606057935, B: 0.785563011832194}
CH7: {K2: 0.0, K: 0.000980663833333333, B: 0.773234764062384}
CH8: {K2: 0.0, K: 0.000980663833333333, B: 0.773234764062384}
CH9: {K2: 0.0, K: 0.000980663833333333, B: 0.773234764062384}
CH10: {K2: 0.0, K: 0.000980663833333333, B: 0.773234764062384}
CH11: {K2: 0.0, K: 0.000980663833333333, B: 0.773234764062384}
CH12: {K2: 0.0, K: 0.000980663833333333, B: 0.773234764062384}
CH13: {K2: 0.0, K: 0.000980663833333333, B: 0.773234764062384}
CH14: {K2: 0.0, K: 0.000980663833333333, B: 0.773234764062384}
CH15: {K2: 0.0, K: 0.000980663833333333, B: 0.773234764062384}
CH16: {K2: 0.0, K: 0.000980663833333333, B: 0.773234764062384}
# 报警参数设置
warning_param:
# 是否启用报警
enable: '0000000000010100' # 1-启用0-禁用
CH1: {lower: 0.0, upper: 1.0}
CH2: {lower: 0.0, upper: 1.0}
CH3: {lower: 0.0, upper: 1.0}
CH4: {lower: 0.0, upper: 1.0}
CH5: {lower: 0.0, upper: 1.0}
CH6: {lower: 0.0, upper: 1.0}
CH7: {lower: 0.0, upper: 1.0}
CH8: {lower: 0.0, upper: 1.0}
CH9: {lower: 0.0, upper: 1.0}
CH10: {lower: 0.0, upper: 1.0}
CH11: {lower: 0.0, upper: 1.0}
CH12: {lower: -20.0, upper: 80.0}
CH13: {lower: 0.0, upper: 1.0}
CH14: {lower: 10.0, upper: 100.0}
CH15: {lower: 0.0, upper: 1.0}
CH16: {lower: 0.0, upper: 1.0}
calib_params:
vibration:
frequency: 500 # 标定时振动频率单位Hz
alias:
# CH1: ''
# CH2: ''
# CH3: ''
CH4: '振动1'
# CH5: ''
CH6: '振动2'
# CH7: ''
CH8: '振动3'
# CH9: ''
CH10: '振动4'
# CH11: ''
CH12: '润滑油温'
# CH13: ''
CH14: '润滑油压'
# CH15: ''
# CH16: ''
modbus-server:
host: 10.0.21.88
port: 5020
timeout: 50
# 通过Modbus TCP协议提供gps、lsdaq和hsdaq特征值数据
plc-server:
host: 192.168.1.200
port: 5020
timeout: 500
slave_id: 1
# 从PLC读取的物理量
measurements:
pressure:
address: 100
type: float32
value: 0.6914023756980896
warning_param:
lower: 0.0
upper: 1.0
enable: 1
warning: 0
flow:
address: 104
type: float32
value: 0.2740088403224945
warning_param:
lower: 0.0
upper: 1.0
enable: 1
warning: 0
influxdb:
url: http://10.0.21.88:8086
token: 4nOdMJpKXQXAGoLDYYdRYDMxoKaEpqchzkqCQnYmgMqkQVDO3zRfaO5ifaCx90HbIRRuMZtgaUKWKNqyUD1hEg== # NOTE(review): plaintext secret committed to VCS — rotate this token and load it from an env var or secret store
org: MEASCON
active: true
bucket: PCM
config-server:
host: 0.0.0.0
port: 5000

View File

@ -0,0 +1,101 @@
# version: '1.0'
services:
pcmv1:
image: pcmv1:v1.0
container_name: pcmv1
command: ["/bin/bash", "-c", " source .venv/bin/activate && stty -F /dev/ttyUSB_LIGHT raw && stty -F /dev/ttyUSB_BREAKER raw && python3 src/pcm-influxdb-debug.py"]
network_mode: host
depends_on:
- influxdb
privileged: true
restart: unless-stopped
mem_limit: 256M
cpuset: "0"
# ports:
# - "0.0.0.0:5000:5000"
volumes:
- /home/torizon/src:/pcmv1/src
# - /mnt/ssd_data/pcmv1:/pcmv1/data
- /home/torizon/data:/pcmv1/data
- /dev:/dev:ro
tty: true
stdin_open: true
environment:
- TZ=Asia/Shanghai
deploy:
mode: replicated
replicas: 1
group_add:
- dialout
influxdb:
image: influxdb:v1.0
container_name: influxdb
restart: unless-stopped
# depends_on:
# - alpine
ports:
- "8086:8086"
mem_limit: 256M
cpuset: "1"
environment:
DOCKER_INFLUXDB_INIT_MODE: "setup"
DOCKER_INFLUXDB_INIT_USERNAME: "PCM"
DOCKER_INFLUXDB_INIT_PASSWORD: "1842moon" # 请修改密码
DOCKER_INFLUXDB_INIT_ORG: "MEASCON"
DOCKER_INFLUXDB_INIT_BUCKET: "PCM"
volumes:
- "/mnt/ssd_data/influxdb:/var/lib/influxdb2" # 数据持久化
- "/home/torizon/src/influxdb/config:/etc/influxdb2" # 配置持久化(可选)
# hdtestor:
# image: hdtestor:V0.1
# container_name: hdtestor
# command: ["/bin/bash", "-c", "/hdtestor/scripts/auto_partition_sda.sh"]
# network_mode: host
# privileged: true
# devices:
# - "/dev:/dev"
# restart: "no"
# mem_limit: 256M
# cpuset: "2"
# volumes:
# - /home/torizon/src/pcmv1/bash_scripts:/hdtestor/scripts
# environment:
# - TZ=Asia/Shanghai
alpine:
image: alpine:v1.0
container_name: alpine
command: ["sh", "-c", "ls /app -la && /app/auto_partition_sda.sh"]
network_mode: host
privileged: true
devices:
- "/dev:/dev"
restart: "no"
mem_limit: 256M
cpuset: "2"
volumes:
- /home/torizon/bash_scripts:/app
environment:
- TZ=Asia/Shanghai
# pcmv1_flask:
# image: pcm_flask_v1:latest
# container_name: pcm_flask_v1
# command: python src/app.pyc
# network_mode: host
# privileged: true
# restart: unless-stopped
# mem_limit: 512M
# cpuset: "2-3"
# volumes:
# - /home/torizon/app:/app
# - /mnt/ssd_data/pcmv1:/app/data
# tty: true
# stdin_open: true
# environment:
# - TZ=Asia/Shanghai
# deploy:
# mode: replicated
# replicas: 1

View File

@ -733,8 +733,15 @@ class SerialClient:
self.serial = None
def open(self):
self.serial = serial.Serial(self.port, self.baudrate, timeout=self.timeout)
return self.serial.is_open
try:
self.serial = serial.Serial(self.port, self.baudrate, timeout=self.timeout)
if self.logger:
self.logger.info(f"[{nowStr()}] Serial opened: {self.port}, baudrate={self.baudrate}, timeout={self.timeout}")
return self.serial.is_open
except Exception as e:
if self.logger:
self.logger.error(f"[{nowStr()}] Serial open failed: {self.port}, err={e}")
return False
def close(self):
if self.serial and self.serial.is_open:
@ -793,10 +800,19 @@ class IndicatorController:
'turnOffAlarm': ['', "0105 00A1 0000", 8, 100, 1, 1, 3],
}
self.alarm = 0
self.client.open()
opened = self.client.open()
if not opened and self.logger:
self.logger.warning(f"[{nowStr()}] Indicator serial not open, port={config.get('port', '/dev/ttyUSB1')}")
def exe(self, name):
return self.client.exeCmd(self.cmdList[name])
ret = self.client.exeCmd(self.cmdList[name])
if self.logger:
if ret[0]:
self.logger.info(f"[{nowStr()}] Indicator cmd ok: {name}")
self.logger.debug(ret[2])
else:
self.logger.warning(f"[{nowStr()}] Indicator cmd failed: {name}; {ret[2]}")
return ret
def alarming(self, closed):
"""报警时:红灯亮+蜂鸣器响,绿灯灭"""

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff