mirror of
https://github.com/PiBrewing/craftbeerpi4.git
synced 2024-11-25 00:18:17 +01:00
Limit log data transfer so it does not exceed 2 × max_rows (max_rows = 500, i.e. at most 1000 rows) by keeping only every nth row. This keeps the user interface operable.
This commit is contained in:
parent
4652b2b516
commit
e95237eef6
1 changed file with 18 additions and 7 deletions
|
@ -116,28 +116,36 @@ class LogController:
|
||||||
for name in names:
|
for name in names:
|
||||||
# get all log names
|
# get all log names
|
||||||
all_filenames = glob.glob('./logs/sensor_%s.log*' % name)
|
all_filenames = glob.glob('./logs/sensor_%s.log*' % name)
|
||||||
|
|
||||||
# concat all logs
|
# concat all logs
|
||||||
df = pd.concat([pd.read_csv(f, parse_dates=True, date_parser=dateparse, index_col='DateTime', names=['DateTime', name], header=None) for f in all_filenames])
|
df = pd.concat([pd.read_csv(f, parse_dates=True, date_parser=dateparse, index_col='DateTime', names=['DateTime', name], header=None) for f in all_filenames])
|
||||||
logging.info("Read all files for {}".format(names))
|
logging.info("Read all files for {}".format(names))
|
||||||
# resample if rate provided
|
# resample if rate provided
|
||||||
if sample_rate is not None:
|
# if sample_rate is not None:
|
||||||
df = df[name].resample(sample_rate).max()
|
# df = df[name].resample(sample_rate).max()
|
||||||
logging.info("Sampled now for {}".format(names))
|
# logging.info("Sampled now for {}".format(names))
|
||||||
df = df.dropna()
|
df = df[name].dropna()
|
||||||
|
# take every nth row so that total number of rows does not exceed max_rows * 2
|
||||||
|
max_rows = 500
|
||||||
|
total_rows = df.shape[0]
|
||||||
|
if (total_rows > 0) and (total_rows > max_rows):
|
||||||
|
nth = int(total_rows/max_rows)
|
||||||
|
if nth > 1:
|
||||||
|
df = df.iloc[::nth]
|
||||||
|
|
||||||
if result is None:
|
if result is None:
|
||||||
result = df
|
result = df
|
||||||
else:
|
else:
|
||||||
result = pd.merge(result, df, how='outer', left_index=True, right_index=True)
|
result = pd.merge(result, df, how='outer', left_index=True, right_index=True)
|
||||||
|
|
||||||
data = {"time": df.index.tolist()}
|
data = {"time": df.index.tolist()}
|
||||||
|
|
||||||
if len(names) > 1:
|
if len(names) > 1:
|
||||||
for name in names:
|
for name in names:
|
||||||
data[name] = result[name].interpolate(limit_direction='both', limit=10).tolist()
|
data[name] = result[name].interpolate(limit_direction='both', limit=10).tolist()
|
||||||
else:
|
else:
|
||||||
data[name] = result.interpolate().tolist()
|
data[name] = result.interpolate().tolist()
|
||||||
|
|
||||||
logging.info("Send Log for {}".format(names))
|
logging.info("Send Log for {}".format(names))
|
||||||
|
|
||||||
return data
|
return data
|
||||||
|
|
||||||
async def get_data2(self, ids) -> dict:
|
async def get_data2(self, ids) -> dict:
|
||||||
|
@ -146,7 +154,10 @@ class LogController:
|
||||||
|
|
||||||
result = dict()
|
result = dict()
|
||||||
for id in ids:
|
for id in ids:
|
||||||
df = pd.read_csv("./logs/sensor_%s.log" % id, parse_dates=True, date_parser=dateparse, index_col='DateTime', names=['DateTime',"Values"], header=None)
|
# df = pd.read_csv("./logs/sensor_%s.log" % id, parse_dates=True, date_parser=dateparse, index_col='DateTime', names=['DateTime',"Values"], header=None)
|
||||||
|
# concat all logs
|
||||||
|
all_filenames = glob.glob('./logs/sensor_%s.log*' % id)
|
||||||
|
df = pd.concat([pd.read_csv(f, parse_dates=True, date_parser=dateparse, index_col='DateTime', names=['DateTime', 'Values'], header=None) for f in all_filenames])
|
||||||
result[id] = {"time": df.index.astype(str).tolist(), "value":df.Values.tolist()}
|
result[id] = {"time": df.index.astype(str).tolist(), "value":df.Values.tolist()}
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue