chaonan / unify_api2 · Commit fd3450a0

Commit fd3450a0, authored May 19, 2023 by lcn
Parent: 91a67783
Commit message: 修改安电管理版 (update the Andian U management edition)

Showing 10 changed files with 393 additions and 536 deletions (+393, -536)
Changed files:

- unify_api/constants.py (+4, -1)
- unify_api/modules/alarm_manager/service/list_alarm_service.py (+17, -11)
- unify_api/modules/alarm_manager/views/list_alarm.py (+36, -11)
- unify_api/modules/common/procedures/common_cps.py (+84, -115)
- unify_api/modules/common/procedures/health_score.py (+107, -143)
- unify_api/modules/common/procedures/points.py (+11, -10)
- unify_api/modules/home_page/procedures/count_info_proxy_pds.py (+116, -187)
- unify_api/modules/home_page/procedures/security_info_pds.py (+12, -54)
- unify_api/modules/home_page/views/count_info_proxy.py (+4, -3)
- unify_api/modules/home_page/views/security_info.py (+2, -1)
unify_api/constants.py
@@ -474,3 +474,6 @@ ELECTRIC_PARAM_MAP = {

```python
    "unbalanceU",  # 三相电压不平衡度 (three-phase voltage unbalance)
    "overPR"
}

CST = "Asia/Shanghai"  # (no newline at end of file)
```
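The new `CST` constant is what the rewritten queries below use to build their time windows. A minimal sketch of the pattern (illustrative, matching the pendulum calls that appear in later hunks):

```python
import pendulum

CST = "Asia/Shanghai"

# Today's window, formatted the way the rewritten MySQL queries expect.
start_time = pendulum.now(tz=CST).start_of("day").format("YYYY-MM-DD HH:mm:ss")
end_time = pendulum.now(tz=CST).format("YYYY-MM-DD HH:mm:ss")
print(start_time, end_time)  # e.g. 2023-05-19 00:00:00  2023-05-19 14:03:27
```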
unify_api/modules/alarm_manager/service/list_alarm_service.py
@@ -145,7 +145,7 @@ async def list_alarm_zdu_service(cid, point_list, page_num, page_size, start,

```python
    results = await list_alarm_zdu_dao_new15(cid, point_list, start, end,
                                             importance, event_type)
    real_total = len(results)
    results = results[(page_num - 1) * page_size: page_num * page_size]
    # 2. Fetch the factory's description info for each alarm type
    event_dic = await company_extend_dao(cid)
    event_dic_map = {event["key"]: event for event in event_dic}
```
@@ -253,7 +253,10 @@ async def list_alarm_service_new15(cid, point_id, start, end, importance,

```diff
     if point_id:
         li.append(f"pid={point_id}")
     else:
-        li.append(f"cid={cid}")
+        if not isinstance(cid, list):
+            cid = [cid]
+        cid_where = str(tuple(cid)).replace(",)", ")")
+        li.append(f"cid in {cid_where}")
     if start and end:
         li.append(f"event_datetime BETWEEN '{start}' and '{end}'")
     if importance:
```
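The `cid_where` construction works around Python's one-element tuple rendering: `str(tuple([42]))` yields `"(42,)"`, whose trailing comma is invalid SQL, so the code strips it. A standalone demonstration:

```python
cid = [42]
cid_where = str(tuple(cid)).replace(",)", ")")
print(cid_where)               # (42)      -- valid in "cid in (42)"
print(str(tuple([1, 2, 3])))   # (1, 2, 3) -- multi-element tuples are unaffected
```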
@@ -270,7 +273,7 @@ async def list_alarm_service_new15(cid, point_id, start, end, importance,

```python
        li.append(f"event_type in {str(tuple(alarm_type)).strip(',')}")
    mid_sql = " and ".join(li)
    total = await get_total_list_alarm_dao(mid_sql)
    mid_sql2 = " and ".join(["point_1min_event." + i for i in li])
    datas = await get_list_alarm_dao(mid_sql2, page_size, page_num)
    rows = []
    for data in datas:
```
@@ -279,7 +282,10 @@ async def list_alarm_service_new15(cid, point_id, start, end, importance,

```python
        type_str = constants.EVENT_TYPE_MAP.get(event_type)
        location_id = data.get("lid")
        es_id = data.get("id")
        if point_id and data.get("event_mode") == "scope":
            url = "/scope_details?doc_id=%s" % es_id
            redirect_type = "scope"
        elif location_id and (event_type in constants.TEMP_SCOPE_URL_TYPE):
            url = "/temp_trend?doc_id=%s" % es_id
            redirect_type = "temp_trend"
        else:
```

(The old branch tested the bare builtin `type` instead of `event_type`; the hunk replaces it with the parenthesised `event_type in constants.TEMP_SCOPE_URL_TYPE` check.)
unify_api/modules/alarm_manager/views/list_alarm.py
@@ -18,6 +18,7 @@ from unify_api.modules.alarm_manager.service.list_alarm_service import \

```python
    wx_list_alarm_zdu_service, list_alarm_service_new15
from unify_api.modules.common.procedures.cids import get_cid_info, get_cids, \
    get_proxy_cids
from unify_api.modules.users.procedures.jwt_user import jwt_user
from unify_api.utils import time_format
from unify_api import constants
from pot_libs.common.components.query import PageRequest, Equal, Range, Filter, \
```
@@ -55,7 +56,31 @@ async def post_list_alarm(req, body: PageRequest) -> ListAlarmResponse:

```python
            alarm_type = in_group.group
        elif in_group.field == 'importance':
            importance = in_group.group
    cids = []
    if req.json.get("product") == Product.AndianUManage.value:
        proxy_id = req.json.get("proxy_id")
        product = req.json.get("product")
        user_id = jwt_user(req)
        req_cids = req.json.get("cids")
        # cids = await get_cids(user_id, product)
        proxy_cids = await get_proxy_cids(user_id, product, proxy_id)
        cids = list(set(req_cids) & set(proxy_cids))
    if req.json.get("product") in [Product.RecognitionElectric.value,
                                   Product.IntelligentU.value]:
        if not cid:
            product = req.json.get("product")
            user_id = jwt_user(req)
            cids = await get_cids(user_id, product)
        else:
            cids = [cid]
    if not cids and cid:
        cids = [cid]
    if not cids:
        raise BusinessException(message=f"你没有工厂权限")
    return await list_alarm_service_new15(cids, point_id, start, end,
                                          importance, page_size, page_num,
                                          alarm_type)
```

(The old direct `return await list_alarm_service_new15(cid, ...)` call is replaced by the permission-scoped `cids` logic above.)
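For the AndianUManage product the handler now keeps only the requested companies the proxy user is actually entitled to, via set intersection. A sketch with made-up ids:

```python
req_cids = [101, 102, 103]    # companies the client asked for
proxy_cids = [102, 103, 104]  # companies get_proxy_cids() grants this user
cids = list(set(req_cids) & set(proxy_cids))
print(sorted(cids))  # [102, 103] -- anything not granted is silently dropped
```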
@@ -127,7 +152,8 @@ async def post_list_alarm_bak(req, body: PageRequest) -> ListAlarmResponse:

```python
    )
    query_body = EsQuery().query(page_request)
    if not query_body.get("query"):
        query = {"bool": {"must_not": [{"terms": {"mode.keyword": ["scope"]}}]}}
        query_body["query"] = query
    else:
        must_not = [{"terms": {"mode.keyword": ["scope"]}}]
```
@@ -206,7 +232,6 @@ async def post_new_list_alarm(req, body: NlaReq) -> ListAlarmResponse:

```python
        product)


@summary("小程序消息列表")
async def post_wx_list_alarm(req, body: WlaReq) -> ListAlarmResponse:
    # 1. Get the request parameters
```
unify_api/modules/common/procedures/common_cps.py
@@ -7,6 +7,7 @@ from pot_libs.es_util.es_utils import EsUtil

```python
from pot_libs.logger import log
from pot_libs.mysql_util.mysql_util import MysqlUtil
from unify_api import constants
from unify_api.constants import CST


def point_day2month(dt):
```
@@ -24,55 +25,35 @@ def point_day2month(dt):

```python
async def today_alarm_cnt(cids):
    start_time = pendulum.today(tz="Asia/Shanghai")
    es_end_time = start_time.subtract(days=-1).format("YYYY-MM-DD HH:mm:ss")
    es_start_time = start_time.format("YYYY-MM-DD HH:mm:ss")
    sql = f"""
        select cid,count(*) count
        from point_1min_event pe
        left join event_type et on pe.event_type = et.e_type
        where cid in %s and et.mode = 'alarm' and event_datetime >= %s
        and event_datetime < %s
        group by cid
    """
    async with MysqlUtil() as conn:
        datas = await conn.fetchall(sql=sql,
                                    args=(cids, es_start_time, es_end_time))
    cid_bucket_map = {i["cid"]: i["count"] for i in datas}
    cid_alarm_map = {cid: {"today_alarm_count": 0} for cid in cids}
    for cid in cids:
        alarm_count = cid_bucket_map.get(cid) or 0
        cid_alarm_map[cid]["today_alarm_count"] += alarm_count
    return cid_alarm_map


async def proxy_safe_run_info(cids, start_time_str=None, end_time_str=None):
    """
    Batch-fetch each factory's safe-run days and today's alarm count;
    for a monthly query, compute the safe-run days within that month.
    :param cids:
```

(The former Elasticsearch date-histogram aggregation over POINT_1MIN_EVENT, together with its `filters`/`query_body` construction, is removed in favour of the MySQL query above.)
@@ -83,83 +64,67 @@ async def proxy_safe_run_info(cids, start_time_str=None, end_time_str=None):

```python
    where = ""
    start_dt, end_dt, start_ts, end_ts = None, None, 0, 0
    now_dt = pendulum.now(tz=CST)
    if start_time_str and end_time_str:
        start_dt = pendulum.parse(start_time_str)
        end_dt = pendulum.parse(end_time_str)
        start_ts = start_dt.int_timestamp
        end_ts = end_dt.int_timestamp
        now_ts = now_dt.int_timestamp
        if end_ts > now_ts:
            end_time_str = now_dt.format("YYYY-MM-DD HH:mm:ss")
        where += f" and event_datetime>= '{start_time_str}' and " \
                 f"event_datetime < '{end_time_str}' "
    sql = f"""
        select cid,date_format(event_datetime,"%%Y-%%m-%%d") fmt_day,
        count(*) count
        from point_1min_event
        where cid in %s {where}
        group by cid,date_format(event_datetime,"%%Y-%%m-%%d")
    """
    async with MysqlUtil() as conn:
        datas = await conn.fetchall(sql=sql, args=(cids,))
    # Fetch each factory's install time (create_time)
    async with MysqlUtil() as conn:
        company_sql = "select cid, create_time from company where cid in %s"
        companys = await conn.fetchall(company_sql, (cids,))
    create_time_timestamp_map = {
        company["cid"]: pendulum.from_timestamp(company["create_time"], tz=CST)
        for company in companys
    }
    cid_alarm_map = {cid: {"today_alarm_count": 0, "safe_run_days": 0}
                     for cid in cids}
    cid_alarm_count_dict = dict()
    for data in datas:
        cid = data.get("cid")
        if cid not in cid_alarm_count_dict:
            cid_alarm_count_dict[cid] = 0
        elif data.get("count") > 0:
            cid_alarm_count_dict[cid] += 1
    for cid in cids:
        create_dt = create_time_timestamp_map[cid]
        total_days = (now_dt - create_dt).days + 1
        if start_time_str and end_time_str:
            # For a bounded window the total-days logic differs slightly
            total_days = (end_dt.date() - start_dt.date()).days + 1
            create_ts = create_dt.int_timestamp
            if start_ts < create_ts < end_ts:
                total_days = (end_dt - create_dt).days + 1
            elif create_ts > end_ts:
                total_days = 0
        has_alarm_days = cid_alarm_count_dict.get(cid) or 0
        safe_run_days = total_days - has_alarm_days
        cid_alarm_map[cid]["safe_run_days"] = safe_run_days
        cid_alarm_map[cid]["total_days"] = total_days
    today_alarm_map = await today_alarm_cnt(cids)
    for cid in cid_alarm_map:
        cid_alarm_map[cid]["today_alarm_count"] = \
            today_alarm_map[cid]["today_alarm_count"]
    return cid_alarm_map
```

(The old ES importance-1 aggregation, the `datetime.fromtimestamp` install-time map, and the bucket loop that counted alarm days from `doc_count` are removed; the per-day counts now come from the grouped SQL rows.)
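The safe-run arithmetic above reduces to: days in the window (clamped by the factory's install time) minus days that had at least one qualifying event. A worked sketch of the clamping, assuming the same pendulum types:

```python
import pendulum

tz = "Asia/Shanghai"
create_dt = pendulum.datetime(2023, 4, 20, tz=tz)        # installed mid-window
start_dt = pendulum.parse("2023-04-01 00:00:00", tz=tz)
end_dt = pendulum.parse("2023-04-30 23:59:59", tz=tz)

total_days = (end_dt.date() - start_dt.date()).days + 1  # 30
if start_dt.int_timestamp < create_dt.int_timestamp < end_dt.int_timestamp:
    total_days = (end_dt - create_dt).days + 1           # 11
has_alarm_days = 3                                       # days with count > 0
print(total_days - has_alarm_days)                       # 8 safe-run days
```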
@@ -178,9 +143,10 @@ async def alarm_time_distribution(company_ids, start, end):

```python
        HOUR (pevent.event_datetime)
    """
    async with MysqlUtil() as conn:
        datas = await conn.fetchall(sql, args=(company_ids,))
    time_distribution_map = {"day_alarm_cnt": 0, "night_alarm_cnt": 0,
                             "morning_alarm_cnt": 0}
    for data in datas:
        hour = int(data["event_hour"])
        if hour >= 6 and hour < 18:
```
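The remaining branches of this loop are elided in the diff view; given the three counters, hours outside 06:00–18:00 presumably split between `night_alarm_cnt` and `morning_alarm_cnt`. A self-contained sketch of the classification, with the two unseen boundaries being my assumption:

```python
def classify_hour(hour: int) -> str:
    if 6 <= hour < 18:
        return "day_alarm_cnt"    # 06:00-18:00 per the visible branch
    if 18 <= hour < 22:
        return "night_alarm_cnt"  # assumed boundary
    return "morning_alarm_cnt"    # assumed: 22:00-06:00

counts = {"day_alarm_cnt": 0, "night_alarm_cnt": 0, "morning_alarm_cnt": 0}
for hour in (2, 9, 13, 20):
    counts[classify_hour(hour)] += 1
print(counts)  # {'day_alarm_cnt': 2, 'night_alarm_cnt': 1, 'morning_alarm_cnt': 1}
```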
@@ -195,7 +161,8 @@ async def alarm_time_distribution(company_ids, start, end):

```python
async def alarm_time_distribution_old(company_ids, start, end):
    start_dt = datetime.strptime(start, "%Y-%m-%d %H:%M:%S")
    end_dt = datetime.strptime(end, "%Y-%m-%d %H:%M:%S")
    es_start_str = datetime(year=start_dt.year, month=start_dt.month,
                            day=start_dt.day).strftime("%Y-%m-%dT%H:%M:%S+08:00")
    es_end_str = end_dt.strftime("%Y-%m-%dT%H:%M:%S+08:00")
```
@@ -227,10 +194,12 @@ async def alarm_time_distribution_old(company_ids, start, end):

```python
    log.info("alarm time distribute query_body={}".format(query_body))
    async with EsUtil() as es:
        es_result = await es.search_origin(body=query_body,
                                           index=constants.POINT_1MIN_EVENT)
    print(f"alarm time distribute es_result = {es_result}")
    buckets = es_result["aggregations"]["cid_aggs"]["buckets"] or []
    time_distribution_map = {"day_alarm_cnt": 0, "night_alarm_cnt": 0,
                             "morning_alarm_cnt": 0}
    for i in buckets:
        cid_buckets = i.get("time_alarms", {}).get("buckets", [])
        for item in cid_buckets:
```
unify_api/modules/common/procedures/health_score.py
@@ -9,7 +9,8 @@ from pot_libs.logger import log

```python
from pot_libs.mysql_util.mysql_util import MysqlUtil
from unify_api.modules.common.dao.health_score_dao import \
    health_score_points_aggs, get_point_dats_dao, get_mean_datas_dao
from unify_api.modules.common.procedures.points import get_points, \
    get_points_new15
from unify_api.modules.electric.procedures.electric_util import \
    batch_get_wiring_type
from unify_api.modules.home_page.procedures import point_inlines
```
@@ -117,9 +118,11 @@ async def load_health_radar(cid, param_point_id=None):

```python
        "SELECT pid, mtid FROM point WHERE pid IN %s order by pid, create_time asc"
    )
    async with MysqlUtil() as conn:
        change_meter_records = await conn.fetchall(
            sql, args=(tuple(all_point_ids),))
    point_mid_map = {
        i["pid"]: i["mtid"] for i in change_meter_records
        if i["mtid"] is not None
    }
    # Get the standard voltage from meter_param_record
```
@@ -128,7 +131,8 @@ async def load_health_radar(cid, param_point_id=None):

```python
    if all_mids:
        async with MysqlUtil() as conn:
            sql = "SELECT mtid, vc, voltage_side, ctnum FROM point " \
                  "WHERE mtid IN %s order by mtid, create_time asc"
            meter_param_records = await conn.fetchall(
                sql, args=(tuple(all_mids),))
    meter_param_map = {i["mtid"]: i for i in meter_param_records}
    log.info(f"all_mids={all_mids}")
```
@@ -253,7 +257,8 @@ async def load_health_radar(cid, param_point_id=None):

```python
        lf_score,
    )
    if not thdu_score:
        thdu_score = (v_score + freq_score + ubl_score + costtl_score +
                      lf_score) / 5.0
    # Store in redis
    score_info = {
```
@@ -441,54 +446,26 @@ async def load_manage_health_radar(cids, recent_days=30):

```python
    # Compute the start of the recent 30-day window
    today = pendulum.today()
    start_time = today.subtract(days=recent_days).format("YYYY-MM-DD HH:mm:ss")
    end_time = today.subtract(seconds=1).format("YYYY-MM-DD HH:mm:ss")
    company_point_map = await get_points_new15(cids)
    all_point_map = dict()
    for cid, points in company_point_map.items():
        for pid, point_info in points.items():
            all_point_map[pid] = point_info
    all_point_ids = list(all_point_map.keys())
    sql = f"""
        select pid,avg(ua_mean) ua_mean,avg(uab_mean) uab_mean,avg(freq_mean) freq_mean,
        avg(ubl_mean) ubl_mean,avg(costtl_mean) costtl_mean,
        avg(thdua_mean) thdua_mean,avg(lf_mean) lf_mean from
        point_15min_electric
        where pid in %s and create_time >= %s and create_time <= %s
        group by pid
    """
    async with MysqlUtil() as conn:
        datas = await conn.fetchall(sql, args=(all_point_ids, start_time,
                                               end_time))
    data_map = {i['pid']: i for i in datas}
    # Compute each company's health index separately
    company_score_map = {}
```

(The old implementation built per-point `stats` aggregations against POINT_15MIN_INDEX, choosing `ua_mean` or `uab_mean` by `ctnum`, and ran them through EsUtil; those lines are removed in favour of the SQL averages above.)
@@ -508,7 +485,7 @@ async def load_manage_health_radar(cids, recent_days=30):

```diff
             continue
         inline_point_ids, point_ids = [], []
         for point_id, point_item in point_map.items():
-            if point_item["inlid_belongedto"]:
+            if point_item["inlid"]:
                 inline_point_ids.append(point_id)
             else:
                 point_ids.append(point_id)
```
@@ -516,20 +493,15 @@ async def load_manage_health_radar(cids, recent_days=30):

```python
        # 1. Voltage deviation score
        total, total_score = 0, 0
        for point_id in inline_point_ids + point_ids:
            data_point_map = data_map.get(point_id)
            if not data_point_map:
                continue
            ua_mean = data_point_map.get("ua_mean")
            if ua_mean is None:
                continue
            point_info = all_point_map[point_id]
            meter_vc, ctnum = point_info.get("vc"), point_info.get("ctnum") or 3
            if meter_vc:
                stand_voltage = meter_vc / sqrt(3) if ctnum == 3 else meter_vc
            else:
```

(The es_result-based `ua_mean` lookup and the old `meter_param` indirection are removed; `vc` and `ctnum` now come straight off the point row.)
@@ -546,12 +518,10 @@ async def load_manage_health_radar(cids, recent_days=30):

```python
        # 2. Frequency deviation score
        total, total_score = 0, 0
        for point_id in inline_point_ids + point_ids:
            data_point_map = data_map.get(point_id)
            if not data_point_map:
                continue
            freq_mean = data_point_map.get("freq_mean")
            if freq_mean is None:
                continue
```
@@ -566,12 +536,10 @@ async def load_manage_health_radar(cids, recent_days=30):

```python
        # 3. Three-phase voltage unbalance score
        total, total_score = 0, 0
        for point_id in inline_point_ids + point_ids:
            data_point_map = data_map.get(point_id)
            if not data_point_map:
                continue
            ubl_avg = data_point_map.get("ubl_mean")
            if ubl_avg is None:
                continue
            score = get_dev_score(dev_type="ubl", cur=ubl_avg)
```
@@ -588,12 +556,10 @@ async def load_manage_health_radar(cids, recent_days=30):

```python
        else:
            ids = point_ids
        for point_id in ids:
            data_point_map = data_map.get(point_id)
            if not data_point_map:
                continue
            costtl_mean = data_point_map.get("costtl_mean")
            if costtl_mean is None:
                continue
            score = get_dev_score(dev_type="costtl", cur=costtl_mean)
```
@@ -607,12 +573,10 @@ async def load_manage_health_radar(cids, recent_days=30):

```python
        # Voltage THD: only three-meter-wired points are scored; if every
        # monitored point uses two-meter wiring, average the other indicators
        total, total_score = 0, 0
        for point_id in inline_point_ids + point_ids:
            data_point_map = data_map.get(point_id)
            if not data_point_map:
                continue
            thdua_mean = data_point_map.get("thdua_mean")
            if thdua_mean is None:
                continue
            score = get_dev_score(dev_type="thdu", cur=thdua_mean)
```
@@ -625,12 +589,10 @@ async def load_manage_health_radar(cids, recent_days=30):

```python
        # 5. Load factor
        total, total_score = 0, 0
        for point_id in inline_point_ids + point_ids:
            data_point_map = data_map.get(point_id)
            if not data_point_map:
                continue
            lf_mean = data_point_map.get("lf_mean")
            if lf_mean is None:
                score = 100
            else:
```
@@ -652,7 +614,8 @@ async def load_manage_health_radar(cids, recent_days=30):

```python
            lf_score,
        )
        if not thdu_score:
            thdu_score = (v_score + freq_score + ubl_score + costtl_score +
                          lf_score) / 5.0
        company_score_map[cid] = {
            "v_score": v_score,
```
@@ -692,5 +655,6 @@ async def load_manage_health_index(company_score_info):

```python
        sub_costtl = (1 - score_info["costtl_score"] / 100.0) * 20
        sub_thdu = (1 - score_info["thdu_score"] / 100.0) * 20
        sub_ubl = (1 - score_info["ubl_score"] / 100.0) * 20
        company_index_map[cid] = 100 - sub_dev - sub_lf - sub_costtl - \
                                 sub_thdu - sub_ubl
    return company_index_map
```
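So each of the five dimensions can deduct at most 20 points, linearly in how far its score falls below 100. A quick check of the arithmetic with hypothetical scores:

```python
score_info = {"v_score": 90, "lf_score": 100, "costtl_score": 80,
              "thdu_score": 95, "ubl_score": 100}  # hypothetical inputs

deduction = sum((1 - s / 100.0) * 20 for s in score_info.values())
print(100 - deduction)  # 93.0  (deducts 2 + 0 + 4 + 1 + 0)
```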
unify_api/modules/common/procedures/points.py
@@ -56,13 +56,14 @@ async def get_points(company_ids):

```diff
 async def get_points_new15(cids):
-    sql = "SELECT p.pid,p.cid,p.inlid FROM `point` p INNER JOIN monitor m " \
-          "on m.mtid=p.mtid where p.cid in %s and m.demolished=0;"
+    sql = "SELECT p.pid,p.cid,p.inlid,vc,ctnum " \
+          "FROM `point` p INNER JOIN " \
+          "monitor m on m.mtid=p.mtid where p.cid in %s and m.demolished=0;"
     async with MysqlUtil() as conn:
         points = await conn.fetchall(sql, args=(cids,))
     company_point_map = defaultdict(dict)
     for point in points:
-        company_point_map[point["cid"]][point["pid"]] = points
+        company_point_map[point["cid"]][point["pid"]] = point
     return company_point_map
```
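`get_points_new15` thus returns a nested mapping keyed by company id, then point id, with each row now carrying `vc` and `ctnum` alongside `inlid`. Expected shape, sketched with fabricated rows:

```python
company_point_map = {
    17: {  # cid
        301: {"pid": 301, "cid": 17, "inlid": 5, "vc": 400, "ctnum": 3},
        302: {"pid": 302, "cid": 17, "inlid": 0, "vc": 220, "ctnum": 2},
    },
}
print(len(company_point_map[17]))           # 2 monitored points for company 17
print(company_point_map[17][301]["ctnum"])  # 3 -> three-meter wiring
```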
unify_api/modules/home_page/procedures/count_info_proxy_pds.py
```python
import random
from datetime import datetime, timedelta

import pendulum
from pot_libs.es_util.es_utils import EsUtil
from pot_libs.logger import log
from pot_libs.mysql_util.mysql_util import MysqlUtil
from pot_libs.utils.pendulum_wrapper import my_pendulum
from unify_api import constants
from unify_api.constants import COMPANY_1DAY_POWER, EVENT_TYPE_MAP, Importance, \
    CST
from unify_api.modules.alarm_manager.dao.list_static_dao import \
    sdu_alarm_aggs_type
from unify_api.modules.common.procedures.cids import get_cid_info
```
@@ -19,19 +20,17 @@ from unify_api.modules.common.procedures.health_score import (

```python
    load_manage_health_radar,
    load_manage_health_index,
)
from unify_api.modules.common.procedures.points import get_points, \
    get_points_new15
from unify_api.modules.home_page.procedures.count_info_pds import \
    datetime_to_timestamp
from unify_api.utils.es_query_body import agg_statistics
from unify_api.utils.time_format import last30_day_range_today


async def proxy_alarm_score(cids):
    now_dt = pendulum.now()
    end_time = now_dt.format("YYYY-MM-DD HH:mm:ss")
    start_time = now_dt.subtract(days=30).format("YYYY-MM-DD HH:mm:ss")
    score_events = [i for i in EVENT_TYPE_MAP.keys()
```

(The old `datetime.now()`/`datetime_to_timestamp` 30-day window is replaced by the pendulum window.)
@@ -49,50 +48,38 @@ async def proxy_alarm_score(cids):

```python
            "under_rms_u",
        ]
    ]
    sql = f"""
        select cid,importance,count(*) count from point_1min_event
        where cid in %s and event_datetime >= %s and event_datetime <= %s
        and event_type in %s
        group by cid,importance
    """
    log.info("cal_score_safe_electric sql={}".format(sql))
    async with MysqlUtil() as conn:
        datas = await conn.fetchall(sql, args=(cids, start_time, end_time,
                                               score_events))
    data_map = {"{}-{}".format(i["cid"], i["importance"]): i["count"]
                for i in datas}
    cid_alarm_score_map = {}
    for cid in cids:
        first_key = "{}-{}".format(cid, Importance.First.value)
        second_key = "{}-{}".format(cid, Importance.Second.value)
        third_key = "{}-{}".format(cid, Importance.Third.value)
        if first_key not in data_map and second_key not in data_map and \
                third_key not in data_map:
            cid_alarm_score_map[cid] = 100
            continue
        first_alarm_cnt = 0
        second_alarm_cnt = 0
        third_alarm_cnt = 0
        if first_key in data_map:
            first_alarm_cnt = data_map.get(first_key)
        if second_key in data_map:
            second_alarm_cnt = data_map.get(second_key)
        if third_key in data_map:
            third_alarm_cnt = data_map.get(third_key)
        company_point_map = await get_points_new15(cids)
        point_len = len(company_point_map.get(cid) or {})
        alarm_score = ((
```

(The ES `query_body` with per-cid importance aggregations, its bucket-walking loop, and the old `get_points` call are removed.)
@@ -138,7 +125,7 @@ async def alarm_percentage_count(cids):

```diff
             FROM
                 point_1min_event pevent
             WHERE
-                cid = %s
+                cid in %s
                 AND pevent.event_datetime >= '{start}'
                 AND pevent.event_datetime <= '{end}'
             GROUP BY
```
@@ -151,7 +138,7 @@ async def alarm_percentage_count(cids):

```diff
             FROM
                 point_1min_event pevent
             WHERE
-                cid = %s
+                cid in %s
                 AND pevent.event_datetime >= '{start}'
                 AND pevent.event_datetime <= '{end}'
             GROUP BY
```
@@ -301,46 +288,23 @@ async def proxy_today_alarm_cnt(cids, group_field="importance"):

```python
    :param group_field: example: "importance"
    :return:
    """
    start_time = pendulum.now(tz=CST).start_of(unit="day").format(
        "YYYY-MM-DD HH:mm:ss")
    end_time = pendulum.now(tz=CST).format("YYYY-MM-DD HH:mm:ss")
    if group_field == "type":
        # Needs to join against the type column of event_type
        group_field = "event_type"
    sql = f"""
        select cid,{group_field},count(*) count from
        point_1min_event
        where event_datetime >= %s and event_datetime <= %s
        and cid in %s
        group by cid,{group_field}
    """
    async with MysqlUtil() as conn:
        datas = await conn.fetchall(sql, args=(start_time, end_time, cids))
    log.info("alarm aggs sql={}".format(sql))
    cid_alarm_map = {cid: {
```

(The timestamp-range ES `query_body` and its per-cid `terms` aggregations, keyed on `{group_field}` or `{group_field}.keyword`, are removed.)
@@ -355,13 +319,6 @@ async def proxy_today_alarm_cnt(cids, group_field="importance"):

```python
    } for cid in cids}
    alarm_type_map = {
        Importance.First.value: "first_alarm_cnt",
        Importance.Second.value: "second_alarm_cnt",
```

(The loop that unpacked per-cid buckets from `es_result` is removed.)
@@ -372,10 +329,12 @@ async def proxy_today_alarm_cnt(cids, group_field="importance"):

```python
        "power_quality_low": "electric_quantity",
        "ele_car_battery": "ele_car_battery",
    }
    for data in datas:
        cid = data.get("cid")
        key = data.get(group_field)
        if key in alarm_type_map:
            value = alarm_type_map.get(key)
            cid_alarm_map[cid][value] = data.get("count")
    return cid_alarm_map
```

(The old `for bucket in cid_buckets` accumulation over `doc_count` is replaced by the SQL-row loop.)
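With the SQL grouping in place, filling the per-company counters is a direct row walk. A sketch of the `group_field="importance"` case with fabricated rows (`third_alarm_cnt` and the `Importance.*.value` integers are assumptions; the diff truncates that part of `alarm_type_map`):

```python
datas = [{"cid": 17, "importance": 1, "count": 4},
         {"cid": 17, "importance": 3, "count": 9}]

alarm_type_map = {1: "first_alarm_cnt", 2: "second_alarm_cnt",
                  3: "third_alarm_cnt"}
cid_alarm_map = {17: {"first_alarm_cnt": 0, "second_alarm_cnt": 0,
                      "third_alarm_cnt": 0}}
for data in datas:
    key = data.get("importance")
    if key in alarm_type_map:
        cid_alarm_map[data["cid"]][alarm_type_map[key]] = data["count"]
print(cid_alarm_map[17])
# {'first_alarm_cnt': 4, 'second_alarm_cnt': 0, 'third_alarm_cnt': 9}
```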
@@ -385,53 +344,22 @@ async def proxy_today_spfv_cnt(cids):

```python
    :param cids:
    :return:
    """
    start_time = pendulum.now(tz=CST).start_of(unit="day").format(
        "YYYY-MM-DD HH:mm:ss")
    end_time = pendulum.now(tz=CST).format("YYYY-MM-DD HH:mm:ss")
    sql = f"""
        select cid,spfv,sum(kwh) kwh from company_15min_power
        where cid in %s and create_time >= %s and create_time <= %s
        group by cid,spfv
    """
    async with MysqlUtil() as conn:
        datas = await conn.fetchall(sql, args=(cids, start_time, end_time))
    cid_spfv_map = {cid: {"s": 0, "p": 0, "f": 0, "v": 0} for cid in cids}
    for data in datas:
        cid = data.get("cid")
        spfv = data.get("spfv")
        cid_spfv_map[cid][spfv] = data.get("kwh")
    log.info(f"cid_spfv_map = {cid_spfv_map}")
    return cid_spfv_map
```

(The COMPANY_15MIN_POWER ES aggregation — including the comment warning that an unset `size` caps a terms aggregation at 10 buckets — and its `round(i["kwh"]["value"])` loop are removed.)
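The `s/p/f/v` keys are very likely the four Chinese time-of-use tariff periods (尖 sharp, 峰 peak, 平 flat, 谷 valley), so the function returns today's kWh per tariff period per company. A usage sketch (fabricated cids; assumes a configured MysqlUtil pool):

```python
import asyncio

async def main():
    cid_spfv_map = await proxy_today_spfv_cnt([17, 23])
    # Expected shape, e.g.:
    # {17: {"s": 120, "p": 340, "f": 210, "v": 95},
    #  23: {"s": 0, "p": 88, "f": 64, "v": 12}}
    print(cid_spfv_map)

asyncio.run(main())
```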
@@ -463,7 +391,8 @@ async def proxy_map_info(cids):

```python
    )
    # 5. Health ranking
    company_score_map = await load_manage_health_radar(cids,
                                                       recent_days=7)
    company_index_map = await load_manage_health_index(company_score_map)
    health_index_list = sorted(
        [(round(i), cid) for cid, i in company_index_map.items()],
        reverse=True
```
unify_api/modules/home_page/procedures/security_info_pds.py
@@ -175,61 +175,19 @@ async def alarm_summary(company_ids, start, end, date_type):

```python
    :param date_type:
    :return:
    """
    sql = f"""
        select cid,count(*) count from point_1min_event
        where cid in %s and event_mode = 'alarm' and event_datetime >= %s
        and event_datetime <= %s
        group by cid
    """
    log.info("alarm_summary sql={}".format(sql))
    async with MysqlUtil() as conn:
        datas = await conn.fetchall(sql, args=(company_ids, start, end))
    print(f"datas = {datas}")
    total_alarm_cnt, alarm_company_cnt = sum(
        [i["count"] for i in datas]), len(datas)
    cid_alarmcnt_list = [i["count"] for i in datas]
    safe_run_map = await proxy_safe_run_info(company_ids,
                                             start_time_str=start,
                                             end_time_str=end)
```

(The strptime/ES date formatting, the day-or-month `_format`/`_min`/`_max` setup, the `filter_list` date-histogram query, and the bucket-based `doc_count` totals are removed; totals now come from the SQL rows.)
unify_api/modules/home_page/views/count_info_proxy.py
@@ -48,6 +48,7 @@ from unify_api.modules.home_page.service.count_info_service import \

```python
    safe_run_sdu, safe_run_sdu_new15
from unify_api.modules.elec_charge.components.elec_charge_cps import \
    ProductProxyReq
from unify_api.modules.users.procedures.jwt_user import jwt_user


@summary("代理版首页统计信息-安电U")
```
@@ -55,7 +56,7 @@ async def post_count_info_proxy(req) -> CountInfoProxyResp:

```diff
     # 1. Get cid_list
     host = req.host
     product = PRODUCT.get(host)
-    user_id = req.ctx.user_id
+    user_id = jwt_user(req)
     proxy_id = req.json.get("proxy_id")
     # cid_list = await get_cids(user_id, product)
     cid_list = await get_proxy_cids(user_id, product, proxy_id)
```
@@ -105,7 +106,7 @@ async def post_security_level_count(

```diff
 async def post_alarm_percentage_count(
         request, body: ProxySecurityLevelCntReq) -> ProxyAlarmPercentageCntResp:
-    user_id = request.ctx.user_id
+    user_id = jwt_user(request)
     product = body.product
     req_cid = body.cid
     if not req_cid:
```
@@ -137,7 +138,7 @@ async def post_alarm_percentage_count(

```diff
 @summary("代理版本首页地图数据")
 async def post_proxy_map_info(
         request, body: ProxySecurityLevelCntReq) -> ProxyIndexMapResp:
-    user_id = request.ctx.user_id
+    user_id = jwt_user(request)
     product = body.product
     req_cid = body.cid
     if not req_cid:
```
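All three views swap `request.ctx.user_id` for `jwt_user(request)`, i.e. the user id is now derived from the request's JWT instead of pre-populated context. The helper's body is not part of this diff; a purely hypothetical sketch of the pattern it likely wraps:

```python
import jwt  # PyJWT -- illustrative only; the real helper lives in
            # unify_api.modules.users.procedures.jwt_user

def jwt_user_sketch(request, secret="dev-secret"):
    token = request.headers.get("Authorization", "").removeprefix("Bearer ")
    payload = jwt.decode(token, secret, algorithms=["HS256"])  # assumed algorithm
    return payload["user_id"]  # assumed claim name
```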
unify_api/modules/home_page/views/security_info.py
@@ -23,6 +23,7 @@ from unify_api.modules.home_page.components.security_info_cps import (

```python
from unify_api.modules.home_page.procedures.security_info_pds import (
    alarm_summary,
    alarm_count_info_new15,
)
from unify_api.modules.users.procedures.jwt_user import jwt_user


@summary("获取首页今日或者近30天安全报警统计信息")
```
@@ -164,7 +165,7 @@ async def post_alarm_summary(request, body: SecurityCommonReq) -> AlarmSummaryRe

```diff
     if not req_cids:
         raise BusinessException(message=f"暂无工厂")
     if product == Product.AndianUManage.value:
-        user_id = request.ctx.user_id
+        user_id = jwt_user(request)
         # cids = await get_cids(user_id, product)
         proxy_id = body.proxy_id
         cids = await get_proxy_cids(user_id, product, proxy_id)
```