# iiabm-core/risk_analysis_sum_result.py

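"""Summarise risk-simulation results stored in the iiabmdb database.

The script loads the aggregated SQL result, draws the product (BOM) graph and
the firm network for one experiment/sample, and exports disruption counts per
firm, per product, and per upstream-downstream (DCP) pair as CSV files under
output_result/risk/.
"""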
from orm import engine
import pandas as pd
import networkx as nx
import json
import matplotlib.pyplot as plt
# prep data
Firm = pd.read_csv("input_data\\Firm_amended.csv")
Firm['Code'] = Firm['Code'].astype('string')
Firm.fillna(0, inplace=True)
BomNodes = pd.read_csv('input_data\\BomNodes.csv', index_col=0)
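# BomNodes carries the product (BOM) node table; it is loaded here but not
# referenced again in this script.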
with open('SQL_analysis_risk.sql', 'r') as f:
    str_sql = f.read()
result = pd.read_sql(sql=str_sql,
                     con=engine)
result.to_csv('output_result\\risk\\count.csv',
              index=False,
              encoding='utf-8-sig')
print(result)
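# `result` is produced by SQL_analysis_risk.sql and is expected to contain at
# least the columns used below: s_id (simulation id), ts (time step), and the
# id_firm / id_product of each disrupted firm-product node.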
# G bom
plt.rcParams['font.sans-serif'] = 'SimHei'
exp_id = 1
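# g_bom is stored as an adjacency-format JSON string in the experiment table;
# nx.adjacency_graph rebuilds the graph, and graphviz_layout (which needs
# pygraphviz plus a Graphviz install) lays it out radially with "twopi".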
G_bom_str = pd.read_sql(
    sql=f'select g_bom from iiabmdb.without_exp_experiment '
    f'where id = {exp_id};',
    con=engine)['g_bom'].tolist()[0]
G_bom = nx.adjacency_graph(json.loads(G_bom_str))
pos = nx.nx_agraph.graphviz_layout(G_bom, prog="twopi", args="")
node_labels = nx.get_node_attributes(G_bom, 'Name')
# rename node 1
# node_labels['1'] = '工业互联网'  # Industrial Internet
# node_labels['1.1'] = '工业自动化硬件'  # industrial automation hardware
# node_labels['1.4'] = '工业互联网安全管理'  # industrial internet security management
# node_labels['1.2.1'] = '网络互联服务'  # network interconnection services
# node_labels['1.2.2'] = '标识解析服务'  # identifier resolution services
# node_labels['1.2.3'] = '数据互通服务'  # data interoperability services
# node_labels['1.3.1'] = '设计研发软件'  # design & R&D software
# node_labels['1.3.2'] = '采购供应软件'  # procurement & supply software
# node_labels['1.3.3'] = '生产制造软件'  # production & manufacturing software
# node_labels['1.3.4'] = '企业运营软件'  # enterprise operations software
# node_labels['1.3.5'] = '仓储物流软件'  # warehousing & logistics software
plt.figure(figsize=(12, 12), dpi=300)
nx.draw_networkx_nodes(G_bom, pos)
nx.draw_networkx_edges(G_bom, pos)
nx.draw_networkx_labels(G_bom, pos, labels=node_labels, font_size=6)
# plt.show()
plt.savefig(f"output_result\\risk\\g_bom_exp_id_{exp_id}.png")
plt.close()
# G firm
plt.rcParams['font.sans-serif'] = 'SimHei'
sample_id = 1
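# g_firm is stored the same way in the sample table. Nodes are firms sized by
# Revenue_Log squared; edges carry a Product attribute, and the (n1, n2, key)
# tuples below indicate the firm graph is a multigraph (parallel edges per
# product).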
G_firm_str = pd.read_sql(
    sql=f'select g_firm from iiabmdb.without_exp_sample where id = {sample_id};',
    con=engine)['g_firm'].tolist()[0]
G_firm = nx.adjacency_graph(json.loads(G_firm_str))
pos = nx.nx_agraph.graphviz_layout(G_firm, prog="twopi", args="")
# desensitize
node_label = nx.get_node_attributes(G_firm, 'Revenue_Log')
node_label = {key: key for key in node_label.keys()}
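# The labels above are replaced by the node keys themselves so that the saved
# figure does not expose firm-level revenue (the "_de" suffix in the output
# file name presumably stands for "desensitized").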
node_size = list(nx.get_node_attributes(G_firm, 'Revenue_Log').values())
node_size = list(map(lambda x: x**2, node_size))
edge_label = nx.get_edge_attributes(G_firm, "Product")
edge_label = {(n1, n2): label for (n1, n2, _), label in edge_label.items()}
plt.figure(figsize=(12, 12), dpi=300)
nx.draw(G_firm, pos, node_size=node_size, labels=node_label, font_size=6)
nx.draw_networkx_edge_labels(G_firm, pos, edge_label, font_size=4)
# plt.show()
plt.savefig(f"output_result\\risk\\g_firm_sample_id_{exp_id}_de.png")
plt.close()
# count firm product
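# Disruption frequency for every (firm, product) pair across all simulation
# runs; the firm-level and product-level counts below are sums over this table.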
count_firm_prod = result.value_counts(subset=['id_firm', 'id_product'])
count_firm_prod.name = 'count'
count_firm_prod = count_firm_prod.to_frame().reset_index()
count_firm_prod.to_csv('output_result\\risk\\count_firm_prod.csv',
                       index=False,
                       encoding='utf-8-sig')
print(count_firm_prod)
# count firm
count_firm = count_firm_prod.groupby('id_firm')['count'].sum()
count_firm = count_firm.to_frame().reset_index()
count_firm.sort_values('count', inplace=True, ascending=False)
count_firm.to_csv('output_result\\risk\\count_firm.csv',
                  index=False,
                  encoding='utf-8-sig')
print(count_firm)
# count product
count_prod = count_firm_prod.groupby('id_product')['count'].sum()
count_prod = count_prod.to_frame().reset_index()
count_prod.sort_values('count', inplace=True, ascending=False)
count_prod.to_csv('output_result\\risk\\count_prod.csv',
                  index=False,
                  encoding='utf-8-sig')
print(count_prod)
# DCP disruption causing probability
result_disrupt_ts_above_0 = result[result['ts'] > 0]
print(result_disrupt_ts_above_0)
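# For each simulation s_id, pair every firm-product disrupted at an earlier
# time step (ts_end) with every firm-product disrupted at a later step
# (ts_start) as a candidate upstream -> downstream propagation record.
# Note: appending rows via result_dcp.loc[len(...)] is quadratic; fine for
# small result sets, but collecting rows in a list first would scale better.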
result_dcp = pd.DataFrame(columns=[
    's_id', 'up_id_firm', 'up_id_product', 'down_id_firm', 'down_id_product'
])
for sid, group in result.groupby('s_id'):
    ts_start = max(group['ts'])
    while ts_start >= 1:
        ts_end = ts_start - 1
        while ts_end >= 0:
            up = group.loc[group['ts'] == ts_end, ['id_firm', 'id_product']]
            down = group.loc[group['ts'] == ts_start,
                             ['id_firm', 'id_product']]
            for _, up_row in up.iterrows():
                for _, down_row in down.iterrows():
                    row = [sid]
                    row += up_row.tolist()
                    row += down_row.tolist()
                    result_dcp.loc[len(result_dcp.index)] = row
            ts_end -= 1
        ts_start -= 1
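# Frequency of each upstream -> downstream firm-product pair; higher counts
# mark pairs that appear more often on candidate disruption-propagation paths
# (the basis of the DCP, disruption causing probability, analysis above).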
count_dcp = result_dcp.value_counts(
    subset=['up_id_firm', 'up_id_product', 'down_id_firm', 'down_id_product'])
count_dcp.name = 'count'
count_dcp = count_dcp.to_frame().reset_index()
count_dcp.to_csv('output_result\\risk\\count_dcp.csv',
                 index=False, encoding='utf-8-sig')
print(count_dcp)