Mirror of https://gitlab.ub.uni-bielefeld.de/sfb1288inf/nopaque.git (synced 2024-11-15 01:05:42 +00:00)
Fix cqi_over_socketio not handling cqi status correctly
This commit is contained in:
parent f037c31b88
commit be51044059
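
The pattern behind this fix: newer versions of the cqi client return a CQi status object rather than a bare integer code, so every handler below now serializes the object's own code attribute and class name instead of looking the integer up in cqi.api.specification.lookup. A minimal sketch of the new pattern (the helper name is illustrative, not part of the commit):

def cqi_status_to_payload(cqi_status) -> dict:
    # After this commit: the status object carries its numeric code and a
    # descriptive class name, so no lookup table is needed.
    return {
        'code': cqi_status.code,
        'msg': cqi_status.__class__.__name__
    }

# Before this commit, cqi_status was treated as a plain integer:
# payload = {'code': cqi_status,
#            'msg': cqi.api.specification.lookup[cqi_status]}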
@@ -18,8 +18,8 @@ def cqi_connect(cqi_client: cqi.CQiClient):
             'msg': 'Internal Server Error',
             'payload': {'code': e.args[0], 'desc': e.args[1]}
         }
-    payload = {'code': cqi_status,
-               'msg': cqi.api.specification.lookup[cqi_status]}
+    payload = {'code': cqi_status.code,
+               'msg': cqi_status.__class__.__name__}
     return {'code': 200, 'msg': 'OK', 'payload': payload}
 
 
@@ -28,8 +28,8 @@ def cqi_connect(cqi_client: cqi.CQiClient):
 @cqi_over_socketio
 def cqi_disconnect(cqi_client: cqi.CQiClient):
     cqi_status = cqi_client.disconnect()
-    payload = {'code': cqi_status,
-               'msg': cqi.api.specification.lookup[cqi_status]}
+    payload = {'code': cqi_status.code,
+               'msg': cqi_status.__class__.__name__}
     return {'code': 200, 'msg': 'OK', 'payload': payload}
 
 
@@ -38,6 +38,6 @@ def cqi_disconnect(cqi_client: cqi.CQiClient):
 @cqi_over_socketio
 def cqi_ping(cqi_client: cqi.CQiClient):
     cqi_status = cqi_client.ping()
-    payload = {'code': cqi_status,
-               'msg': cqi.api.specification.lookup[cqi_status]}
+    payload = {'code': cqi_status.code,
+               'msg': cqi_status.__class__.__name__}
     return {'code': 200, 'msg': 'OK', 'payload': payload}

@@ -16,8 +16,8 @@ from .utils import cqi_over_socketio, lookups_by_cpos
 def cqi_corpora_corpus_drop(cqi_client: cqi.CQiClient, corpus_name: str):
     cqi_corpus = cqi_client.corpora.get(corpus_name)
     cqi_status = cqi_corpus.drop()
-    payload = {'code': cqi_status,
-               'msg': cqi.api.specification.lookup[cqi_status]}
+    payload = {'code': cqi_status.code,
+               'msg': cqi_status.__class__.__name__}
     return {'code': 200, 'msg': 'OK', 'payload': payload}
 
 
@@ -27,8 +27,8 @@ def cqi_corpora_corpus_drop(cqi_client: cqi.CQiClient, corpus_name: str):
 def cqi_corpora_corpus_query(cqi_client: cqi.CQiClient, corpus_name: str, subcorpus_name: str, query: str): # noqa
     cqi_corpus = cqi_client.corpora.get(corpus_name)
     cqi_status = cqi_corpus.query(subcorpus_name, query)
-    payload = {'code': cqi_status,
-               'msg': cqi.api.specification.lookup[cqi_status]}
+    payload = {'code': cqi_status.code,
+               'msg': cqi_status.__class__.__name__}
     return {'code': 200, 'msg': 'OK', 'payload': payload}
 
 
@@ -50,177 +50,186 @@ def cqi_corpora_corpus_update_db(cqi_client: cqi.CQiClient, corpus_name: str):
 @cqi_over_socketio
 def cqi_corpora_corpus_get_visualization_data(cqi_client: cqi.CQiClient, corpus_name: str):
     corpus = cqi_client.corpora.get(corpus_name)
-    # s_attrs = [x for x in corpus.structural_attributes.list() if not x.has_values]
-    # p_attrs = corpus.positional_attributes.list()
-    # payload = {
-    #     's_attrs': {},
-    #     'p_attrs': {},
-    #     'values': {
-    #         's_attrs': {},
-    #         'p_attrs': {}
-    #     }
-    # }
-    # for s_attr in s_attrs:
-    #     s_attr_lbound, s_attr_rbound = s_attr.cpos_by_id(text_id)
-    #     s_attr_cpos_range = range(s_attr_lbound, s_attr_rbound + 1)
-    #     payload['text']['lexicon'][text_id] = {
-    #         's_attrs': [s_attr_lbound, s_attr_rbound],
-    #         'counts': {
-    #             'token': s_attr_rbound - s_attr_lbound + 1
-    #         },
-    #         'freqs': {
-    #             p_attr.name: dict(Counter(p_attr.ids_by_cpos(list(s_attr_cpos_range))))
-    #             for p_attr in p_attrs
-    #         }
-    #     }
-    # for p_attr in p_attrs:
-    #     payload['p_attrs'] = dict(
-    #     )
-    #     payload['values']['p_attrs'] = dict(
-    #         zip(
-    #             range(0, p_attr.lexicon_size),
-    #             p_attr.values_by_ids(list(range(0, p_attr.lexicon_size)))
-    #         )
-    #     )
-    text = corpus.structural_attributes.get('text')
-    text_value_names = []
-    text_values = []
-    for text_sub_attr in corpus.structural_attributes.list(filters={'part_of': text}):
-        text_value_names.append(text_sub_attr.name[(len(text.name) + 1):])
-        text_values.append(text_sub_attr.values_by_ids(list(range(0, text.size))))
+    payload = {
+        'corpus': {
+            'bounds': [0, corpus.size - 1],
+            'counts': {},
+            'freqs': {}
+        },
+        'p_attrs': {},
+        's_attrs': {},
+        'values': {'p_attrs': {}, 's_attrs': {}}
+    }
+    for p_attr in corpus.positional_attributes.list():
+        payload['corpus']['freqs'][p_attr.name] = dict(
+            zip(
+                range(0, p_attr.lexicon_size),
+                p_attr.freqs_by_ids(list(range(0, p_attr.lexicon_size)))
+            )
+        )
+        payload['p_attrs'][p_attr.name] = dict(
+            zip(
+                range(0, corpus.size),
+                p_attr.ids_by_cpos(list(range(0, corpus.size)))
+            )
+        )
+        payload['values']['p_attrs'][p_attr.name] = dict(
+            zip(
+                range(0, p_attr.lexicon_size),
+                p_attr.values_by_ids(list(range(0, p_attr.lexicon_size)))
+            )
+        )
     s = corpus.structural_attributes.get('s')
     ent = corpus.structural_attributes.get('ent')
-    ent_value_names = []
-    ent_values = []
-    for ent_sub_attr in corpus.structural_attributes.list(filters={'part_of': ent}):
-        ent_value_names.append(ent_sub_attr.name[(len(ent.name) + 1):])
-        ent_values.append(ent_sub_attr.values_by_ids(list(range(0, ent.size))))
-    word = corpus.positional_attributes.get('word')
-    lemma = corpus.positional_attributes.get('lemma')
-    pos = corpus.positional_attributes.get('pos')
-    simple_pos = corpus.positional_attributes.get('simple_pos')
-    payload = {}
-    payload['corpus'] = {'lexicon': {}, 'values': []}
-    payload['corpus']['lexicon'][0] = {
-        'bounds': [0, corpus.size - 1],
-        'counts': {
-            'text': text.size,
-            's': s.size,
-            'ent': ent.size,
-            'token': corpus.size
-        },
-        'freqs': {
-            'word': dict(
-                zip(
-                    range(0, word.lexicon_size),
-                    word.freqs_by_ids(list(range(0, word.lexicon_size)))
-                )
-            ),
-            'lemma': dict(
-                zip(
-                    range(0, lemma.lexicon_size),
-                    lemma.freqs_by_ids(list(range(0, lemma.lexicon_size)))
-                )
-            ),
-            'pos': dict(
-                zip(
-                    range(0, pos.lexicon_size),
-                    pos.freqs_by_ids(list(range(0, pos.lexicon_size)))
-                )
-            ),
-            'simple_pos': dict(
-                zip(
-                    range(0, simple_pos.lexicon_size),
-                    simple_pos.freqs_by_ids(list(range(0, simple_pos.lexicon_size)))
-                )
-            )
-        }
-    }
-    payload['text'] = {'lexicon': {}, 'values': None}
-    for text_id in range(0, text.size):
-        text_lbound, text_rbound = text.cpos_by_id(text_id)
-        text_cpos_range = range(text_lbound, text_rbound + 1)
-        text_s_ids = s.ids_by_cpos(list(text_cpos_range))
-        text_ent_ids = ent.ids_by_cpos(list(text_cpos_range))
-        payload['text']['lexicon'][text_id] = {
-            'bounds': [text_lbound, text_rbound],
-            'counts': {
-                's': len([x for x in text_s_ids if x != -1]),
-                'ent': len([x for x in text_ent_ids if x != -1]),
-                'token': text_rbound - text_lbound + 1
-            },
-            'freqs': {
-                'word': dict(
-                    Counter(word.ids_by_cpos(list(text_cpos_range)))
-                ),
-                'lemma': dict(
-                    Counter(lemma.ids_by_cpos(list(text_cpos_range)))
-                ),
-                'pos': dict(
-                    Counter(pos.ids_by_cpos(list(text_cpos_range)))
-                ),
-                'simple_pos': dict(
-                    Counter(simple_pos.ids_by_cpos(list(text_cpos_range)))
-                )
-            }
-        }
-    payload['text']['values'] = text_value_names
-    payload['s'] = {'lexicon': {}, 'values': None}
-    for s_id in range(0, s.size):
-        payload['s']['lexicon'][s_id] = {
-            # 'bounds': s.cpos_by_id(s_id)
-        }
-    payload['s']['values'] = [
-        sub_attr.name[(len(s.name) + 1):]
-        for sub_attr in corpus.structural_attributes.list(filters={'part_of': s})
-    ]
-    payload['ent'] = {'lexicon': {}, 'values': None}
-    for ent_id in range(0, ent.size):
-        payload['ent']['lexicon'][ent_id] = {
-            # 'bounds': ent.cpos_by_id(ent_id)
-        }
-    payload['ent']['values'] = ent_value_names
-    payload['lookups'] = {
-        'corpus': {},
-        'text': {
-            text_id: {
-                text_value_name: text_values[text_value_name_idx][text_id_idx]
-                for text_value_name_idx, text_value_name in enumerate(text_value_names)
-            } for text_id_idx, text_id in enumerate(range(0, text.size))
-        },
-        's': {},
-        'ent': {
-            ent_id: {
-                ent_value_name: ent_values[ent_value_name_idx][ent_id_idx]
-                for ent_value_name_idx, ent_value_name in enumerate(ent_value_names)
-            } for ent_id_idx, ent_id in enumerate(range(0, ent.size))
-        },
-        'word': dict(
-            zip(
-                range(0, word.lexicon_size),
-                word.values_by_ids(list(range(0, word.lexicon_size)))
-            )
-        ),
-        'lemma': dict(
-            zip(
-                range(0, lemma.lexicon_size),
-                lemma.values_by_ids(list(range(0, lemma.lexicon_size)))
-            )
-        ),
-        'pos': dict(
-            zip(
-                range(0, pos.lexicon_size),
-                pos.values_by_ids(list(range(0, pos.lexicon_size)))
-            )
-        ),
-        'simple_pos': dict(
-            zip(
-                range(0, simple_pos.lexicon_size),
-                simple_pos.values_by_ids(list(range(0, simple_pos.lexicon_size)))
-            )
-        )
-    }
+    for s_attr in corpus.structural_attributes.list():
+        if s_attr.has_values:
+            continue
+        payload['corpus']['counts'][s_attr.name] = s_attr.size
+        payload['s_attrs'][s_attr.name] = {'lexicon': {}, 'values': []}
+        for id in range(0, s_attr.size):
+            payload['s_attrs'][s_attr.name]['lexicon'][id] = {}
+            if s_attr.name != 'text':
+                continue
+            lbound, rbound = s_attr.cpos_by_id(id)
+            payload['s_attrs'][s_attr.name]['lexicon'][id]['bounds'] = [lbound, rbound]
+            cpos_range = range(lbound, rbound + 1)
+            payload['s_attrs'][s_attr.name]['lexicon'][id]['counts'] = {}
+            payload['s_attrs'][s_attr.name]['lexicon'][id]['counts']['s'] = len({x for x in s.ids_by_cpos(list(cpos_range)) if x != -1})
+            payload['s_attrs'][s_attr.name]['lexicon'][id]['counts']['ent'] = len({x for x in ent.ids_by_cpos(list(cpos_range)) if x != -1})
+            payload['s_attrs'][s_attr.name]['lexicon'][id]['counts']['token'] = rbound - lbound + 1
+            payload['s_attrs'][s_attr.name]['lexicon'][id]['freqs'] = {}
+            for p_attr in corpus.positional_attributes.list():
+                payload['s_attrs'][s_attr.name]['lexicon'][id]['freqs'][p_attr.name] = dict(Counter(p_attr.ids_by_cpos(list(cpos_range))))
+    # for s_attr in s_attrs:
+    #     payload['s_attrs'][s_attr.name] = {'lexicon': {}, 'values': []}
+    #     payload['values']['s_attrs'][s_attr.name] = {}
+    #     for id in range(0, s_attr.size):
+    #         payload['s_attrs'][s_attr.name]['lexicon'][id] = {}
+    #         if s_attr.name != 'text':
+    #             continue
+    #         lbound, rbound = s_attr.cpos_by_id(id)
+    #         cpos_range = range(lbound, rbound + 1)
+    #         # s_ids
+    #         payload['s_attrs'][s_attr.name]['lexicon'][id]['bounds'] = [lbound, rbound]
+    #         payload['s_attrs'][s_attr.name]['lexicon'][id]['counts'] = {}
+    #         payload['s_attrs'][s_attr.name]['lexicon'][id]['counts']['token'] = rbound - lbound + 1
+    #         payload['s_attrs'][s_attr.name]['lexicon'][id]['freqs'] = {
+    #             p_attr.name: dict(Counter(p_attr.ids_by_cpos(list(cpos_range))))
+    #             for p_attr in p_attrs
+    #         }
+    #     for sub_attr in corpus.structural_attributes.list(filters={'part_of': s_attr}):
+    #         payload['s_attrs'][s_attr.name]['values'].append(sub_attr.name[(len(s_attr.name) + 1):])
+    #         payload['values']['s_attrs'][s_attr.name][sub_attr.name[(len(s_attr.name) + 1):]] = dict(
+    #             zip(
+    #                 range(0, sub_attr.size),
+    #                 sub_attr.values_by_ids(list(range(0, sub_attr.size)))
+    #             )
+    #         )
+    # text = corpus.structural_attributes.get('text')
+    # text = corpus.structural_attributes.get('text')
+    # text_value_names = []
+    # text_values = []
+    # for text_sub_attr in corpus.structural_attributes.list(filters={'part_of': text}):
+    #     text_value_names.append(text_sub_attr.name[(len(text.name) + 1):])
+    #     text_values.append(text_sub_attr.values_by_ids(list(range(0, text.size))))
+    # s = corpus.structural_attributes.get('s')
+    # ent = corpus.structural_attributes.get('ent')
+    # ent_value_names = []
+    # ent_values = []
+    # for ent_sub_attr in corpus.structural_attributes.list(filters={'part_of': ent}):
+    #     ent_value_names.append(ent_sub_attr.name[(len(ent.name) + 1):])
+    #     ent_values.append(ent_sub_attr.values_by_ids(list(range(0, ent.size))))
+    # word = corpus.positional_attributes.get('word')
+    # lemma = corpus.positional_attributes.get('lemma')
+    # pos = corpus.positional_attributes.get('pos')
+    # simple_pos = corpus.positional_attributes.get('simple_pos')
+    # payload = {}
+    # payload['text'] = {'lexicon': {}, 'values': None}
+    # for text_id in range(0, text.size):
+    #     text_lbound, text_rbound = text.cpos_by_id(text_id)
+    #     text_cpos_range = range(text_lbound, text_rbound + 1)
+    #     text_s_ids = s.ids_by_cpos(list(text_cpos_range))
+    #     text_ent_ids = ent.ids_by_cpos(list(text_cpos_range))
+    #     payload['text']['lexicon'][text_id] = {
+    #         'bounds': [text_lbound, text_rbound],
+    #         'counts': {
+    #             's': len([x for x in text_s_ids if x != -1]),
+    #             'ent': len([x for x in text_ent_ids if x != -1]),
+    #             'token': text_rbound - text_lbound + 1
+    #         },
+    #         'freqs': {
+    #             'word': dict(
+    #                 Counter(word.ids_by_cpos(list(text_cpos_range)))
+    #             ),
+    #             'lemma': dict(
+    #                 Counter(lemma.ids_by_cpos(list(text_cpos_range)))
+    #             ),
+    #             'pos': dict(
+    #                 Counter(pos.ids_by_cpos(list(text_cpos_range)))
+    #             ),
+    #             'simple_pos': dict(
+    #                 Counter(simple_pos.ids_by_cpos(list(text_cpos_range)))
+    #             )
+    #         }
+    #     }
+    # payload['text']['values'] = text_value_names
+    # payload['s'] = {'lexicon': {}, 'values': None}
+    # for s_id in range(0, s.size):
+    #     payload['s']['lexicon'][s_id] = {
+    #         # 'bounds': s.cpos_by_id(s_id)
+    #     }
+    # payload['s']['values'] = [
+    #     sub_attr.name[(len(s.name) + 1):]
+    #     for sub_attr in corpus.structural_attributes.list(filters={'part_of': s})
+    # ]
+    # payload['ent'] = {'lexicon': {}, 'values': None}
+    # for ent_id in range(0, ent.size):
+    #     payload['ent']['lexicon'][ent_id] = {
+    #         # 'bounds': ent.cpos_by_id(ent_id)
+    #     }
+    # payload['ent']['values'] = ent_value_names
+    # payload['lookups'] = {
+    #     'corpus': {},
+    #     'text': {
+    #         text_id: {
+    #             text_value_name: text_values[text_value_name_idx][text_id_idx]
+    #             for text_value_name_idx, text_value_name in enumerate(text_value_names)
+    #         } for text_id_idx, text_id in enumerate(range(0, text.size))
+    #     },
+    #     's': {},
+    #     'ent': {
+    #         ent_id: {
+    #             ent_value_name: ent_values[ent_value_name_idx][ent_id_idx]
+    #             for ent_value_name_idx, ent_value_name in enumerate(ent_value_names)
+    #         } for ent_id_idx, ent_id in enumerate(range(0, ent.size))
+    #     },
+    #     'word': dict(
+    #         zip(
+    #             range(0, word.lexicon_size),
+    #             word.values_by_ids(list(range(0, word.lexicon_size)))
+    #         )
+    #     ),
+    #     'lemma': dict(
+    #         zip(
+    #             range(0, lemma.lexicon_size),
+    #             lemma.values_by_ids(list(range(0, lemma.lexicon_size)))
+    #         )
+    #     ),
+    #     'pos': dict(
+    #         zip(
+    #             range(0, pos.lexicon_size),
+    #             pos.values_by_ids(list(range(0, pos.lexicon_size)))
+    #         )
+    #     ),
+    #     'simple_pos': dict(
+    #         zip(
+    #             range(0, simple_pos.lexicon_size),
+    #             simple_pos.values_by_ids(list(range(0, simple_pos.lexicon_size)))
+    #         )
+    #     )
+    # }
     # print(payload)
     return {'code': 200, 'msg': 'OK', 'payload': payload}
 
@@ -32,8 +32,8 @@ def cqi_corpora_corpus_subcorpora_subcorpus_drop(cqi_client: cqi.CQiClient, corp
     cqi_corpus = cqi_client.corpora.get(corpus_name)
     cqi_subcorpus = cqi_corpus.subcorpora.get(subcorpus_name)
     cqi_status = cqi_subcorpus.drop()
-    payload = {'code': cqi_status,
-               'msg': cqi.api.specification.lookup[cqi_status]}
+    payload = {'code': cqi_status.code,
+               'msg': cqi_status.__class__.__name__}
     return {'code': 200, 'msg': 'OK', 'payload': payload}
 
 
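
For orientation, the rewritten cqi_corpora_corpus_get_visualization_data in the large hunk above replaces the old hard-coded 'text'/'s'/'ent' sections and per-attribute lookup tables with a single payload keyed by attribute names. A rough outline of that structure, read off the added lines (keys from the diff, comments describing what the loops fill in):

payload = {
    'corpus': {
        'bounds': [],   # [0, corpus.size - 1]
        'counts': {},   # s_attr.name -> s_attr.size
        'freqs': {}     # p_attr.name -> {lexicon id: frequency}
    },
    'p_attrs': {},      # p_attr.name -> {cpos: lexicon id}
    's_attrs': {},      # s_attr.name -> {'lexicon': {id: bounds/counts/freqs}, 'values': []}
    'values': {
        'p_attrs': {},  # p_attr.name -> {lexicon id: value string}
        's_attrs': {}
    }
}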
|