Mirror of https://gitlab.ub.uni-bielefeld.de/sfb1288inf/nopaque.git, synced 2024-12-24 10:34:17 +00:00
Work on new list building
This commit is contained in:
parent c1adcb93ee
commit da4cc75943
@@ -233,20 +233,26 @@ class CQiWrapper(CQiClient):
                 tmp_info[struct_attr_key].append(id)
             else:
                 structs_to_check.append({key: struct_attr_key})
+        logger.warning('Structs to check: {}'.format(structs_to_check))
         struct_attr_values = list(tmp_info.values())
+        # logger.warning('Struct attr value list: {}'.format(struct_attr_values))
         struct_attr_keys = list(tmp_info.keys())
+        # logger.warning('Struct attr key list: {}'.format(struct_attr_keys))

         # Build textlookup dictionary
-        text_lookup_ids = list(set(struct_attr_values[0]))  # First is always one text
-        text_lookup = {}
+        text_lookup_ids = list(set(struct_attr_values[0]))  # every CPOS is associated with one text id; a set is built to only gather text_lookup information for every unique text id
+        text_lookup = {}  # final dict containing all info of one text identified by its id
         for d in structs_to_check:
             s_key, s_value = zip(*d.items())
-            s_value = s_value[0].split('_', 1)[1]
+            logger.warning('dict entries: {}: {}'.format(s_key, s_value))
+            s_value = s_value[0].split('_', 1)[-1]
+            logger.warning('S_VALUE: {}'.format(s_value))
             struct_values = self.cl_struc2str(s_key[0], text_lookup_ids)
+            logger.warning('Extracted Value with key {}: {}'.format(s_key[0], struct_values))
             zipped = dict(zip(text_lookup_ids, struct_values))
             for zip_key, zip_value in zipped.items():
-                logger.warning('key: {}'.format(zip_key))
-                logger.warning('value: {}'.format(zip_value))
+                logger.warning('Text id as key is: {}'.format(zip_key))
+                logger.warning('Value of this text is: {}'.format(zip_value))
                 check = text_lookup.get(zip_key)
                 logger.warning('check: {}'.format(check))
                 if check is None:
@@ -1,3 +0,0 @@
-class JobList extends List {
-
-}
@@ -243,6 +243,74 @@ class JobList extends List {
     return rowElement;
   }
 }
+
+
+class ResultList extends List {
+
+  createResultRowElement(item) {
+    let values, cpos, matchRowElement, lcCellElement, lcTokenElement, token;
+    // gather values from item
+    values = item.values();
+
+    // get infos for full match row
+    matchRowElement = document.createElement("tr");
+    for (cpos of values["lc"]) {
+      console.log(cpos);
+      lcCellElement = document.createElement("td");
+      lcTokenElement = document.createElement("span");
+      lcTokenElement.classList.add("token");
+      lcTokenElement.dataset.cpos = cpos;
+      token = chunk["cpos_lookup"][cpos];
+      lcTokenElement = token["word"];
+      console.log(lcTokenElement.outerHTML);
+      // let hit_tokens = "";
+    }
+    // // get infos of match
+    // let textTitles = new Set();
+    // for (cpos of match["hit"]) {
+    //   tokenElement = document.createElement("span");
+    //   tokenElement.classList.add("token");
+    //   tokenElement.dataset.cpos = cpos;
+    //   token = chunk["cpos_lookup"][cpos];
+    //   tokenElement.innerText = token["word"];
+    //   hit_tokens += " " + tokenElement.outerHTML;
+    //   // get text titles of every hit cpos token
+    //   textTitles.add(chunk["text_lookup"][token["text"]]["title"]);
+    // }
+    // // add button to trigger more context to every match td
+    // var inspectBtn = document.createElement("a");
+    // inspectBtn.setAttribute("class", "btn-floating btn-flat waves-effect waves-light grey right inspect");
+    // inspectBtn.onclick = function() {inspect()};
+    // inspectBtn.innerHTML = '<i class="material-icons">search</i>';
+    // hit_tokens += "<p>" + inspectBtn.outerHTML + "</p>";
+    // // get infos for right context of match
+    // let rc_tokens = "";
+    // for (cpos of match["rc"]) {
+    //   tokenElement = document.createElement("span");
+    //   tokenElement.classList.add("token");
+    //   tokenElement.dataset.cpos = cpos;
+    //   token = chunk["cpos_lookup"][cpos];
+    //   tokenElement.innerText = token["word"];
+    //   rc_tokens += " " + tokenElement.outerHTML;
+    // }
+    // // put all infos into a javascript object
+    // textTitleElement = document.createElement("span");
+    // textTitleElement.classList.add("text-titles");
+    // textTitles = [...textTitles].join(",");
+    // textTitleElement.innerText = textTitles;
+    //
+    // matchRowElement.appendChild(textTitleElement);
+    // // matchRowElement.appendChild(lc_tokens);
+    // // matchRowElement.appendChild(hit_tokens);
+    // // matchRowElement.appendChild(rc_tokens);
+    // // matchRowElement.appendChild(index);
+    // }
+    // return matchRowElement
+  }
+}
+
+
+
 JobList.DEFAULT_OPTIONS = {item: "<br>",
                            page: 4,
                            pagination: {innerWindow: 8, outerWindow: 1},
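The new createResultRowElement is still a work in progress: the <td> and <span> elements it creates are never appended to matchRowElement, and lcTokenElement is overwritten with the plain word string right before its outerHTML is logged. A minimal sketch of how the left-context cell could be assembled (the function name is made up, and the chunk object with its "cpos_lookup" table is passed in explicitly instead of being taken from the enclosing scope):

// Sketch only, not the committed implementation: build one result row from a
// List.js item. item.values() and the "lc" cpos list follow the code above.
function createResultRowElementSketch(item, chunk) {
  let values = item.values();
  let matchRowElement = document.createElement("tr");
  let lcCellElement = document.createElement("td");
  lcCellElement.classList.add("lc");
  for (let cpos of values["lc"]) {
    let token = chunk["cpos_lookup"][cpos];
    let lcTokenElement = document.createElement("span");
    lcTokenElement.classList.add("token");
    lcTokenElement.dataset.cpos = cpos;
    lcTokenElement.innerText = token["word"];  // set the text instead of replacing the element
    lcCellElement.appendChild(lcTokenElement);
    lcCellElement.appendChild(document.createTextNode(" "));
  }
  matchRowElement.appendChild(lcCellElement);
  return matchRowElement;
}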
@@ -310,15 +310,14 @@
       innerWindow: 8,
       outerWindow: 1
     }],
-    valueNames: ["titles", "lc", "hit", "rc"],
+    valueNames: ["titles", "lc", "hit", "rc", {data: ["index"]}],
     item: `<tr>
             <td class="titles"></td>
             <td class="lc"></td>
             <td class="hit"></td>
             <td class="rc"></td>
           </tr>`};
-  resultList = new List('result-list', options);
+  resultList = new ResultList('result-list', options);

   });

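For context on the valueNames change: List.js treats an entry like {data: ["index"]} as a data attribute, so the index value ends up as data-index on each generated row, where a click handler can read it back later. A small usage sketch with made-up sample values (it assumes a container with id "result-list" whose table body carries the class "list", as List.js requires):

// Usage sketch with invented sample data; only valueNames and item are shown.
let options = {
  valueNames: ["titles", "lc", "hit", "rc", {data: ["index"]}],
  item: `<tr>
           <td class="titles"></td>
           <td class="lc"></td>
           <td class="hit"></td>
           <td class="rc"></td>
         </tr>`
};
let resultList = new ResultList("result-list", options);
resultList.add([{titles: "text one", lc: "left context", hit: "match", rc: "right context", index: 0}]);
// the first row element (resultList.items[0].elm) now carries data-index="0"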
@@ -326,14 +325,14 @@
   nopaque.socket.on("corpus_analysis_query", function(response) {
     // ERROR code checking
     if (response["code"] === 0) {
-      console.log("[ERROR] corpus_analysis_init");
+      console.log("[SUCCESS] corpus_analysis_init");
       console.log("Code:" + response["code"]);
       // further code execution of this code block starting in line 342
     } else if (response["code"] === 1) {
       queryResultsTableElement.classList.add("hide");
       queryLoadingElement.classList.add("hide");
       nopaque.toast("Invalid query entered!", "red");
-      console.log("[SUCCESS] corpus_analysis_init");
+      console.log("[ERROR] corpus_analysis_init");
       console.log("Code:" + response["code"]);
       return; // no further code execution of this code block
     } else {
@@ -369,40 +368,29 @@
       // List building/appending the chunks when query had results
       // write metadata query information into HTML elements
       // like nr. of all matches in how many files etc.
-      // TODO: count_corpus_files must be taken from the full results.
+      // TODO: count_corpus_files must be taken from the full results. It is correct at the end, but counts up in between.
       match_count = chunk["match_count"];
       let count_corpus_files = Object.keys(result["text_lookup"]).length;
       queryResultsMetadataElement.innerHTML = chunk["match_count"] + " matches in " + count_corpus_files + " corpus files.";
       queryResultsMetadataElement.appendChild(exportQueryResults);
       exportQueryResults.classList.remove("hide");

-      var toAdd = [];
+      var resultItems = [];  // list for holding every row item
+      // get infos for full match row
       for (let [index, match] of chunk["matches"].entries()) {
-        lc_tokens = "";
-        for (cpos of match["lc"]) {
-          word = chunk["cpos_lookup"][cpos]["word"];
-          lc_tokens += " " + word;
-        }
-        // console.log(lc_tokens);
-        hit_tokens = "";
-        for (cpos of match["hit"]) {
-          word = chunk["cpos_lookup"][cpos]["word"];
-          hit_tokens += " " + word;
-        }
-        // console.log(hit_tokens);
-        rc_tokens = "";
-        for (cpos of match["rc"]) {
-          word = chunk["cpos_lookup"][cpos]["word"];
-          rc_tokens += " " + word;
-        }
-        // console.log(rc_tokens);
-        item = { titles: "test", lc: lc_tokens, hit: hit_tokens, rc: rc_tokens };
-        toAdd.push(item);
+        resultItems.push({...match, ...{"index": index}});
+      }
+      resultList.add(resultItems, items => {
+        for (let item of items) {
+          item.elm = resultList.createResultRowElement(item);}
+      });
+      resultList.update();
+    });
+
+    // inspect match functions
+    function inspect() {
+      console.log("Inspect!")
       }
-      resultList.add(toAdd, function(toAdd) {console.log('All '
-        + toAdd.length
-        + ' results were added!')});
-    });

       // Function to download data to a file
       function download(downloadElem, data, filename, type) {
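Condensed, the new list building amounts to: collect every match plus its position into resultItems, hand them to resultList.add(), replace each default row with a custom element in the add callback, and re-render. A sketch of that flow as a helper (the function name is invented; chunk and resultList are the objects used in the handler above):

// Condensed sketch of the new flow; metadata handling and error checking omitted.
function addChunkToResultList(chunk, resultList) {
  let resultItems = [];
  for (let [index, match] of chunk["matches"].entries()) {
    resultItems.push({...match, "index": index});  // keep the match data plus its position in the chunk
  }
  resultList.add(resultItems, (items) => {
    for (let item of items) {
      item.elm = resultList.createResultRowElement(item);  // custom <tr> replaces the template row
    }
  });
  resultList.update();  // re-render rows and pagination
}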