{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from flair.nn import Classifier\n",
    "from flair.data import Sentence\n",
    "\n",
    "linker = Classifier.load('linker')\n",
    "ner = Classifier.load('flair/ner-english-ontonotes-fast')\n",
    "\n",
    "def linker_model(input_text, tagger):\n",
    "    \"\"\"Linker model predict tags for sentences\"\"\"\n",
    "    sentence = Sentence(input_text)\n",
    "    tagger.predict(sentence)\n",
    "\n",
    "    # iterate through sentences and print predicted labels\n",
    "    label_dict = {}\n",
    "    for label in sentence.get_labels():\n",
    "        if label.data_point.text.endswith(\"F.C.\"):\n",
    "            continue\n",
    "        if (label.score>0.5) & (label.value != \"<unk>\"):\n",
    "            if label.value in label_dict:\n",
    "                label_dict[label.value].append(label.data_point.text)\n",
    "            else:\n",
    "                label_dict[label.value] = [label.data_point.text]\n",
    "    return label_dict\n",
    "\n",
    "def find_keys_by_value(dictionary, value):\n",
    "    \"\"\"Find key by value\"\"\"\n",
    "    keys = []\n",
    "    for key, values in dictionary.items():\n",
    "        if value in values:\n",
    "            keys.append(key)\n",
    "    return keys\n",
    "\n",
    "def recognition_model(input_text, label_dict, tagger):\n",
    "    \"\"\"recognition model\"\"\"\n",
    "    ner_dict = {}\n",
    "    score_dict = {}\n",
    "    sentence = Sentence(input_text)\n",
    "    tagger.predict(sentence)\n",
    "    # for sentence in sentences:\n",
    "    for label in sentence.get_labels():\n",
    "        if label.score>0.5:\n",
    "            data_point = label.data_point.text\n",
    "            label_value = label.value\n",
    "            # label_value PER\n",
    "            keys = find_keys_by_value(label_dict, data_point)\n",
    "            if len(keys)>0:\n",
    "                if label_value in ner_dict:\n",
    "                    if keys[0] not in ner_dict[label_value]:\n",
    "                        ner_dict[label.value].append(keys[0])\n",
    "                        score_dict[keys[0]] = label.score\n",
    "                else:\n",
    "                    ner_dict[label.value] = [keys[0]]\n",
    "                    score_dict[keys[0]] = label.score\n",
    "    return ner_dict, score_dict"
   ]
  },
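  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A minimal usage sketch (added for illustration, not part of the original pipeline): run the two helpers above on a single hand-written sentence to inspect their outputs before looping over a whole DataFrame. The example sentence and the `text` variable are assumptions."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative example on an assumed input sentence.\n",
    "text = \"Barack Obama was born in Hawaii and later lived in Washington.\"\n",
    "\n",
    "# linked entity -> surface mentions found in the text\n",
    "label_dict = linker_model(text, linker)\n",
    "print(label_dict)\n",
    "\n",
    "# NER type -> linked entities, plus a confidence score per entity\n",
    "ner_dict, score_dict = recognition_model(text, label_dict, ner)\n",
    "print(ner_dict, score_dict)"
   ]
  },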
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "for _, item in df.iterrows():\n",
    "    label_dict = linker_model(item['content'],linker)\n",
    "    ner_dict, score_dict = recognition_model(item['content'], label_dict, ner)\n",
    "    # result = result_dictionary_constructor(ner_dict, label_dict)\n",
    "    print(ner_dict, score_dict)"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}